| prompt (stringlengths 19 to 879k) | completion (stringlengths 3 to 53.8k) | api (stringlengths 8 to 59) |
|---|---|---|
# coding=utf-8
import numpy as np
import scipy.sparse as sp
from scipy.linalg import inv as scipy_inv
from scipy.linalg import lstsq as scipy_lstsq
from scipy.sparse import linalg as ln
def parse_st_var(ds, st_var, st_label='st', ix_sel=None):
"""
Utility function to check the st_var input and to return it in the correct
format.
Parameters
----------
ds : DataStore
st_var : float, callable, array-like
If `float`, the variance of the noise from the Stokes detector is
described with a single value.
If `callable`, the variance of the noise from the Stokes detector is
a function of the intensity, as defined by the callable.
Or, when the variance is a function of the intensity (Poisson
distributed), define a DataArray with the same shape as ds.st, where the
variance can be a function of time and/or x.
st_label : string
Name of the (reverse) Stokes/anti-Stokes data variable that is being
parsed.
ix_sel : None, array-like
Index mapping along the x-dimension to apply to st_var. Required when
st_var is array-like.
Returns
-------
Parsed st_var
"""
if callable(st_var):
st_var_sec = st_var(ds[st_label].isel(x=ix_sel)).values
elif np.size(st_var) > 1:
if ix_sel is None:
raise ValueError(
'`ix_sel` kwarg not defined while `st_var` is array-like')
for a, b in zip(st_var.shape[::-1], ds[st_label].shape[::-1]):
if a == 1 or b == 1 or a == b:
pass
else:
raise ValueError(
st_label + '_var is not broadcastable to ds.' + st_label)
if len(st_var.shape) > 1:
st_var_sec = np.asarray(st_var, dtype=float)[ix_sel]
else:
st_var_sec = np.asarray(st_var, dtype=float)
else:
st_var_sec = np.asarray(st_var, dtype=float)
assert np.all(np.isfinite(st_var_sec)), \
'NaN/inf values detected in ' + st_label + '_var. Please check input.'
return st_var_sec
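# Example (illustrative only): the three accepted forms of `st_var`, assuming
# a hypothetical DataStore `ds` with a Stokes variable 'st' and a section
# index array `ix_sec`:
#   parse_st_var(ds, 5.0, st_label='st', ix_sel=ix_sec)                       # single variance value
#   parse_st_var(ds, lambda st: 0.1 * st, st_label='st', ix_sel=ix_sec)       # variance as f(intensity)
#   parse_st_var(ds, np.full(ds.st.shape, 4.0), st_label='st', ix_sel=ix_sec) # array broadcastable to ds.st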
def calibration_single_ended_solver(
ds,
st_var=None,
ast_var=None,
calc_cov=True,
solver='sparse',
matching_indices=None,
transient_att_x=None,
verbose=False):
"""
Parameters
----------
ds : DataStore
Should have sections and reference temperature timeseries already
configured.
st_var : float, array-like, optional
If `None`, use OLS calibration. If `float`, the variance of the noise
from the Stokes detector is described with a single value. Or, when the
variance is a function of the intensity (Poisson distributed), define an
array with shape (nx, nt), where nx is the number of calibration
locations.
ast_var : float, array-like, optional
If `None`, use OLS calibration. If `float`, the variance of the noise
from the anti-Stokes detector is described with a single value. Or, when
the variance is a function of the intensity (Poisson distributed), define
an array with shape (nx, nt), where nx is the number of calibration
locations.
calc_cov : bool
Whether to calculate the covariance matrix. Required for calculating
confidence intervals, but uses a lot of memory.
solver : {'sparse', 'sparse2', 'stats', 'external', 'external_split'}
Always use 'sparse' to save memory. The 'stats' solver (statsmodels) can
be used to validate the sparse solver. `external` returns the matrices
that would enter the matrix solver (Eq.37). `external_split` returns a
dictionary with matrix X split into the coefficients per parameter. The
use case for the latter is when certain parameters are fixed/combined.
matching_indices : array-like
Is an array of size (np, 2), where np is the number of paired
locations. This array is produced by `matching_sections()`.
transient_att_x : iterable, optional
Splices can cause jumps in differential attenuation. Normal single
ended calibration assumes these are not present. An additional loss term
is added in the 'shadow' of the splice. Each location introduces nt
additional parameters to solve for. This requires either an additional
calibration section or matching sections. If multiple locations are
defined, the losses are added.
verbose : bool
Returns
-------
"""
# get ix_sec argsort so the sections are in order of increasing x
ix_sec = ds.ufunc_per_section(x_indices=True, calc_per='all')
ds_sec = ds.isel(x=ix_sec)
x_sec = ds_sec['x'].values
x_all = ds['x'].values
nx = x_sec.size
nt = ds.time.size
nta = len(transient_att_x) if transient_att_x else 0
nm = matching_indices.shape[0] if np.any(matching_indices) else 0
if np.any(matching_indices):
ds_ms0 = ds.isel(x=matching_indices[:, 0])
ds_ms1 = ds.isel(x=matching_indices[:, 1])
p0_est = np.asarray([485., 0.1] + nt * [1.4] + nta * nt * [0.])
# X \gamma # Eq.34
cal_ref = ds.ufunc_per_section(
label='st', ref_temp_broadcasted=True, calc_per='all')
cal_ref = cal_ref # sort by increasing x
data_gamma = 1 / (cal_ref.T.ravel() + 273.15) # gamma
coord_gamma_row = np.arange(nt * nx, dtype=int)
coord_gamma_col = np.zeros(nt * nx, dtype=int)
X_gamma = sp.coo_matrix(
(data_gamma, (coord_gamma_row, coord_gamma_col)),
shape=(nt * nx, 1),
copy=False)
# X \Delta\alpha # Eq.34
data_dalpha = np.tile(-x_sec, nt) # dalpha
coord_dalpha_row = np.arange(nt * nx, dtype=int)
coord_dalpha_col = np.zeros(nt * nx, dtype=int)
X_dalpha = sp.coo_matrix(
(data_dalpha, (coord_dalpha_row, coord_dalpha_col)),
shape=(nt * nx, 1),
copy=False)
# X C # Eq.34
data_c = -np.ones(nt * nx, dtype=int)
coord_c_row = np.arange(nt * nx, dtype=int)
coord_c_col = np.repeat(np.arange(nt, dtype=int), nx)
X_c = sp.coo_matrix(
(data_c, (coord_c_row, coord_c_col)), shape=(nt * nx, nt), copy=False)
# X ta #not documented
if transient_att_x:
TA_list = list()
for transient_att_xi in transient_att_x:
# first index on the right-hand side of the splice
# Deal with connector outside of fiber
if transient_att_xi >= x_sec[-1]:
ix_sec_ta_ix0 = nx
elif transient_att_xi <= x_sec[0]:
ix_sec_ta_ix0 = 0
else:
ix_sec_ta_ix0 = np.flatnonzero(x_sec >= transient_att_xi)[0]
# Data is -1
# I = 1/Tref*gamma - C - da - TA
data_ta = -np.ones(nt * (nx - ix_sec_ta_ix0), dtype=float)
# skip ix_sec_ta_ix0 locations, because they are upstream of
# the connector.
coord_ta_row = (
np.tile(np.arange(ix_sec_ta_ix0, nx), nt)
+ np.repeat(np.arange(nx * nt, step=nx), nx - ix_sec_ta_ix0))
# nt parameters
coord_ta_col = np.repeat(
np.arange(nt, dtype=int), nx - ix_sec_ta_ix0)
TA_list.append(
sp.coo_matrix(
(data_ta, (coord_ta_row, coord_ta_col)),
shape=(nt * nx, nt),
copy=False))
X_TA = sp.hstack(TA_list)
else:
X_TA = sp.coo_matrix(([], ([], [])), shape=(nt * nx, 0))
if np.any(matching_indices):
# first make matrix without the TA part (only diff in attenuation)
data_ma = np.tile(ds_ms1['x'].values - ds_ms0['x'].values, nt)
coord_ma_row = np.arange(nm * nt)
coord_ma_col = np.ones(nt * nm)
X_ma = sp.coo_matrix(
(data_ma, (coord_ma_row, coord_ma_col)),
shape=(nm * nt, 2 + nt),
copy=False)
# make TA matrix
if transient_att_x:
transient_m_data = np.zeros((nm, nta))
for ii, row in enumerate(matching_indices):
for jj, transient_att_xi in enumerate(transient_att_x):
transient_m_data[ii, jj] = np.logical_and(
transient_att_xi > x_all[row[0]],
transient_att_xi < x_all[row[1]]).astype(int)
data_mt = np.tile(transient_m_data, (nt, 1)).flatten('F')
coord_mt_row = (np.tile(np.arange(nm * nt), nta))
coord_mt_col = (
np.tile(np.repeat(np.arange(nt), nm), nta)
+ np.repeat(np.arange(nta * nt, step=nt), nt * nm))
X_mt = sp.coo_matrix(
(data_mt, (coord_mt_row, coord_mt_col)),
shape=(nm * nt, nta * nt),
copy=False)
else:
X_mt = sp.coo_matrix(
([], ([], [])), shape=(nm * nt, 0), copy=False)
# merge the two
X_m = sp.hstack((X_ma, X_mt))
else:
X_m = sp.coo_matrix(([], ([], [])), shape=(0, 2 + nt + nta * nt))
# Stack all X's
X = sp.vstack((sp.hstack((X_gamma, X_dalpha, X_c, X_TA)), X_m))
# y, transpose the values to arrange them correctly
y = np.log(ds_sec.st / ds_sec.ast).values.T.ravel()
if np.any(matching_indices):
# y_m = I_1 - I_2
y_m = (
np.log(ds_ms0.st.values / ds_ms0.ast.values)
- np.log(ds_ms1.st.values / ds_ms1.ast.values)).T.ravel()
y = np.hstack((y, y_m))
# w
if st_var is not None:
st_var_sec = parse_st_var(ds, st_var, st_label='st', ix_sel=ix_sec)
ast_var_sec = parse_st_var(ds, ast_var, st_label='ast', ix_sel=ix_sec)
w = 1 / (ds_sec.st**-2 * st_var_sec
+ ds_sec.ast**-2 * ast_var_sec).values.ravel()
if np.any(matching_indices):
st_var_ms0 = parse_st_var(
ds, st_var, st_label='st', ix_sel=matching_indices[:, 0])
st_var_ms1 = parse_st_var(
ds, st_var, st_label='st', ix_sel=matching_indices[:, 1])
ast_var_ms0 = parse_st_var(
ds, ast_var, st_label='ast', ix_sel=matching_indices[:, 0])
ast_var_ms1 = parse_st_var(
ds, ast_var, st_label='ast', ix_sel=matching_indices[:, 1])
w_ms = 1 / (
(ds_ms0.st.values**-2 * st_var_ms0) +
(ds_ms0.ast.values**-2 * ast_var_ms0) +
(ds_ms1.st.values**-2 * st_var_ms1) +
(ds_ms1.ast.values**-2 * ast_var_ms1)).ravel()
w = np.hstack((w, w_ms))
else:
w = 1. # unweighted
if solver == 'sparse':
if calc_cov:
p_sol, p_var, p_cov = wls_sparse(
X, y, w=w, x0=p0_est, calc_cov=calc_cov, verbose=verbose)
else:
p_sol, p_var = wls_sparse(
X, y, w=w, x0=p0_est, calc_cov=calc_cov, verbose=verbose)
elif solver == 'sparse2':
if calc_cov:
p_sol, p_var, p_cov = wls_sparse2(
X, y, w=w, x0=p0_est, calc_cov=calc_cov, verbose=verbose)
else:
p_sol, p_var = wls_sparse2(
X, y, w=w, x0=p0_est, calc_cov=calc_cov, verbose=verbose)
elif solver == 'stats':
if calc_cov:
p_sol, p_var, p_cov = wls_stats(
X, y, w=w, calc_cov=calc_cov, verbose=verbose)
else:
p_sol, p_var = wls_stats(
X, y, w=w, calc_cov=calc_cov, verbose=verbose)
elif solver == 'external':
return X, y, w, p0_est
elif solver == 'external_split':
return dict(
y=y,
w=w,
X_gamma=X_gamma,
X_dalpha=X_dalpha,
X_c=X_c,
X_m=X_m,
X_TA=X_TA,
p0_est=p0_est)
else:
raise ValueError("Choose a valid solver")
if calc_cov:
return p_sol, p_var, p_cov
else:
return p_sol, p_var
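# Illustrative sketch (hypothetical helper, made-up values): how the Eq.34
# design-matrix blocks X_gamma, X_dalpha and X_c assembled above fit together
# for a tiny problem.
def _example_single_ended_design_matrix(nt=2, nx=3):
    """Assemble X_gamma, X_dalpha and X_c for a toy problem and return the
    dense stacked matrix with columns [gamma, dalpha, C_0..C_{nt-1}]."""
    t_ref = np.full((nx, nt), 20.0)      # reference temperature [degC], shape (nx, nt)
    x_sec = np.linspace(0.0, 10.0, nx)   # x of the reference sections [m]
    data_gamma = 1 / (t_ref.T.ravel() + 273.15)
    X_gamma = sp.coo_matrix(
        (data_gamma, (np.arange(nt * nx), np.zeros(nt * nx, dtype=int))),
        shape=(nt * nx, 1))
    X_dalpha = sp.coo_matrix(
        (np.tile(-x_sec, nt),
         (np.arange(nt * nx), np.zeros(nt * nx, dtype=int))),
        shape=(nt * nx, 1))
    X_c = sp.coo_matrix(
        (-np.ones(nt * nx),
         (np.arange(nt * nx), np.repeat(np.arange(nt), nx))),
        shape=(nt * nx, nt))
    # One row per (time, x) observation of y = ln(st / ast).
    return sp.hstack((X_gamma, X_dalpha, X_c)).toarray()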
def calibration_double_ended_solver(
ds,
st_var=None,
ast_var=None,
rst_var=None,
rast_var=None,
calc_cov=True,
solver='sparse',
matching_indices=None,
transient_asym_att_x=None,
verbose=False):
"""
The construction of X differs a bit from what is presented in the
article. This deviation from the article makes it easier to keep the
code modular.
Eq34 and Eq43 become:
y = [F, B, (B-F)/2], F=[F_0, F_1, .., F_M], B=[B_0, B_1, .., B_M],
where F_m and B_m contain the coefficients for all times.
Parameters
----------
ds : DataStore
Should have sections and reference temperature timeseries already
configured.
st_var : float, array-like, optional
If `None`, use OLS calibration. If `float`, the variance of the noise
from the Stokes detector is described with a single value. Or, when the
variance is a function of the intensity (Poisson distributed), define an
array with shape (nx, nt), where nx is the number of calibration
locations.
ast_var : float, array-like, optional
If `None`, use OLS calibration. If `float`, the variance of the noise
from the anti-Stokes detector is described with a single value. Or, when
the variance is a function of the intensity (Poisson distributed), define
an array with shape (nx, nt), where nx is the number of calibration
locations.
rst_var : float, array-like, optional
If `None`, use OLS calibration. If `float`, the variance of the noise
from the reverse Stokes detector is described with a single value. Or,
when the variance is a function of the intensity (Poisson distributed),
define an array with shape (nx, nt), where nx is the number of
calibration locations.
rast_var : float, array-like, optional
If `None`, use OLS calibration. If `float`, the variance of the noise
from the reverse anti-Stokes detector is described with a single value.
Or, when the variance is a function of the intensity (Poisson
distributed), define an array with shape (nx, nt), where nx is the
number of calibration locations.
calc_cov : bool
Whether to calculate the covariance matrix. Required for calculating
confidence intervals, but uses a lot of memory.
solver : {'sparse', 'sparse2', 'stats', 'external', 'external_split'}
Always use 'sparse' to save memory. The 'stats' solver (statsmodels) can
be used to validate the sparse solver. `external` returns the matrices
that would enter the matrix solver (Eq.37). `external_split` returns a
dictionary with matrix X split into the coefficients per parameter. The
use case for the latter is when certain parameters are fixed/combined.
matching_indices : array-like
Is an array of size (np, 2), where np is the number of paired
locations. This array is produced by `matching_sections()`.
transient_asym_att_x : iterable, optional
Connectors cause asymmetrical attenuation. Normal double ended
calibration assumes symmetrical attenuation. An additional loss
term is added in the 'shadow' of the forward and backward
measurements. This loss term varies over time. Provide a list
containing the x locations of the connectors along the fiber.
Each location introduces an additional 2*nt parameters to solve
for. This requires either an additional calibration section or
matching sections. If multiple locations are defined, the losses are
added.
verbose : bool
Returns
-------
"""
ix_sec = ds.ufunc_per_section(x_indices=True, calc_per='all')
ds_sec = ds.isel(x=ix_sec)
ix_alpha_is_zero = ix_sec[0] # per definition of E
x_sec = ds_sec['x'].values
nx_sec = x_sec.size
nt = ds.time.size
nta = len(transient_asym_att_x) if transient_asym_att_x else 0
# Calculate E as initial estimate for the E calibration.
# Does not require ta to be passed on
E_all_guess, E_all_var_guess = calc_alpha_double(
'guess',
ds,
st_var,
ast_var,
rst_var,
rast_var,
ix_alpha_is_zero=ix_alpha_is_zero)
df_est, db_est = calc_df_db_double_est(ds, ix_alpha_is_zero, 485.)
E, Z_D, Z_gamma, Zero_d, Z_TA_fw, Z_TA_bw, = \
construct_submatrices(nt, nx_sec, ds, transient_asym_att_x, x_sec)
# y # Eq.41--45
y_F = np.log(ds_sec.st / ds_sec.ast).values.ravel()
y_B = np.log(ds_sec.rst / ds_sec.rast).values.ravel()
# w
if st_var is not None: # WLS
st_var_sec = parse_st_var(ds, st_var, st_label='st', ix_sel=ix_sec)
ast_var_sec = parse_st_var(ds, ast_var, st_label='ast', ix_sel=ix_sec)
rst_var_sec = parse_st_var(ds, rst_var, st_label='rst', ix_sel=ix_sec)
rast_var_sec = parse_st_var(
ds, rast_var, st_label='rast', ix_sel=ix_sec)
w_F = 1 / (ds_sec.st**-2 * st_var_sec
+ ds_sec.ast**-2 * ast_var_sec).values.ravel()
w_B = 1 / (
ds_sec.rst**-2 * rst_var_sec
+ ds_sec.rast**-2 * rast_var_sec).values.ravel()
else: # OLS
w_F = np.ones(nt * nx_sec)
w_B = np.ones(nt * nx_sec)
if not np.any(matching_indices):
p0_est = np.concatenate(
(
[485.], df_est, db_est, E_all_guess[ix_sec[1:]],
nta * nt * 2 * [0.]))
# Stack all X's
X = sp.vstack(
(
sp.hstack((Z_gamma, -Z_D, Zero_d, -E, Z_TA_fw)),
sp.hstack((Z_gamma, Zero_d, -Z_D, E, Z_TA_bw))))
y = np.concatenate((y_F, y_B))
w = np.concatenate((w_F, w_B))
else:
E_match_F, E_match_B, E_match_no_cal, Z_TA_eq1, Z_TA_eq2, \
Z_TA_eq3, d_no_cal, ix_from_cal_match_to_glob, ix_match_not_cal, \
Zero_eq12_gamma, Zero_eq3_gamma, Zero_d_eq12 = \
construct_submatrices_matching_sections(
ds.x.values, ix_sec, matching_indices[:, 0],
matching_indices[:, 1], nt, transient_asym_att_x)
p0_est = np.concatenate(
(
np.asarray([485.] + 2 * nt * [1.4]),
E_all_guess[ix_from_cal_match_to_glob], nta * nt * 2 * [0.]))
# Stack all X's
# X_sec contains a different number of columns than X.
X_sec = sp.vstack(
(
sp.hstack((Z_gamma, -Z_D, Zero_d, -E, Z_TA_fw)),
sp.hstack((Z_gamma, Zero_d, -Z_D, E, Z_TA_bw))))
X_sec2 = sp.csr_matrix(
([], ([], [])),
shape=(2 * nt * nx_sec, 1 + 2 * nt + ds.x.size + 2 * nta * nt))
from_i = np.concatenate(
(
np.arange(1 + 2 * nt), 1 + 2 * nt + ix_sec[1:],
np.arange(
1 + 2 * nt + ds.x.size,
1 + 2 * nt + ds.x.size + 2 * nta * nt)))
X_sec2[:, from_i] = X_sec
from_i2 = np.concatenate(
(
np.arange(1 + 2 * nt), 1 + 2 * nt + ix_from_cal_match_to_glob,
np.arange(
1 + 2 * nt + ds.x.size,
1 + 2 * nt + ds.x.size + 2 * nta * nt)))
X = sp.vstack(
(
X_sec2[:, from_i2],
sp.hstack((Zero_eq12_gamma, Zero_d_eq12, E_match_F, Z_TA_eq1)),
sp.hstack((Zero_eq12_gamma, Zero_d_eq12, E_match_B, Z_TA_eq2)),
sp.hstack(
(Zero_eq3_gamma, d_no_cal, E_match_no_cal, Z_TA_eq3))))
y_F = np.log(ds_sec.st / ds_sec.ast).values.ravel()
y_B = np.log(ds_sec.rst / ds_sec.rast).values.ravel()
hix = matching_indices[:, 0]
tix = matching_indices[:, 1]
ds_hix = ds.isel(x=hix)
ds_tix = ds.isel(x=tix)
y_eq1 = (
np.log(ds_hix.st / ds_hix.ast).values.ravel()
- np.log(ds_tix.st / ds_tix.ast).values.ravel())
y_eq2 = (
np.log(ds_hix.rst / ds_hix.rast).values.ravel()
- np.log(ds_tix.rst / ds_tix.rast).values.ravel())
ds_mnc = ds.isel(x=ix_match_not_cal)
y_eq3 = (
(
np.log(ds_mnc.rst / ds_mnc.rast)
- np.log(ds_mnc.st / ds_mnc.ast)) / 2).values.ravel()
y = np.concatenate((y_F, y_B, y_eq1, y_eq2, y_eq3))
st_var_hix = parse_st_var(ds, st_var, st_label='st', ix_sel=hix)
ast_var_hix = parse_st_var(ds, ast_var, st_label='ast', ix_sel=hix)
rst_var_hix = parse_st_var(ds, rst_var, st_label='rst', ix_sel=hix)
rast_var_hix = parse_st_var(ds, rast_var, st_label='rast', ix_sel=hix)
st_var_tix = parse_st_var(ds, st_var, st_label='st', ix_sel=tix)
ast_var_tix = parse_st_var(ds, ast_var, st_label='ast', ix_sel=tix)
rst_var_tix = parse_st_var(ds, rst_var, st_label='rst', ix_sel=tix)
rast_var_tix = parse_st_var(ds, rast_var, st_label='rast', ix_sel=tix)
st_var_mnc = parse_st_var(
ds, st_var, st_label='st', ix_sel=ix_match_not_cal)
ast_var_mnc = parse_st_var(
ds, ast_var, st_label='ast', ix_sel=ix_match_not_cal)
rst_var_mnc = parse_st_var(
ds, rst_var, st_label='rst', ix_sel=ix_match_not_cal)
rast_var_mnc = parse_st_var(
ds, rast_var, st_label='rast', ix_sel=ix_match_not_cal)
w_eq1 = 1 / (
(ds_hix.st**-2 * st_var_hix
+ ds_hix.ast**-2 * ast_var_hix).values.ravel() +
(ds_tix.st**-2 * st_var_tix
+ ds_tix.ast**-2 * ast_var_tix).values.ravel())
w_eq2 = 1 / (
(ds_hix.rst**-2 * rst_var_hix
+ ds_hix.rast**-2 * rast_var_hix).values.ravel() +
(ds_tix.rst**-2 * rst_var_tix
+ ds_tix.rast**-2 * rast_var_tix).values.ravel())
w_eq3 = 1 / (
ds_mnc.st**-2 * st_var_mnc + ds_mnc.ast**-2 * ast_var_mnc
+ ds_mnc.rst**-2 * rst_var_mnc
+ ds_mnc.rast**-2 * rast_var_mnc).values.ravel()
w = np.concatenate((w_F, w_B, w_eq1, w_eq2, w_eq3))
if solver == 'sparse':
solver_fun = wls_sparse
elif solver == 'sparse2':
solver_fun = wls_sparse2
elif solver == 'stats':
solver_fun = wls_stats
elif solver == 'external':
return X, y, w, p0_est
elif solver == 'external_split':
out = dict(
y_F=y_F,
y_B=y_B,
w_F=w_F,
w_B=w_B,
Z_gamma=Z_gamma,
Z_D=Z_D,
Zero_d=Zero_d,
E=E,
Z_TA_fw=Z_TA_fw,
Z_TA_bw=Z_TA_bw,
p0_est=p0_est,
E_all_guess=E_all_guess,
E_all_var_guess=E_all_var_guess)
if np.any(matching_indices):
out.update(
ix_from_cal_match_to_glob=ix_from_cal_match_to_glob,
E_match_F=E_match_F,
E_match_B=E_match_B,
E_match_no_cal=E_match_no_cal,
Zero_eq12_gamma=Zero_eq12_gamma,
Zero_eq3_gamma=Zero_eq3_gamma,
Zero_d_eq12=Zero_d_eq12,
d_no_cal=d_no_cal,
Z_TA_eq1=Z_TA_eq1,
Z_TA_eq2=Z_TA_eq2,
Z_TA_eq3=Z_TA_eq3,
y_eq1=y_eq1,
y_eq2=y_eq2,
y_eq3=y_eq3,
w_eq1=w_eq1,
w_eq2=w_eq2,
w_eq3=w_eq3)
return out
else:
raise ValueError("Choose a valid solver")
out = solver_fun(
X,
y,
w=w,
x0=p0_est,
calc_cov=calc_cov,
verbose=verbose,
return_werr=verbose)
if calc_cov and verbose:
p_sol, p_var, p_cov, werr = out
elif not calc_cov and verbose:
p_sol, p_var, werr = out
elif calc_cov and not verbose:
p_sol, p_var, p_cov = out
elif not calc_cov and not verbose:
p_sol, p_var = out
# if verbose:
# from dtscalibration.plot import plot_location_residuals_double_ended
#
# dv = plot_location_residuals_double_ended(ds, werr, hix, tix, ix_sec,
# ix_match_not_cal, nt)
# p_sol contains the int diff att of all the locations within the
# reference sections. po_sol is its expanded version that also contains
# the int diff att for locations outside the reference sections.
# calculate talpha_fw and bw for attenuation
if transient_asym_att_x:
if np.any(matching_indices):
ta = p_sol[1 + 2 * nt + ix_from_cal_match_to_glob.size:].reshape(
(nt, 2, nta), order='F')
ta_var = p_var[1 + 2 * nt
+ ix_from_cal_match_to_glob.size:].reshape(
(nt, 2, nta), order='F')
else:
ta = p_sol[2 * nt + nx_sec:].reshape((nt, 2, nta), order='F')
ta_var = p_var[2 * nt + nx_sec:].reshape((nt, 2, nta), order='F')
talpha_fw = ta[:, 0, :]
talpha_bw = ta[:, 1, :]
talpha_fw_var = ta_var[:, 0, :]
talpha_bw_var = ta_var[:, 1, :]
else:
talpha_fw = None
talpha_bw = None
talpha_fw_var = None
talpha_bw_var = None
# put E outside of reference section in solution
# concatenating makes a copy of the data instead of using a pointer
ds_sub = ds[['st', 'ast', 'rst', 'rast']]
time_dim = ds_sub.get_time_dim()
ds_sub['df'] = ((time_dim,), p_sol[1:1 + nt])
ds_sub['df_var'] = ((time_dim,), p_var[1:1 + nt])
ds_sub['db'] = ((time_dim,), p_sol[1 + nt:1 + 2 * nt])
ds_sub['db_var'] = ((time_dim,), p_var[1 + nt:1 + 2 * nt])
E_all_exact, E_all_var_exact = calc_alpha_double(
'exact',
ds_sub,
st_var,
ast_var,
rst_var,
rast_var,
'df',
'db',
'df_var',
'db_var',
ix_alpha_is_zero=ix_alpha_is_zero,
transient_asym_att_x=transient_asym_att_x,
talpha_fw=talpha_fw,
talpha_bw=talpha_bw,
talpha_fw_var=talpha_fw_var,
talpha_bw_var=talpha_bw_var)
if np.any(matching_indices):
p_sol_size = 1 + 2 * nt + ix_from_cal_match_to_glob.size + 2 * nt * nta
else:
p_sol_size = 1 + 2 * nt + (nx_sec - 1) + 2 * nt * nta
assert p_sol.size == p_sol_size
assert p_var.size == p_sol_size
if np.any(matching_indices):
po_sol = np.concatenate(
(
p_sol[:1 + 2 * nt], E_all_exact,
p_sol[1 + 2 * nt + ix_from_cal_match_to_glob.size:]))
po_sol[1 + 2 * nt + ix_from_cal_match_to_glob] = \
p_sol[1 + 2 * nt:1 + 2 * nt + ix_from_cal_match_to_glob.size]
else:
po_sol = np.concatenate(
(p_sol[:1 + 2 * nt], E_all_exact, p_sol[2 * nt + nx_sec:]))
po_sol[1 + 2 * nt + ix_sec[1:]] = p_sol[1 + 2 * nt:2 * nt + nx_sec]
po_sol[1 + 2 * nt + ix_sec[0]] = 0. # per definition
if np.any(matching_indices):
po_var = np.concatenate(
(
p_var[:1 + 2 * nt], E_all_var_exact,
p_var[1 + 2 * nt + ix_from_cal_match_to_glob.size:]))
po_var[1 + 2 * nt + ix_from_cal_match_to_glob] = \
p_var[1 + 2 * nt:1 + 2 * nt + ix_from_cal_match_to_glob.size]
else:
po_var = np.concatenate(
(p_var[:1 + 2 * nt], E_all_var_exact, p_var[2 * nt + nx_sec:]))
po_var[1 + 2 * nt + ix_sec[1:]] = p_var[1 + 2 * nt:2 * nt + nx_sec]
po_var[1 + 2 * nt + ix_sec[0]] = 0. # per definition
if calc_cov:
# the COV can be expensive to compute (in the least squares routine)
po_cov = np.diag(po_var).copy()
if np.any(matching_indices):
from_i = np.concatenate(
(
np.arange(1 + 2 * nt),
1 + 2 * nt + ix_from_cal_match_to_glob,
np.arange(
1 + 2 * nt + ix_from_cal_match_to_glob.size, 1 + 2 * nt
+ ix_from_cal_match_to_glob.size + nta * nt * 2)))
else:
from_i = np.concatenate(
(
np.arange(1 + 2 * nt), 1 + 2 * nt + ix_sec[1:],
np.arange(
1 + 2 * nt + nx_sec,
1 + 2 * nt + nx_sec + nta * nt * 2)))
iox_sec1, iox_sec2 = np.meshgrid(from_i, from_i, indexing='ij')
po_cov[iox_sec1, iox_sec2] = p_cov
return po_sol, po_var, po_cov
else:
return po_sol, po_var
def matching_section_location_indices(ix_sec, hix, tix):
# contains all indices of the entire fiber that either are used for
# calibrating to reference temperature or for matching sections. Is sorted.
ix_cal_match = np.unique(np.concatenate((ix_sec, hix, tix)))
# number of locations of interest, width of the section of interest.
nx_cal_match = ix_cal_match.size
# indices in the section of interest. Including E0.
ix_sec2 = np.searchsorted(ix_cal_match, ix_sec)
# indices in the section of interest. Excluding E0
# ix_E0 mask - to exclude E[ix_sec[0]] from the E matrices
ix_E0_mask = np.array(
[ix for ix in range(nx_cal_match) if ix != ix_sec2[0]])
# contains the global coordinate indices of the E
ix_from_cal_match_to_glob = ix_cal_match[ix_E0_mask]
return ix_from_cal_match_to_glob
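# Illustrative numeric sketch (hypothetical helper, made-up index values) of
# the index bookkeeping above.
def _example_matching_section_location_indices():
    ix_sec = np.array([2, 3, 4])   # reference-section indices; E[ix_sec[0]] := 0
    hix = np.array([1, 3])         # 'head' matching indices
    tix = np.array([6, 7])         # 'tail' matching indices
    # Union of calibration and matching locations: [1 2 3 4 6 7]; dropping the
    # E0 location (index 2) leaves the global E indices [1 3 4 6 7].
    return matching_section_location_indices(ix_sec, hix, tix)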
def construct_submatrices_matching_sections(
x, ix_sec, hix, tix, nt, transient_asym_att_x):
"""
For all matching indices, subscript 1 refers to the indices in
`hix` and subscript 2 refers to the indices in `tix`.
F1 - F2 = E2 - E1 + TAF2 - TAF1 # EQ1
B1 - B2 = E1 - E2 + TAB2 - TAB1 # EQ2
For matching indices (`hix` and `tix`) that are outside of the reference
sections, an additional equation is needed for `E` per time step.
(B3 - F3) / 2 = E3 + (df - db) / 2 + (TAF3 - TAB3) / 2 # EQ3
where subscript 3 refers to a hix or tix that is not in a reference
section.
Note that E[ix_sec[0]] = 0 and is not included in the parameters. This is
dealt with by first assuming it is a parameter and then removing it from
the coefficient matrices. Note that the _sec2 indices include E[ix_sec[0]].
Ordering when unpacking the square matrix: nt observations for location 1,
then nt observations for location 2.
# ix of Observations and weights
# ix_y_eq1_f1 = hix
# ix_y_eq1_f2 = tix
# ix_y_eq2_b1 = hix
# ix_y_eq2_b2 = tix
# ix_y_eq3 = ix_match_not_cal
Parameters
----------
x : array-like of float
coordinates along the fiber, needed to create the matrices for
transient attenuation.
ix_sec : array-like of int
hix : array-like of int
tix : array-like of int
nt : int
Returns
-------
"""
# contains all indices of the entire fiber that either are used for
# calibrating to reference temperature or for matching sections. Is sorted.
ix_cal_match = np.unique(np.concatenate((ix_sec, hix, tix)))
# subscript 3 in doc-eqns
ix_match_not_cal = np.array(
[ix for ix in ix_cal_match if ix not in ix_sec])
# number of locations of interest, width of the section of interest.
nx_cal_match = ix_cal_match.size
npair = len(hix)
# indices in the section of interest.
ix_match_not_cal_sec2 = np.searchsorted(ix_cal_match, ix_match_not_cal)
# indices in the section of interest. Including E0.
ix_sec2 = np.searchsorted(ix_cal_match, ix_sec)
hix_sec2 = np.searchsorted(ix_cal_match, hix) # subscript 1 in doc-eqns
tix_sec2 = np.searchsorted(ix_cal_match, tix) # subscript 2 in doc-eqns
# indices in the section of interest. Excluding E0
# ix_E0 mask - to exclude E[ix_sec[0]] from the E matrices
ix_E0_mask = np.array(
[ix for ix in range(nx_cal_match) if ix != ix_sec2[0]])
# contains the global coordinate indices of the E
ix_from_cal_match_to_glob = ix_cal_match[ix_E0_mask]
# E in EQ1
data = np.ones(nt * npair, dtype=float)
row = np.arange(nt * npair, dtype=int)
col1 = np.repeat(hix_sec2, nt)
col2 = np.repeat(tix_sec2, nt)
E_match_F = sp.coo_matrix(
(
np.concatenate((-data, data)),
(np.concatenate((row, row)), np.concatenate((col1, col2)))),
shape=(nt * npair, nx_cal_match),
copy=False).tocsr(copy=False)[:, ix_E0_mask].tocoo()
Zero_eq12_gamma = sp.coo_matrix(([], ([], [])), shape=(nt * npair, 1))
Zero_d_eq12 = sp.coo_matrix(([], ([], [])), shape=(nt * npair, 2 * nt))
# E in EQ2
data = np.ones(nt * npair, dtype=float)
row = np.arange(nt * npair, dtype=int)
col1 = np.repeat(hix_sec2, nt)
col2 = np.repeat(tix_sec2, nt)
E_match_B = sp.coo_matrix(
(
np.concatenate((data, -data)),
(np.concatenate((row, row)), np.concatenate((col1, col2)))),
shape=(nt * npair, nx_cal_match),
copy=False).tocsr(copy=False)[:, ix_E0_mask].tocoo()
# E in EQ3
nx_nm = ix_match_not_cal_sec2.size
data = np.ones(nt * nx_nm, dtype=float)
row = np.arange(nt * nx_nm, dtype=int)
col = np.repeat(ix_match_not_cal_sec2, nt)
E_match_no_cal = sp.coo_matrix(
(data, (row, col)), shape=(nt * nx_nm, nx_cal_match),
copy=False).tocsr(copy=False)[:, ix_E0_mask].tocoo()
# DF and DB in EQ3
data = np.ones(nt * nx_nm, dtype=float) / 2
row = np.arange(nt * nx_nm, dtype=int)
colf = np.tile(np.arange(nt, dtype=int), nx_nm)
colb = np.tile(np.arange(nt, 2 * nt, dtype=int), nx_nm)
d_no_cal = sp.coo_matrix(
(
np.concatenate((data, -data)),
(np.concatenate((row, row)), np.concatenate((colf, colb)))),
shape=(nt * nx_nm, 2 * nt),
copy=False)
Zero_eq3_gamma = sp.coo_matrix(([], ([], [])), shape=(nt * nx_nm, 1))
# TA
if transient_asym_att_x:
# unpublished BdT
TA_eq1_list = list()
TA_eq2_list = list()
TA_eq3_list = list()
for transient_asym_att_xi in transient_asym_att_x:
"""For forward direction."""
# first index on the right-hand side of the splice
# Deal with connector outside of fiber
if transient_asym_att_xi >= x[-1]:
ix_ta_ix0 = x.size
elif transient_asym_att_xi <= x[0]:
ix_ta_ix0 = 0
else:
ix_ta_ix0 = np.flatnonzero(x >= transient_asym_att_xi)[0]
# TAF1 and TAF2 in EQ1
data_taf = np.repeat(
-np.array(hix >= ix_ta_ix0, dtype=float)
+ np.array(tix >= ix_ta_ix0, dtype=float), nt)
row_taf = np.arange(nt * npair)
col_taf = np.tile(np.arange(nt, dtype=int), npair)
mask_taf = data_taf.astype(
bool) # only store non-zeros in sparse m
TA_eq1_list.append(
sp.coo_matrix(
(
data_taf[mask_taf],
(row_taf[mask_taf], col_taf[mask_taf])),
shape=(nt * npair, 2 * nt),
copy=False))
# TAB1 and TAB2 in EQ2
data_tab = np.repeat(
-np.array(hix < ix_ta_ix0, dtype=float)
+ np.array(tix < ix_ta_ix0, dtype=float), nt)
row_tab = np.arange(nt * npair)
col_tab = np.tile(np.arange(nt, 2 * nt, dtype=int), npair)
mask_tab = data_tab.astype(
bool) # only store non-zeros in sparse m
TA_eq2_list.append(
sp.coo_matrix(
(
data_tab[mask_tab],
(row_tab[mask_tab], col_tab[mask_tab])),
shape=(nt * npair, 2 * nt),
copy=False))
data_taf = np.repeat(
np.array(ix_match_not_cal >= ix_ta_ix0, dtype=float) / 2, nt)
data_tab = np.repeat(
-np.array(ix_match_not_cal < ix_ta_ix0, dtype=float) / 2, nt)
row_ta = np.arange(nt * nx_nm)
col_taf = np.tile(np.arange(nt, dtype=int), nx_nm)
col_tab = np.tile(np.arange(nt, 2 * nt, dtype=int), nx_nm)
mask_taf = data_taf.astype(
bool) # only store non-zeros in sparse m
mask_tab = data_tab.astype(
bool) # only store non-zeros in sparse m
TA_eq3_list.append(
sp.coo_matrix(
(
np.concatenate(
(data_taf[mask_taf], data_tab[mask_tab])), (
np.concatenate(
(row_ta[mask_taf], row_ta[mask_tab])),
np.concatenate(
(col_taf[mask_taf], col_tab[mask_tab])))),
shape=(nt * nx_nm, 2 * nt),
copy=False))
Z_TA_eq1 = sp.hstack(TA_eq1_list)
Z_TA_eq2 = sp.hstack(TA_eq2_list)
Z_TA_eq3 = sp.hstack(TA_eq3_list)
else:
Z_TA_eq1 = sp.coo_matrix(([], ([], [])), shape=(nt * npair, 0))
Z_TA_eq2 = sp.coo_matrix(([], ([], [])), shape=(nt * npair, 0))
Z_TA_eq3 = sp.coo_matrix(([], ([], [])), shape=(nt * nx_nm, 0))
return (
E_match_F, E_match_B, E_match_no_cal, Z_TA_eq1, Z_TA_eq2, Z_TA_eq3,
d_no_cal, ix_from_cal_match_to_glob, ix_match_not_cal, Zero_eq12_gamma,
Zero_eq3_gamma, Zero_d_eq12)
def construct_submatrices(nt, nx, ds, transient_asym_att_x, x_sec):
"""Wrapped in a function to reduce memory usage.
E is zero at the first index of the reference section (ds_sec)
Constructing:
Z_gamma (nt * nx, 1). Data: positive 1/temp
Z_D (nt * nx, nt). Data: ones
E (nt * nx, nx). Data: ones
Zero_gamma (nt * nx, 1)
zero_d (nt * nx, nt)
Z_TA_fw (nt * nx, nta * 2 * nt) minus ones
Z_TA_bw (nt * nx, nta * 2 * nt) minus ones
I_fw = 1/Tref*gamma - D_fw - E - TA_fw
I_bw = 1/Tref*gamma - D_bw + E - TA_bw
"""
# Z \gamma # Eq.47
cal_ref = np.array(
ds.ufunc_per_section(
label='st', ref_temp_broadcasted=True, calc_per='all'))
data_gamma = 1 / (cal_ref.ravel() + 273.15) # gamma
coord_gamma_row = np.arange(nt * nx, dtype=int)
coord_gamma_col = np.zeros(nt * nx, dtype=int)
Z_gamma = sp.coo_matrix(
(data_gamma, (coord_gamma_row, coord_gamma_col)),
shape=(nt * nx, 1),
copy=False)
# Z D # Eq.47
data_c = np.ones(nt * nx, dtype=float)
coord_c_row = np.arange(nt * nx, dtype=int)
coord_c_col = np.tile(np.arange(nt, dtype=int), nx)
Z_D = sp.coo_matrix(
(data_c, (coord_c_row, coord_c_col)), shape=(nt * nx, nt), copy=False)
# E # Eq.47
# E is 0 at ix=0
data_c = np.ones(nt * (nx - 1), dtype=float)
coord_c_row = np.arange(nt, nt * nx, dtype=int)
coord_c_col = np.repeat(np.arange(nx - 1, dtype=int), nt)
E = sp.coo_matrix(
(data_c, (coord_c_row, coord_c_col)),
shape=(nt * nx, (nx - 1)),
copy=False)
# Zero # Eq.45
Zero_d = sp.coo_matrix(([], ([], [])), shape=(nt * nx, nt))
# Zero_E = sp.coo_matrix(([], ([], [])), shape=(nt * nx, (nx - 1)))
if transient_asym_att_x:
# unpublished BdT
TA_fw_list = list()
TA_bw_list = list()
for transient_asym_att_xi in transient_asym_att_x:
"""For forward direction. """
# first index on the right-hand side of the splice
# Deal with connector outside of fiber
if transient_asym_att_xi >= x_sec[-1]:
ix_sec_ta_ix0 = nx
elif transient_asym_att_xi <= x_sec[0]:
ix_sec_ta_ix0 = 0
else:
ix_sec_ta_ix0 = np.flatnonzero(
x_sec >= transient_asym_att_xi)[0]
# Data is -1 for both forward and backward
# I_fw = 1/Tref*gamma - D_fw - E - TA_fw. Eq40
data_ta_fw = -np.ones(nt * (nx - ix_sec_ta_ix0), dtype=float)
# skip ix_sec_ta_ix0 locations, because they are upstream of
# the connector.
coord_ta_fw_row = np.arange(nt * ix_sec_ta_ix0, nt * nx, dtype=int)
# nt parameters
coord_ta_fw_col = np.tile(
np.arange(nt, dtype=int), nx - ix_sec_ta_ix0)
import rematbal.matbal as mb
import numpy as np
from scipy.optimize import fsolve
import pandas as pd
def mbal_inner_calc(dict, P, Pres_calc, We, aquifer_pres, step):
Np, Wp, Gp, N, Wei, pvt_oil_pressure, pvt_oil_Bo, pvt_oil_Bg, pvt_oil_Rs, Rsb, \
Bti, Bgi, Pi, m, Boi, cw, Swi, cf, Rsi, Bw, Winj, Bwinj, Ginj, J, \
ts, VEH_aq_type, td_array, VEH_dp_array, r, rr, aq_type = mbal_setup(dict)
Bo = np.interp(P, pvt_oil_pressure, pvt_oil_Bo)
Bg = np.interp(P, pvt_oil_pressure, pvt_oil_Bg)
Bginj = Bg
Rs = np.interp(P, pvt_oil_pressure, pvt_oil_Rs)
Bt = mb.formation_total_volume_factor(Bo, Bg, Rsb, Rs)
Eo = mb.dissolved_oil_and_gas_expansion(Bt, Bti)
Eg = mb.gas_cap_expansion(Bti, Bg, Bgi)
dP = Pi - P
Efw = mb.pore_volume_reduction_connate_water_expansion(m, Boi, cw, Swi, cf, dP)
Npx = Np[step]
Wpx = Wp[step]
Gpx = Gp[step]
Winjx = Winj[step]
Ginjx = Ginj[step]
F, produced_oil_and_gas, produced_water, injected_gas, injected_water = mb.production_injection_balance(Npx, Bt, Rs,
Rsi, Bg,
Wpx,
Bw, Winjx,
Bwinj, Ginjx,
Bginj, Gpx)
if aq_type == 'VEH':
Wex, VEH_avg_pressure, VEH_dp_array = mb.VEH_aquifer_influx(VEH_aq_type, step, ts, td_array, VEH_dp_array,
r, rr, P, Pi, VEH_avg_pressure)
if aq_type == 'Fetkovich':
Wex, aq_pres = aquifer_influx(step, P, Wei, We, ts, Pres_calc, Pi, J, aquifer_pres)
aquifer_pres[step] = aq_pres
We[step] = Wex
return F, Eo, m, Eg, Efw, We, aquifer_pres, Bw, Bti, N
def obj_function2(P, *data):
dict = data[0]
Pres_calc = data[1]
We = data[2]
aq_pres = data[3]
step = data[4]
F, Eo, m, Eg, Efw, We, aq_pres, Bw, Bti, N = mbal_inner_calc(dict, P, Pres_calc, We, aq_pres, step)
Wex = We[step]
Ncalc = mb.oil_in_place(F, Eo, m, Eg, Efw, Wex, Bw, Bti)
of = (N - Ncalc)
return of
def pressure_calculation(data, Pres_calc):
step = data[4]
x0 = Pres_calc[step - 1] - 10.0
res = fsolve(obj_function2, x0, args=data)
return res
def aquifer_influx(step, P, Wei, We, ts, Pres_calc, Pi, J, aquifer_pres):
We_prev = We[step - 1]
ts_prev = ts[step - 1]
tsx = ts[step]
avg_pres = (Pres_calc[step - 1] + P) / 2
aq_pres = aquifer_pressure(step, Wei, We, aquifer_pres, Pi)
# print(step,aq_pres)
Wex = (Wei / Pi) * (aq_pres - avg_pres) * (1 - np.exp(-J * Pi * (tsx - ts_prev) / Wei))
Wex = We_prev + Wex
return Wex, aq_pres
def aquifer_pressure(step, Wei, We, aquifer_pres, Pi):
We_prev = We[step - 1]
if step == 1:
aq_pres = Pi
else:
aq_pres = Pi * (1 - We_prev / (Wei))
return aq_pres
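# Illustrative sketch (hypothetical helper, made-up numbers) of a single
# Fetkovich influx increment as computed in aquifer_influx().
def _example_fetkovich_increment():
    Wei, Pi, J = 50e6, 4000.0, 10.0        # aquifer constant, initial pressure, productivity index
    aq_pres, avg_pres = 3950.0, 3900.0     # current aquifer and average reservoir pressure
    dt = 30.0                              # time step [days]
    # dWe = (Wei / Pi) * (P_aq - P_avg) * (1 - exp(-J * Pi * dt / Wei))
    return (Wei / Pi) * (aq_pres - avg_pres) * (1 - np.exp(-J * Pi * dt / Wei))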
def mbal_setup(dict):
df_prod = dict['df_prod']
dict_tank = dict['dict_tank']
dict_pvtmaster = dict['dict_pvtmaster']
df_pvt_gas = dict['df_pvt_gas']
df_pvt_oil = dict['df_pvt_oil']
dates = df_prod['datestamp']
ts = pd.to_numeric(dates - dates.min()) / 864e11
Np = df_prod['np']
Gp = df_prod['gp']
Wp = df_prod['wp']
N = float(dict_tank['initial_inplace'])
Swi = float(dict_tank['swi'])
cw = float(dict_tank['cw'])
cf = float(dict_tank['cf'])
m = float(dict_tank['initial_gascap'])
Winj = df_prod['wi']
Winj = Winj.fillna(0)
Ginj = df_prod['gi']
Ginj = Ginj.fillna(0)
#####General PVT
Rsi = dict_pvtmaster['gor'] # scf/stb
try:
aq_type = dict_tank['aq_type']
except:
aq_type = "Fetkovich"
VEH_aq_type = ""
r = ""
rr = ""
td_array = ""
VEH_dp_array = ""
if aq_type == 'Fetkovich':
Wei = float(dict_tank['wei'])
J = float(dict_tank['J'])
if aq_type == 'VEH':
VEH_dp_array = [None] * len(Np)
VEH_aq_type = dict_tank['VEH_aq_type']
r = dict_tank['r']
rr = dict_tank['rr']
td_array = mb.VEH_td(VEH_aq_type, dict_tank['k'], ts, dict_tank['poro'], dict_tank['visc'], dict_tank['ct'], rr,
dict_tank['La'])
else:
Wei = float(dict_tank['wei'])
J = float(dict_tank['J'])
Pi = float(dict_tank['initial_pressure'])
Boi = dict_tank['Boi']
Bgi = dict_tank['Bgi']
Rsb = dict_pvtmaster['gor']
Bti = mb.formation_total_volume_factor(Boi, Bgi, Rsb, Rsi)
#####Water PVT
Bw = 1.0 # dict_tank['Bw']
Bwinj = 1.0
#####Oil PVT
pvt_oil_pressure = df_pvt_oil['pressure']
pvt_oil_Bo = df_pvt_oil['oil_fvf']
pvt_oil_Rs = df_pvt_oil['solution_gas']
#####Gas PVT
pvt_gas_pressure = df_pvt_gas['pressure']
pvt_gas_Bg = df_pvt_gas['gas_fvf']
pvt_gas_Bg = pvt_gas_Bg / 1000
arr = np.array(pvt_oil_pressure)
interpol = lambda P: np.interp(P, pvt_gas_pressure, pvt_gas_Bg)
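# Illustrative sketch (hypothetical helper, made-up table values) of the PVT
# lookups used above: linear interpolation with pressure as the x-axis.
def _example_pvt_interpolation():
    pressure = np.array([1000.0, 2000.0, 3000.0, 4000.0])   # psia
    bo = np.array([1.10, 1.18, 1.25, 1.22])                 # rb/stb
    return np.interp(2500.0, pressure, bo)                  # -> 1.215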
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
from typing import Dict, List, Tuple
import gym
import numpy as np
import torch as th
from torch import nn
from bisk import BiskSingleRobotEnv
from bisk.features import make_featurizer
from hucc.envs.goal_spaces import g_goal_spaces, g_delta_feats
log = logging.getLogger(__name__)
class CtrlgsPreTrainingEnv(BiskSingleRobotEnv):
'''
A multi-task, goal-based pre-training environment.
The environment is "empty" except for a single robot that can be controlled.
The "tasks" consider the control of one or more observed features -- those
will be sampled according to `feature_dist` (which can also be changed after
constructing the environment). For each task (combination of features), a
goal space is constructed using `psi` and `offset`, and goals are sampled in
this goal space in [-1,1].
A continual version of this environment can be obtained with a
`hard_reset_interval` of > 1. This parameter specifies the frequency at
which the simulation is reset to its initial state. Other resets simply
result in a new goal being sampled.
'''
def __init__(
self,
robot: str,
features: str,
feature_dist: Dict[str, float],
task_map: Dict[str, int],
precision: float = 0.1,
idle_steps: int = 0,
max_steps: int = 20,
backproject_goal: bool = True,
reward: str = 'potential',
hard_reset_interval: int = 1,
reset_p: float = 0.0,
resample_features: str = 'hard',
full_episodes: bool = False,
allow_fallover: bool = False,
fallover_penalty: float = -1.0,
implicit_soft_resets: bool = False,
goal_sampling: str = 'random',
ctrl_cost: float = 0.0,
normalize_gs_observation: bool = False,
zero_twist_goals: bool = False,
relative_frame_of_reference: bool = False,
):
# XXX hack to have DMC robots operate with their "native" sensor input
super().__init__(
robot=robot,
features='joints'
if features not in ('sensorsnoc', 'native')
else features,
allow_fallover=allow_fallover,
)
self.goal_featurizer = make_featurizer(
features, self.p, self.robot, 'robot'
)
gsdim = self.goal_featurizer.observation_space.shape[0]
self.goal_space = g_goal_spaces[features][robot]
# Construct goal space
self.psi, self.offset = self.abstraction_matrix(robot, features, gsdim)
self.psi_1 = np.linalg.inv(self.psi)
self.offset_1 = -np.matmul(self.offset, self.psi_1)
assert len(self.observation_space.shape) == 1
assert self.psi.shape == (gsdim, gsdim)
assert self.offset.shape == (gsdim,)
self.precision = precision
self.idle_steps = idle_steps
self.max_steps = max_steps
self.backproject_goal = backproject_goal
self.reward = reward
self.hard_reset_interval = hard_reset_interval
self.reset_p = reset_p
self.resample_features = resample_features
self.full_episodes = full_episodes
self.fallover_penalty = fallover_penalty
self.ctrl_cost = ctrl_cost
self.implicit_soft_resets = implicit_soft_resets
self.goal_sampling = goal_sampling
self.normalize_gs_observation = normalize_gs_observation
self.zero_twist_goals = zero_twist_goals
self.relative_frame_of_reference = relative_frame_of_reference
self.task_idx = [0] * len(task_map)
for k, v in task_map.items():
self.task_idx[v] = int(k)
if len(self.goal_space['twist_feats']) > 0:
negpi = self.proj(
-np.pi * np.ones(gsdim), self.goal_space['twist_feats']
)
pospi = self.proj(
np.pi * np.ones(gsdim), self.goal_space['twist_feats']
)
if not np.allclose(-negpi, pospi):
# This could be supported by more elaborate delta computation
# logic in step()
raise ValueError('Twist feature ranges not symmetric')
self.proj_pi = pospi
if backproject_goal:
all_feats = list(range(gsdim))
gmin_back = self.backproj(-np.ones(gsdim), all_feats)
gmax_back = self.backproj(np.ones(gsdim), all_feats)
goal_space = gym.spaces.Box(gmin_back, gmax_back)
else:
max_features = max(
(
len(f.replace('+', ',').split(','))
for f in feature_dist.keys()
)
)
goal_space = gym.spaces.Box(
low=-2, high=2, shape=(max_features,), dtype=np.float32
)
self.task_map = {int(k): v for k, v in task_map.items()}
# Hide position-related invariant features from the observation, i.e.
# X/Y or ant X for cheetah
delta_feats = g_delta_feats[robot]
self.obs_mask = list(range(self.observation_space.shape[0]))
for d in delta_feats:
self.obs_mask.remove(d)
self.observation_space = gym.spaces.Dict(
{
'observation': gym.spaces.Box(
low=-np.inf,
high=np.inf,
shape=(len(self.obs_mask),),
dtype=np.float32,
),
'desired_goal': goal_space,
'task': gym.spaces.Box(
low=0, high=1, shape=(len(self.task_map),), dtype=np.float32
),
'gs_observation': self.goal_featurizer.observation_space,
}
)
self._do_hard_reset = True
self._reset_counter = 0
self.set_feature_dist(feature_dist)
# Current features
self._features: List[int] = []
self._features_s = ''
self._feature_mask = np.zeros(len(self.task_map))
self.model = None
self.gamma = 1.0
def set_goal_dims(self, dims):
self.set_feature_dist(dims)
def set_model(self, model: nn.Module, gamma: float):
self.model = model
self.gamma = gamma
def set_feature_dist(self, feature_dist: Dict[str, float]):
# Deduplicate features from combinations
fdist: Dict[str, float] = {}
self._feature_strings = {}
for fs, p in feature_dist.items():
ufeats = []
for f in fs.replace('+', ',').split(','):
if not f in ufeats:
ufeats.append(f)
fdist[','.join(ufeats)] = p
self._feature_strings[','.join(ufeats)] = fs.replace('+', ',')
if not self.backproject_goal:
# Check that maximum number of features doesn't change
max_features = max((len(fs.split(',')) for fs in fdist.keys()))
assert (
self.observation_space['desired_goal'].shape[0] == max_features
)
for fs in fdist.keys():
for fi in map(int, fs.split(',')):
assert fi in self.task_map
self._feature_dist_v = [k for k, v in fdist.items()]
s = sum([v for k, v in fdist.items()])
self._feature_dist_p = [v / s for k, v in fdist.items()]
def proj(self, obs: np.ndarray, feats: List[int]) -> np.ndarray:
return np.matmul(obs, self.psi[feats].T) + self.offset[feats]
def backproj(self, obs_w: np.ndarray, feats: List[int]) -> np.ndarray:
s_p = np.matmul(obs_w, self.psi_1[feats]) + self.offset_1
return s_p[self.task_idx]
def seed(self, seed=None):
self._do_hard_reset = True
return super().seed(seed)
def get_observation(self):
obs = super().get_observation()[self.obs_mask]
gs_obs = self.goal_featurizer()
if self.backproject_goal:
s = gs_obs[self.task_idx]
bpg = self.backproj(self.goal, self._features)
g = bpg - s
if len(self.goal_space['twist_feats']) > 0:
twf = [self.task_map[f] for f in self.goal_space['twist_feats']]
g[twf] = (
np.remainder((bpg[twf] - s[twf]) + np.pi, 2 * np.pi) - np.pi
)
g *= self._feature_mask
else:
if len(self.goal_space['twist_feats']) > 0:
raise NotImplementedError()
gs = self.proj(gs_obs, self._features)
g = np.zeros(self.observation_space['desired_goal'].shape)
g[0 : len(self.goal)] = self.goal - gs
if self.normalize_gs_observation:
# XXX if the goal space is defined for fewer features than
# gs_observation, this will yield bogus values for the undefined
# ones.
gs_obs = self.proj(gs_obs, np.arange(0, len(gs_obs)))
return {
'observation': obs,
'desired_goal': g,
'task': self._feature_mask,
'gs_observation': gs_obs,
}
def hard_reset(self):
# Disable contacts during reset to prevent potentially large contact
# forces that can be applied during initial positioning of bodies in
# reset_state().
with self.p.model.disable('contact'):
self.p.reset()
self.reset_state()
for _ in range(self.idle_steps):
self.p.set_control(np.zeros_like(self.p.data.ctrl))
self.step_simulation()
if self.idle_steps <= 0:
self.step_simulation()
def sample_features(self) -> List[int]:
fs = self.np_random.choice(
self._feature_dist_v, 1, p=self._feature_dist_p
)[0]
return list(map(int, fs.split(',')))
def sample_goals_random(self, N: int = 1) -> np.ndarray:
gstate = self.proj(self.goal_featurizer(), self._features)
goal = self.np_random.uniform(
low=-1.0, high=1.0, size=(N, len(self._features))
)
# For delta features we offset the goal by the current state to get
# meaningful deltas afterwards
for i, f in enumerate(self._features):
if f in self.goal_space['delta_feats']:
goal[:, i] += gstate[i]
if self.zero_twist_goals and f in self.goal_space['twist_feats']:
goal[:, i] = 0
return goal
def sample_goal_using_r(self) -> np.ndarray:
N = 128
cand = self.sample_goals_random(N=N)
if self.backproject_goal:
s = self.goal_featurizer()[self.task_idx]
gb = (np.matmul(cand, self.psi_1[self._features]) + self.offset_1)[
:, self.task_idx
]
g = gb - s
g *= self._feature_mask
else:
gs = self.proj(self.goal_featurizer(), self._features)
g = np.zeros((N, self.observation_space['desired_goal'].shape[0]))
g[:, 0 : len(self._features)] = cand - gs
obs = super().get_observation()[self.obs_mask]
inp = {
'observation': th.tensor(obs, dtype=th.float32)
.unsqueeze(0)
.expand(N, obs.shape[0]),
'desired_goal': th.tensor(g, dtype=th.float32),
'task': th.tensor(self._feature_mask, dtype=th.float32)
.unsqueeze(0)
.expand(N, self._feature_mask.shape[0]),
}
with th.no_grad():
action = self.model.pi(inp).mean
inp['action'] = action
with th.no_grad():
r = self.model.reachability(inp).clamp(0, 1)
if self.goal_sampling in {'r2', 'reachability2'}:
# Favor samples reachable with 50% probability
dist = th.tanh(2 * (1 - th.abs(r * 2 - 1) + 1e-1))
else:
# Favor unreachable samples
dist = 1 / (r.view(-1) + 0.1)
return cand[th.multinomial(dist, 1).item()]
def sample_goal_using_q(self) -> np.ndarray:
N = 128
cand = self.sample_goals_random(N=N)
if self.backproject_goal:
s = self.goal_featurizer()[self.task_idx]
gb = (np.matmul(cand, self.psi_1[self._features]) + self.offset_1)[
:, self.task_idx
]
g = gb - s
g *= self._feature_mask
else:
gs = self.proj(self.goal_featurizer(), self._features)
g = np.zeros((N, self.observation_space['desired_goal'].shape[0]))
g[:, 0 : len(self._features)] = cand - gs
obs = super().get_observation()[self.obs_mask]
inp = {
'observation': th.tensor(obs, dtype=th.float32)
.unsqueeze(0)
.expand(N, obs.shape[0]),
'desired_goal': th.tensor(g, dtype=th.float32),
'task': th.tensor(self._feature_mask, dtype=th.float32)
.unsqueeze(0)
.expand(N, self._feature_mask.shape[0]),
}
with th.no_grad():
action = self.model.pi(inp).mean
inp['action'] = action
with th.no_grad():
q = th.min(self.model.q(inp), dim=-1).values
ctrl_cost = (
self.max_steps
* self.ctrl_cost
* (0.25 * self.action_space.shape[0])
)
wobs = self.proj(obs, self._features)
dist = np.linalg.norm(cand - wobs, ord=2, axis=1)
min_ret = (dist - ctrl_cost) * self.gamma ** self.max_steps
slack = q - min_ret
dist = 1 / (slack - slack.min() + 1)
return cand[th.multinomial(dist, 1).item()]
def reset(self):
need_hard_reset = self._do_hard_reset or (
self.hard_reset_interval > 0
and self._reset_counter % self.hard_reset_interval == 0
)
# Reset
if need_hard_reset:
self.hard_reset()
self._reset_counter = 0
if self.relative_frame_of_reference:
self.goal_featurizer.set_frame_of_reference()
# Sample features and goal
resample_features = False
if need_hard_reset:
resample_features = True
if self.resample_features == 'soft':
resample_features = True
elif self.resample_features.startswith('soft'):
freq = int(self.resample_features[4:])
resample_features = self._reset_counter % freq == 0
if resample_features:
self._features = self.sample_features()
self._features_s = self._feature_strings[
','.join(map(str, self._features))
]
self._feature_mask *= 0
for f in self._features:
self._feature_mask[self.task_map[f]] = 1.0
self.goal = self.sample_goals_random()[0]
if self.goal_sampling in {'q', 'q_value'}:
if self.model:
self.goal = self.sample_goal_using_q()
elif self.goal_sampling in {'r', 'reachability', 'r2', 'reachability2'}:
if self.model:
self.goal = self.sample_goal_using_r()
elif self.goal_sampling not in {'random', 'uniform'}:
raise ValueError(
f'Unknown goal sampling method "{self.goal_sampling}"'
)
def distance_to_goal():
gs = self.proj(self.goal_featurizer(), self._features)
d = self.goal - gs
for i, f in enumerate(self._features):
if f in self.goal_space['twist_feats']:
# Wrap around projected pi/-pi for distance
d[i] = (
np.remainder(
(self.goal[i] - gs[i]) + self.proj_pi,
2 * self.proj_pi,
)
- self.proj_pi
)
return np.linalg.norm(d, ord=2)
self._d_initial = distance_to_goal()
self._do_hard_reset = False
self._reset_counter += 1
self._step = 0
return self.get_observation()
def step(self, action):
def distance_to_goal():
gs = self.proj(self.goal_featurizer(), self._features)
d = self.goal - gs
for i, f in enumerate(self._features):
if f in self.goal_space['twist_feats']:
# Wrap around projected pi/-pi for distance
d[i] = (
np.remainder(
(self.goal[i] - gs[i]) + self.proj_pi,
2 * self.proj_pi,
)
- self.proj_pi
)
return np.linalg.norm(d, ord=2)
d_prev = distance_to_goal()
next_obs, reward, done, info = super().step(action)
d_new = distance_to_goal()
info['potential'] = d_prev - d_new
info['distance'] = d_new
info['reached_goal'] = info['distance'] < self.precision
if self.reward == 'potential':
reward = info['potential']
elif self.reward == 'potential2':
reward = d_prev - self.gamma * d_new
elif self.reward == 'potential3':
reward = 1.0 if info['reached_goal'] else 0.0
reward += d_prev - self.gamma * d_new
elif self.reward == 'potential4':
reward = (d_prev - d_new) / self._d_initial
elif self.reward == 'distance':
reward = -info['distance']
elif self.reward == 'sparse':
reward = 1.0 if info['reached_goal'] else 0.0
else:
raise ValueError(f'Unknown reward: {self.reward}')
reward -= self.ctrl_cost * np.square(action).sum()
info['EpisodeContinues'] = True
if info['reached_goal'] == True and not self.full_episodes:
done = True
info['time'] = self._step
self._step += 1
if self._step >= self.max_steps:
done = True
elif (
not info['reached_goal'] and self.np_random.random() < self.reset_p
):
info['RandomReset'] = True
done = True
if not self.allow_fallover and self.fell_over():
reward = self.fallover_penalty
done = True
self._do_hard_reset = True
info['reached_goal'] = False
info['fell_over'] = True
if done and (
self._do_hard_reset
or (self._reset_counter % self.hard_reset_interval == 0)
):
del info['EpisodeContinues']
if done:
info['LastStepOfTask'] = True
if done and 'EpisodeContinues' in info and self.implicit_soft_resets:
need_hard_reset = self._do_hard_reset or (
self.hard_reset_interval > 0
and self._reset_counter % self.hard_reset_interval == 0
)
if not need_hard_reset:
# Do implicit resets, let episode continue
next_obs = self.reset()
done = False
del info['EpisodeContinues']
info['SoftReset'] = True
info['features'] = self._features_s
return next_obs, reward, done, info
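    # Worked example of the default 'potential' reward (made-up numbers): if
    # the goal distance shrinks from d_prev = 0.50 to d_new = 0.35 within one
    # step, the shaped reward is 0.50 - 0.35 = 0.15, before the control cost
    # ctrl_cost * sum(action**2) is subtracted.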
@staticmethod
def feature_controllable(robot: str, features: str, dim: int) -> bool:
if not features in g_goal_spaces:
raise ValueError(f'Unsupported feature space: {robot}')
if not robot in g_goal_spaces[features]:
raise ValueError(f'Unsupported robot: {robot}')
gs = g_goal_spaces[features][robot]
if dim < 0:
raise ValueError(f'Feature {dim} out of range')
if dim >= len(gs['min']):
return False
# Return whether feature is controllable, i.e. range is non-zero
return gs['min'][dim] != gs['max'][dim]
@staticmethod
def abstraction_matrix(
robot: str, features: str, sdim: int
) -> Tuple[np.array, np.array]:
if not features in g_goal_spaces:
raise ValueError(f'Unsupported feature space: {robot}')
if not robot in g_goal_spaces[features]:
raise ValueError(f'Unsupported robot: {robot}')
gs = g_goal_spaces[features][robot]
gmin = np.array(gs['min'])
gmax = np.array(gs['max'])
if gmin.size == 0:
# Dummy values
gmin = -np.ones(sdim)
import os
import torch
import numpy as np
from src.crowd_count import CrowdCounter
from src import network
from src.data_loader import ImageDataLoader
from src import utils
import cv2
torch.backends.cudnn.enabled = False
torch.backends.cudnn.benchmark = False
vis = True
save_output = False
image_path = 'othertest/test.1.jpg'
model_path = 'saved_models/mcnn_SH_B_95.h5'
# output_dir = './output/'
model_name = os.path.basename(model_path).split('.')[0]
# file_results = os.path.join(output_dir,'results_' + model_name + '_.txt')
# if not os.path.exists(output_dir):
# os.mkdir(output_dir)
# output_dir = os.path.join(output_dir, 'density_maps_' + model_name)
# if not os.path.exists(output_dir):
# os.mkdir(output_dir)
net = CrowdCounter()
trained_model = os.path.join(model_path)
network.load_net(trained_model, net)
# net.cuda()
# net.eval()
mae = 0.0
mse = 0.0
#load test data
# data_loader = ImageDataLoader(data_path, gt_path, shuffle=False, gt_downsample=True, pre_load=True)
img = cv2.imread(image_path, 0)
img = img.astype(np.float32, copy=False)
ht = img.shape[0]
wd = img.shape[1]
ht_1 = int((ht/4)*4)
wd_1 = int((wd/4)*4)
img = cv2.resize(img, (int(wd_1), int(ht_1)))
img = img.reshape((1, 1, img.shape[0], img.shape[1]))
density_map = net(img)
density_map = density_map.data.cpu().numpy()
et_count = np.sum(density_map)
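# Note: the estimated head count is the integral (sum) of the predicted
# density map; e.g. a 2x2 density map [[0.2, 0.3], [0.1, 0.4]] would give an
# estimated count of 1.0.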
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: BSD 3 clause
from __future__ import division
from itertools import chain, combinations
import warnings
from itertools import combinations_with_replacement as combinations_w_r
from distutils.version import LooseVersion
import numpy as np
from scipy import sparse
from scipy import stats
from scipy import optimize
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six import string_types
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.extmath import _incremental_mean_and_var
from ..utils.fixes import boxcox, nanpercentile, nanmedian
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale,
mean_variance_axis, incr_mean_variance_axis,
min_max_axis)
from ..utils.validation import (check_is_fitted, check_random_state,
FLOAT_DTYPES)
from ._encoders import OneHotEncoder
BOUNDS_THRESHOLD = 1e-7
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'QuantileTransformer',
'PowerTransformer',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
'quantile_transform',
'power_transform',
]
def _handle_zeros_in_scale(scale, copy=True):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, np.ndarray):
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[scale == 0.0] = 1.0
return scale
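# Illustrative sketch (hypothetical helper, made-up values) of the zero-scale
# handling above.
def _example_handle_zeros_in_scale():
    # A zero scale (constant feature) would cause division by zero downstream,
    # so it is replaced by 1 and that feature is left un-scaled.
    return _handle_zeros_in_scale(np.array([2.0, 0.0, 0.5]))  # -> [2., 1., 0.5]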
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : {array-like, sparse matrix}
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSC matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSC matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSC matrix.
NaNs are treated as missing values: disregarded to compute the statistics,
and maintained during the data transformation.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
See also
--------
    StandardScaler: Performs scaling to unit variance using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
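    Examples
    --------
    A minimal sketch mirroring the :class:`StandardScaler` docstring example
    (each column is centered to zero mean and scaled to unit variance):
    >>> from sklearn.preprocessing import scale
    >>> data = [[0, 0], [0, 0], [1, 1], [1, 1]]
    >>> print(scale(data))
    [[-1. -1.]
     [-1. -1.]
     [ 1.  1.]
     [ 1.  1.]]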
""" # noqa
X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES, force_all_finite='allow-nan')
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if with_std:
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var, copy=False)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
if with_mean:
mean_ = np.nanmean(X, axis)
if with_std:
scale_ = np.nanstd(X, axis)
# Xr is a view on the original array that enables easy use of
        # broadcasting on the axis in which we are interested
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = np.nanmean(Xr, axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
scale_ = _handle_zeros_in_scale(scale_, copy=False)
Xr /= scale_
if with_mean:
mean_2 = np.nanmean(Xr, axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# scale_ is very small so that mean_2 = mean_1/scale_ > 0, even
# if mean_1 was close to zero. The problem is thus essentially
# due to the lack of precision of mean_. A solution is then to
# subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
    that it is in the given range on the training set, e.g. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
        Set to False to perform inplace scaling and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
data_min_ : ndarray, shape (n_features,)
Per feature minimum seen in the data
.. versionadded:: 0.17
*data_min_*
data_max_ : ndarray, shape (n_features,)
Per feature maximum seen in the data
.. versionadded:: 0.17
*data_max_*
data_range_ : ndarray, shape (n_features,)
Per feature range ``(data_max_ - data_min_)`` seen in the data
.. versionadded:: 0.17
*data_range_*
Examples
--------
>>> from sklearn.preprocessing import MinMaxScaler
>>>
>>> data = [[-1, 2], [-0.5, 6], [0, 10], [1, 18]]
>>> scaler = MinMaxScaler()
>>> print(scaler.fit(data))
MinMaxScaler(copy=True, feature_range=(0, 1))
>>> print(scaler.data_max_)
[ 1. 18.]
>>> print(scaler.transform(data))
[[0. 0. ]
[0.25 0.25]
[0.5 0.5 ]
[1. 1. ]]
>>> print(scaler.transform([[2, 2]]))
[[1.5 0. ]]
See also
--------
minmax_scale: Equivalent function without the estimator API.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.min_
del self.n_samples_seen_
del self.data_min_
del self.data_max_
del self.data_range_
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of min and max on X for later scaling.
All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y
Ignored
"""
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
if sparse.issparse(X):
raise TypeError("MinMaxScaler does no support sparse input. "
"You may consider to use MaxAbsScaler instead.")
X = check_array(X, copy=self.copy, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES,
force_all_finite="allow-nan")
data_min = np.nanmin(X, axis=0)
data_max = np.nanmax(X, axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next steps
else:
data_min = np.minimum(self.data_min_, data_min)
data_max = np.maximum(self.data_max_, data_max)
self.n_samples_seen_ += X.shape[0]
data_range = data_max - data_min
self.scale_ = ((feature_range[1] - feature_range[0]) /
_handle_zeros_in_scale(data_range))
self.min_ = feature_range[0] - data_min * self.scale_
self.data_min_ = data_min
self.data_max_ = data_max
self.data_range_ = data_range
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES,
force_all_finite="allow-nan")
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed. It cannot be sparse.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES,
force_all_finite="allow-nan")
X -= self.min_
X /= self.scale_
return X
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
    that it is in the given range on the training set, e.g. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
.. versionadded:: 0.17
*minmax_scale* function interface
to :class:`sklearn.preprocessing.MinMaxScaler`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data.
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
    MinMaxScaler: Performs scaling to a given range using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
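    Examples
    --------
    A minimal sketch using the same data as the :class:`MinMaxScaler`
    docstring example (each column is mapped onto ``[0, 1]``):
    >>> from sklearn.preprocessing import minmax_scale
    >>> data = [[-1, 2], [-0.5, 6], [0, 10], [1, 18]]
    >>> print(minmax_scale(data))  # doctest: +NORMALIZE_WHITESPACE
    [[0.   0.  ]
     [0.25 0.25]
     [0.5  0.5 ]
     [1.   1.  ]]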
""" # noqa
# Unlike the scaler object, this function allows 1d input.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True,
dtype=FLOAT_DTYPES, force_all_finite='allow-nan')
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
This scaler can also be applied to sparse CSR or CSC matrices by passing
`with_mean=False` to avoid breaking the sparsity structure of the data.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
Attributes
----------
scale_ : ndarray or None, shape (n_features,)
Per feature relative scaling of the data. Equal to ``None`` when
``with_std=False``.
.. versionadded:: 0.17
*scale_*
mean_ : ndarray or None, shape (n_features,)
The mean value for each feature in the training set.
Equal to ``None`` when ``with_mean=False``.
var_ : ndarray or None, shape (n_features,)
The variance for each feature in the training set. Used to compute
`scale_`. Equal to ``None`` when ``with_std=False``.
n_samples_seen_ : int or array, shape (n_features,)
The number of samples processed by the estimator for each feature.
        If there are no missing samples, ``n_samples_seen_`` will be an
integer, otherwise it will be an array.
Will be reset on new calls to fit, but increments across
``partial_fit`` calls.
Examples
--------
>>> from sklearn.preprocessing import StandardScaler
>>> data = [[0, 0], [0, 0], [1, 1], [1, 1]]
>>> scaler = StandardScaler()
>>> print(scaler.fit(data))
StandardScaler(copy=True, with_mean=True, with_std=True)
>>> print(scaler.mean_)
[0.5 0.5]
>>> print(scaler.transform(data))
[[-1. -1.]
[-1. -1.]
[ 1. 1.]
[ 1. 1.]]
>>> print(scaler.transform([[2, 2]]))
[[3. 3.]]
See also
--------
scale: Equivalent function without the estimator API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with 'whiten=True'.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
""" # noqa
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.mean_
del self.var_
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y
Ignored
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of mean and std on X for later scaling.
All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
The algorithm for incremental mean and std is given in Equation 1.5a,b
in Chan, <NAME>., <NAME>, and <NAME>. "Algorithms
for computing the sample variance: Analysis and recommendations."
The American Statistician 37.3 (1983): 242-247:
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y
Ignored
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
# Even in the case of `with_mean=False`, we update the mean anyway
# This is needed for the incremental computation of the var
        # See incr_mean_variance_axis and _incremental_mean_and_var
# if n_samples_seen_ is an integer (i.e. no missing values), we need to
# transform it to a NumPy array of shape (n_features,) required by
        # incr_mean_variance_axis and _incremental_mean_and_var
if (hasattr(self, 'n_samples_seen_') and
isinstance(self.n_samples_seen_, (int, np.integer))):
self.n_samples_seen_ = np.repeat(self.n_samples_seen_,
X.shape[1]).astype(np.int64)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
sparse_constructor = (sparse.csr_matrix
if X.format == 'csr' else sparse.csc_matrix)
counts_nan = sparse_constructor(
(np.isnan(X.data), X.indices, X.indptr),
shape=X.shape).sum(axis=0).A.ravel()
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = (X.shape[0] -
counts_nan).astype(np.int64)
if self.with_std:
# First pass
if not hasattr(self, 'scale_'):
self.mean_, self.var_ = mean_variance_axis(X, axis=0)
# Next passes
else:
self.mean_, self.var_, self.n_samples_seen_ = \
incr_mean_variance_axis(X, axis=0,
last_mean=self.mean_,
last_var=self.var_,
last_n=self.n_samples_seen_)
else:
self.mean_ = None
self.var_ = None
if hasattr(self, 'scale_'):
self.n_samples_seen_ += X.shape[0] - counts_nan
else:
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = np.zeros(X.shape[1], dtype=np.int64)
# First pass
if not hasattr(self, 'scale_'):
self.mean_ = .0
if self.with_std:
self.var_ = .0
else:
self.var_ = None
if not self.with_mean and not self.with_std:
self.mean_ = None
self.var_ = None
self.n_samples_seen_ += X.shape[0] - np.isnan(X).sum(axis=0)
else:
self.mean_, self.var_, self.n_samples_seen_ = \
_incremental_mean_and_var(X, self.mean_, self.var_,
self.n_samples_seen_)
# for backward-compatibility, reduce n_samples_seen_ to an integer
# if the number of samples is the same for each feature (i.e. no
# missing values)
if np.ptp(self.n_samples_seen_) == 0:
self.n_samples_seen_ = self.n_samples_seen_[0]
if self.with_std:
self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
else:
self.scale_ = None
return self
def transform(self, X, y='deprecated', copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
copy : bool, optional (default: None)
Copy the input X or not.
"""
if not isinstance(y, string_types) or y != 'deprecated':
warnings.warn("The parameter y on transform() is "
"deprecated since 0.19 and will be removed in 0.21",
DeprecationWarning)
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.scale_ is not None:
inplace_column_scale(X, 1 / self.scale_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.scale_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
copy : bool, optional (default: None)
Copy the input X or not.
Returns
-------
X_tr : array-like, shape [n_samples, n_features]
Transformed array.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.scale_ is not None:
inplace_column_scale(X, self.scale_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.scale_
if self.with_mean:
X += self.mean_
return X
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
    This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
.. versionadded:: 0.17
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
max_abs_ : ndarray, shape (n_features,)
Per feature maximum absolute value.
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
Examples
--------
>>> from sklearn.preprocessing import MaxAbsScaler
>>> X = [[ 1., -1., 2.],
... [ 2., 0., 0.],
... [ 0., 1., -1.]]
>>> transformer = MaxAbsScaler().fit(X)
>>> transformer
MaxAbsScaler(copy=True)
>>> transformer.transform(X)
array([[ 0.5, -1. , 1. ],
[ 1. , 0. , 0. ],
[ 0. , 1. , -0.5]])
See also
--------
maxabs_scale: Equivalent function without the estimator API.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
def __init__(self, copy=True):
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.max_abs_
def fit(self, X, y=None):
"""Compute the maximum absolute value to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of max absolute value of X for later scaling.
All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y
Ignored
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0, ignore_nan=True)
max_abs = np.maximum(np.abs(mins), np.abs(maxs))
else:
max_abs = np.nanmax(np.abs(X), axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
max_abs = np.maximum(self.max_abs_, max_abs)
self.n_samples_seen_ += X.shape[0]
self.max_abs_ = max_abs
self.scale_ = _handle_zeros_in_scale(max_abs)
return self
def transform(self, X):
"""Scale the data
Parameters
----------
X : {array-like, sparse matrix}
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
if sparse.issparse(X):
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : {array-like, sparse matrix}
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
if sparse.issparse(X):
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
    MaxAbsScaler: Performs scaling to the [-1, 1] range using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
Notes
-----
NaNs are treated as missing values: disregarded to compute the statistics,
and maintained during the data transformation.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
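    Examples
    --------
    A minimal sketch using the same data as the :class:`MaxAbsScaler`
    docstring example (each column is divided by its maximum absolute value):
    >>> from sklearn.preprocessing import maxabs_scale
    >>> X = [[ 1., -1.,  2.],
    ...      [ 2.,  0.,  0.],
    ...      [ 0.,  1., -1.]]
    >>> maxabs_scale(X)  # doctest: +NORMALIZE_WHITESPACE
    array([[ 0.5, -1. ,  1. ],
           [ 1. ,  0. ,  0. ],
           [ 0. ,  1. , -0.5]])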
""" # noqa
# Unlike the scaler object, this function allows 1d input.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
ensure_2d=False, dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MaxAbsScaler(copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the quantile range (defaults to IQR: Interquartile Range).
    The IQR is the range between the 1st quartile (25th percentile)
    and the 3rd quartile (75th percentile).
Centering and scaling happen independently on each feature by
computing the relevant statistics on the samples in the training
set. Median and interquartile range are then stored to be used on
later data using the ``transform`` method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This will cause ``transform`` to raise an exception when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
        Default: (25.0, 75.0) = (1st quartile, 3rd quartile) = IQR
Quantile range used to calculate ``scale_``.
.. versionadded:: 0.18
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
.. versionadded:: 0.17
*scale_* attribute.
Examples
--------
>>> from sklearn.preprocessing import RobustScaler
>>> X = [[ 1., -2., 2.],
... [ -2., 1., 3.],
... [ 4., 1., -2.]]
>>> transformer = RobustScaler().fit(X)
>>> transformer
RobustScaler(copy=True, quantile_range=(25.0, 75.0), with_centering=True,
with_scaling=True)
>>> transformer.transform(X)
array([[ 0. , -2. , 0. ],
[-1. , 0. , 0.4],
[ 1. , 0. , -1.6]])
See also
--------
robust_scale: Equivalent function without the estimator API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with
'whiten=True'.
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
https://en.wikipedia.org/wiki/Median
https://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True,
quantile_range=(25.0, 75.0), copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.quantile_range = quantile_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
# at fit, convert sparse matrices to csc for optimized computation of
# the quantiles
X = check_array(X, accept_sparse='csc', copy=self.copy, estimator=self,
dtype=FLOAT_DTYPES, force_all_finite='allow-nan')
q_min, q_max = self.quantile_range
if not 0 <= q_min <= q_max <= 100:
raise ValueError("Invalid quantile range: %s" %
str(self.quantile_range))
if self.with_centering:
if sparse.issparse(X):
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
self.center_ = nanmedian(X, axis=0)
else:
self.center_ = None
if self.with_scaling:
quantiles = []
for feature_idx in range(X.shape[1]):
if sparse.issparse(X):
column_nnz_data = X.data[X.indptr[feature_idx]:
X.indptr[feature_idx + 1]]
column_data = np.zeros(shape=X.shape[0], dtype=X.dtype)
column_data[:len(column_nnz_data)] = column_nnz_data
else:
column_data = X[:, feature_idx]
quantiles.append(nanpercentile(column_data,
self.quantile_range))
quantiles = np.transpose(quantiles)
self.scale_ = quantiles[1] - quantiles[0]
self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
else:
self.scale_ = None
return self
def transform(self, X):
"""Center and scale the data.
Parameters
----------
X : {array-like, sparse matrix}
The data used to scale along the specified axis.
"""
check_is_fitted(self, 'center_', 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
check_is_fitted(self, 'center_', 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
def robust_scale(X, axis=0, with_centering=True, with_scaling=True,
quantile_range=(25.0, 75.0), copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
        If True, scale the data to the interquartile range.
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
        Default: (25.0, 75.0) = (1st quartile, 3rd quartile) = IQR
Quantile range used to calculate ``scale_``.
.. versionadded:: 0.18
copy : boolean, optional, default is True
        set to False to perform inplace scaling and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
    `with_centering=False` (in that case, only scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
See also
--------
RobustScaler: Performs centering and scaling using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
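    Examples
    --------
    A minimal sketch using the same data as the :class:`RobustScaler`
    docstring example (each column is centered on its median and divided by
    its interquartile range):
    >>> from sklearn.preprocessing import robust_scale
    >>> X = [[ 1., -2.,  2.],
    ...      [-2.,  1.,  3.],
    ...      [ 4.,  1., -2.]]
    >>> robust_scale(X)  # doctest: +NORMALIZE_WHITESPACE
    array([[ 0. , -2. ,  0. ],
           [-1. ,  0. ,  0.4],
           [ 1. ,  0. , -1.6]])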
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
ensure_2d=False, dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
quantile_range=quantile_range, copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0., 0., 1.],
[ 1., 2., 3., 4., 6., 9.],
[ 1., 4., 5., 16., 20., 25.]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0.],
[ 1., 2., 3., 6.],
[ 1., 4., 5., 20.]])
Attributes
----------
powers_ : array, shape (n_output_features, n_input_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<sphx_glr_auto_examples_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def get_feature_names(self, input_features=None):
"""
Return feature names for output features
Parameters
----------
input_features : list of string, length n_features, optional
String names for input features if available. By default,
"x0", "x1", ... "xn_features" is used.
Returns
-------
output_feature_names : list of string, length n_output_features
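        Examples
        --------
        A small illustrative call (default names follow the ``x0``, ``x1``,
        ... pattern):
        >>> from sklearn.preprocessing import PolynomialFeatures
        >>> poly = PolynomialFeatures(degree=2).fit([[0, 1], [2, 3]])
        >>> poly.get_feature_names()
        ['1', 'x0', 'x1', 'x0^2', 'x0 x1', 'x1^2']
        >>> poly.get_feature_names(['a', 'b'])
        ['1', 'a', 'b', 'a^2', 'a b', 'b^2']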
"""
powers = self.powers_
if input_features is None:
input_features = ['x%d' % i for i in range(powers.shape[1])]
feature_names = []
for row in powers:
inds = np.where(row)[0]
if len(inds):
name = " ".join("%s^%d" % (input_features[ind], exp)
if exp != 1 else input_features[ind]
for ind, exp in zip(inds, row[inds]))
else:
name = "1"
feature_names.append(name)
return feature_names
def fit(self, X, y=None):
"""
Compute number of output features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data.
Returns
-------
self : instance
"""
n_samples, n_features = check_array(X, accept_sparse=True).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X):
"""Transform data to polynomial features
Parameters
----------
X : array-like or sparse matrix, shape [n_samples, n_features]
The data to transform, row by row.
Sparse input should preferably be in CSC format.
Returns
-------
XP : np.ndarray or CSC sparse matrix, shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X, dtype=FLOAT_DTYPES, accept_sparse='csc')
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
if sparse.isspmatrix(X):
columns = []
for comb in combinations:
if comb:
out_col = 1
for col_idx in comb:
out_col = X[:, col_idx].multiply(out_col)
columns.append(out_col)
else:
columns.append(sparse.csc_matrix(np.ones((X.shape[0], 1))))
XP = sparse.hstack(columns, dtype=X.dtype).tocsc()
else:
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
for i, comb in enumerate(combinations):
XP[:, i] = X[:, comb].prod(1)
return XP
def normalize(X, norm='l2', axis=1, copy=True, return_norm=False):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
return_norm : boolean, default False
whether to return the computed norms
Returns
-------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Normalized input X.
norms : array, shape [n_samples] if axis=1 else [n_features]
An array of norms along given axis for X.
When X is sparse, a NotImplementedError will be raised
for norm 'l1' or 'l2'.
See also
--------
Normalizer: Performs normalization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
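    Examples
    --------
    A minimal sketch using the same data as the :class:`Normalizer` docstring
    example; with ``return_norm=True`` the per-row l2 norms are returned as
    well:
    >>> from sklearn.preprocessing import normalize
    >>> X = [[4, 1, 2, 2],
    ...      [1, 3, 9, 3],
    ...      [5, 7, 5, 1]]
    >>> X_normalized, norms = normalize(X, norm='l2', return_norm=True)
    >>> norms  # doctest: +NORMALIZE_WHITESPACE
    array([ 5., 10., 10.])
    >>> X_normalized  # doctest: +NORMALIZE_WHITESPACE
    array([[0.8, 0.2, 0.4, 0.4],
           [0.1, 0.3, 0.9, 0.3],
           [0.5, 0.7, 0.5, 0.1]])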
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if return_norm and norm in ('l1', 'l2'):
raise NotImplementedError("return_norm=True is not implemented "
"for sparse matrices with norm 'l1' "
"or norm 'l2'")
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms_elementwise = norms.repeat(np.diff(X.indptr))
mask = norms_elementwise != 0
X.data[mask] /= norms_elementwise[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms, copy=False)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
if return_norm:
return X, norms
else:
return X
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
    classification or clustering. For instance, the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Examples
--------
>>> from sklearn.preprocessing import Normalizer
>>> X = [[4, 1, 2, 2],
... [1, 3, 9, 3],
... [5, 7, 5, 1]]
>>> transformer = Normalizer().fit(X) # fit does nothing.
>>> transformer
Normalizer(copy=True, norm='l2')
>>> transformer.transform(X)
array([[0.8, 0.2, 0.4, 0.4],
[0.1, 0.3, 0.9, 0.3],
[0.5, 0.7, 0.5, 0.1]])
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
See also
--------
normalize: Equivalent function without the estimator API.
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
Parameters
----------
X : array-like
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y='deprecated', copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
copy : bool, optional (default: None)
Copy the input X or not.
"""
if not isinstance(y, string_types) or y != 'deprecated':
warnings.warn("The parameter y on transform() is "
"deprecated since 0.19 and will be removed in 0.21",
DeprecationWarning)
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
Binarizer: Performs binarization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
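    Examples
    --------
    A minimal sketch (values strictly greater than ``threshold`` map to 1,
    everything else to 0):
    >>> from sklearn.preprocessing import binarize
    >>> X = [[ 1., -1.,  2.],
    ...      [ 2.,  0.,  0.],
    ...      [ 0.,  1., -1.]]
    >>> binarize(X, threshold=0.5)  # doctest: +NORMALIZE_WHITESPACE
    array([[1., 0., 1.],
           [1., 0., 0.],
           [0., 1., 0.]])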
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Examples
--------
>>> from sklearn.preprocessing import Binarizer
>>> X = [[ 1., -1., 2.],
... [ 2., 0., 0.],
... [ 0., 1., -1.]]
>>> transformer = Binarizer().fit(X) # fit does nothing.
>>> transformer
Binarizer(copy=True, threshold=0.0)
>>> transformer.transform(X)
array([[1., 0., 1.],
[1., 0., 0.],
[0., 1., 0.]])
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
binarize: Equivalent function without the estimator API.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
Parameters
----------
X : array-like
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y='deprecated', copy=None):
"""Binarize each element of X
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
copy : bool
Copy the input X or not.
"""
if not isinstance(y, string_types) or y != 'deprecated':
warnings.warn("The parameter y on transform() is "
"deprecated since 0.19 and will be removed in 0.21",
DeprecationWarning)
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
    normalizes to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
Examples
--------
>>> from sklearn.preprocessing import KernelCenterer
>>> from sklearn.metrics.pairwise import pairwise_kernels
>>> X = [[ 1., -2., 2.],
... [ -2., 1., 3.],
... [ 4., 1., -2.]]
>>> K = pairwise_kernels(X, metric='linear')
>>> K
array([[ 9., 2., -2.],
[ 2., 14., -13.],
[ -2., -13., 21.]])
>>> transformer = KernelCenterer().fit(K)
>>> transformer
KernelCenterer()
>>> transformer.transform(K)
array([[ 5., 0., -5.],
[ 0., 14., -14.],
[ -5., -14., 19.]])
"""
def __init__(self):
# Needed for backported inspect.signature compatibility with PyPy
pass
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K, dtype=FLOAT_DTYPES)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y='deprecated', copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
if not isinstance(y, string_types) or y != 'deprecated':
warnings.warn("The parameter y on transform() is "
"deprecated since 0.19 and will be removed in 0.21",
DeprecationWarning)
check_is_fitted(self, 'K_fit_all_')
K = check_array(K, copy=copy, dtype=FLOAT_DTYPES)
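        # Double centering in feature space without computing phi(x):
        # subtract the training-kernel column means (K_fit_rows_) and each
        # row's mean over the training samples (K_pred_cols), then add back
        # the overall training-kernel mean (K_fit_all_).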
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
@property
def _pairwise(self):
return True
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : {array, sparse matrix}, shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[1., 0., 1.],
[1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.full(n_samples, value), X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.full(n_samples, value), X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.full((n_samples, 1), value), X))
class QuantileTransformer(BaseEstimator, TransformerMixin):
"""Transform features using quantiles information.
This method transforms the features to follow a uniform or a normal
distribution. Therefore, for a given feature, this transformation tends
to spread out the most frequent values. It also reduces the impact of
(marginal) outliers: this is therefore a robust preprocessing scheme.
The transformation is applied on each feature independently.
    The cumulative distribution function of a feature is used to project the
original values. Features values of new/unseen data that fall below
or above the fitted range will be mapped to the bounds of the output
distribution. Note that this transform is non-linear. It may distort linear
correlations between variables measured at the same scale but renders
variables measured at different scales more directly comparable.
Read more in the :ref:`User Guide <preprocessing_transformer>`.
Parameters
----------
n_quantiles : int, optional (default=1000)
Number of quantiles to be computed. It corresponds to the number
        of landmarks used to discretize the cumulative distribution function.
output_distribution : str, optional (default='uniform')
Marginal distribution for the transformed data. The choices are
'uniform' (default) or 'normal'.
ignore_implicit_zeros : bool, optional (default=False)
Only applies to sparse matrices. If True, the sparse entries of the
matrix are discarded to compute the quantile statistics. If False,
these entries are treated as zeros.
subsample : int, optional (default=1e5)
Maximum number of samples used to estimate the quantiles for
computational efficiency. Note that the subsampling procedure may
differ for value-identical sparse and dense matrices.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by np.random. Note that this is used by subsampling and smoothing
noise.
copy : boolean, optional, (default=True)
Set to False to perform inplace transformation and avoid a copy (if the
input is already a numpy array).
Attributes
----------
quantiles_ : ndarray, shape (n_quantiles, n_features)
        The values corresponding to the quantiles of reference.
    references_ : ndarray, shape (n_quantiles,)
Quantiles of references.
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import QuantileTransformer
>>> rng = np.random.RandomState(0)
>>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
>>> qt = QuantileTransformer(n_quantiles=10, random_state=0)
>>> qt.fit_transform(X) # doctest: +ELLIPSIS
array([...])
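    Values outside the fitted range are mapped onto the bounds of the output
    distribution, so the default uniform output spans ``[0, 1]`` (illustrative
    check; the exact handling of the bounds is an implementation detail):
    >>> float(qt.fit_transform(X).min()), float(qt.fit_transform(X).max())
    (0.0, 1.0)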
See also
--------
quantile_transform : Equivalent function without the estimator API.
PowerTransformer : Perform mapping to a normal distribution using a power
transform.
StandardScaler : Perform standardization that is faster, but less robust
to outliers.
RobustScaler : Perform robust standardization that removes the influence
of outliers but does not put outliers and inliers on the same scale.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
def __init__(self, n_quantiles=1000, output_distribution='uniform',
ignore_implicit_zeros=False, subsample=int(1e5),
random_state=None, copy=True):
self.n_quantiles = n_quantiles
self.output_distribution = output_distribution
self.ignore_implicit_zeros = ignore_implicit_zeros
self.subsample = subsample
self.random_state = random_state
self.copy = copy
def _dense_fit(self, X, random_state):
"""Compute percentiles for dense matrices.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
The data used to scale along the features axis.
"""
if self.ignore_implicit_zeros:
warnings.warn("'ignore_implicit_zeros' takes effect only with"
" sparse matrix. This parameter has no effect.")
n_samples, n_features = X.shape
references = self.references_ * 100
# numpy < 1.9 bug: np.percentile 2nd argument needs to be a list
if LooseVersion(np.__version__) < '1.9':
references = references.tolist()
self.quantiles_ = []
for col in X.T:
if self.subsample < n_samples:
subsample_idx = random_state.choice(n_samples,
size=self.subsample,
replace=False)
col = col.take(subsample_idx, mode='clip')
self.quantiles_.append(nanpercentile(col, references))
self.quantiles_ = np.transpose(self.quantiles_)
def _sparse_fit(self, X, random_state):
"""Compute percentiles for sparse matrices.
Parameters
----------
X : sparse matrix CSC, shape (n_samples, n_features)
The data used to scale along the features axis. The sparse matrix
needs to be nonnegative.
"""
n_samples, n_features = X.shape
references = self.references_ * 100
# numpy < 1.9 bug: np.percentile 2nd argument needs to be a list
if LooseVersion(np.__version__) < '1.9':
references = references.tolist()
self.quantiles_ = []
for feature_idx in range(n_features):
column_nnz_data = X.data[X.indptr[feature_idx]:
X.indptr[feature_idx + 1]]
if len(column_nnz_data) > self.subsample:
column_subsample = (self.subsample * len(column_nnz_data) //
n_samples)
if self.ignore_implicit_zeros:
column_data = np.zeros(shape=column_subsample,
dtype=X.dtype)
else:
column_data = np.zeros(shape=self.subsample, dtype=X.dtype)
column_data[:column_subsample] = random_state.choice(
column_nnz_data, size=column_subsample, replace=False)
else:
if self.ignore_implicit_zeros:
column_data = np.zeros(shape=len(column_nnz_data),
dtype=X.dtype)
else:
column_data = np.zeros(shape=n_samples, dtype=X.dtype)
column_data[:len(column_nnz_data)] = column_nnz_data
if not column_data.size:
# if no nnz, an error will be raised for computing the
# quantiles. Force the quantiles to be zeros.
self.quantiles_.append([0] * len(references))
else:
self.quantiles_.append(nanpercentile(column_data, references))
self.quantiles_ = np.transpose(self.quantiles_)
def fit(self, X, y=None):
"""Compute the quantiles used for transforming.
Parameters
----------
X : ndarray or sparse matrix, shape (n_samples, n_features)
The data used to scale along the features axis. If a sparse
matrix is provided, it will be converted into a sparse
``csc_matrix``. Additionally, the sparse matrix needs to be
nonnegative if `ignore_implicit_zeros` is False.
Returns
-------
self : object
"""
if self.n_quantiles <= 0:
raise ValueError("Invalid value for 'n_quantiles': %d. "
"The number of quantiles must be at least one."
% self.n_quantiles)
if self.subsample <= 0:
raise ValueError("Invalid value for 'subsample': %d. "
"The number of subsamples must be at least one."
% self.subsample)
if self.n_quantiles > self.subsample:
raise ValueError("The number of quantiles cannot be greater than"
" the number of samples used. Got {} quantiles"
" and {} samples.".format(self.n_quantiles,
self.subsample))
X = self._check_inputs(X)
rng = check_random_state(self.random_state)
# Create the quantiles of reference
self.references_ = np.linspace(0, 1, self.n_quantiles,
endpoint=True)
if sparse.issparse(X):
self._sparse_fit(X, rng)
else:
self._dense_fit(X, rng)
return self
def _transform_col(self, X_col, quantiles, inverse):
"""Private function to transform a single feature"""
if self.output_distribution == 'normal':
output_distribution = 'norm'
else:
output_distribution = self.output_distribution
output_distribution = getattr(stats, output_distribution)
if not inverse:
lower_bound_x = quantiles[0]
upper_bound_x = quantiles[-1]
lower_bound_y = 0
upper_bound_y = 1
else:
lower_bound_x = 0
upper_bound_x = 1
lower_bound_y = quantiles[0]
upper_bound_y = quantiles[-1]
# for inverse transform, match a uniform PDF
with np.errstate(invalid='ignore'): # hide NaN comparison warnings
X_col = output_distribution.cdf(X_col)
# find index for lower and higher bounds
with np.errstate(invalid='ignore'): # hide NaN comparison warnings
lower_bounds_idx = (X_col - BOUNDS_THRESHOLD <
lower_bound_x)
upper_bounds_idx = (X_col + BOUNDS_THRESHOLD >
upper_bound_x)
isfinite_mask = ~np.isnan(X_col)
X_col_finite = X_col[isfinite_mask]
if not inverse:
# Interpolate in one direction and in the other and take the
# mean. This is in case of repeated values in the features
# and hence repeated quantiles
#
# If we don't do this, only one extreme of the duplicated is
# used (the upper when we do ascending, and the
# lower for descending). We take the mean of these two
X_col[isfinite_mask] = .5 * (
np.interp(X_col_finite, quantiles, self.references_)
- np.interp(-X_col_finite, -quantiles[::-1],
-self.references_[::-1]))
else:
X_col[isfinite_mask] = np.interp(X_col_finite,
self.references_, quantiles)
X_col[upper_bounds_idx] = upper_bound_y
X_col[lower_bounds_idx] = lower_bound_y
# for forward transform, match the output PDF
if not inverse:
with np.errstate(invalid='ignore'): # hide NaN comparison warnings
X_col = output_distribution.ppf(X_col)
# find the value to clip the data to avoid mapping to
# infinity. Clip such that the inverse transform will be
# consistent
clip_min = output_distribution.ppf(BOUNDS_THRESHOLD -
np.spacing(1))
clip_max = output_distribution.ppf(1 - (BOUNDS_THRESHOLD -
np.spacing(1)))
X_col = np.clip(X_col, clip_min, clip_max)
return X_col
def _check_inputs(self, X, accept_sparse_negative=False):
"""Check inputs before fit and transform"""
X = check_array(X, accept_sparse='csc', copy=self.copy,
dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
# we only accept a positive sparse matrix when ignore_implicit_zeros is
# false and we are calling fit or transform
with np.errstate(invalid='ignore'):  # hide NaN comparison warnings
if (not accept_sparse_negative and not self.ignore_implicit_zeros
and (sparse.issparse(X) and np.any(X.data < 0))):
raise ValueError('QuantileTransformer only accepts non-negative sparse matrices.')
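# Illustrative sketch (not sklearn API; the helper name `toy_quantile_map` and
# its defaults are assumptions): a minimal standalone version of the forward
# quantile mapping performed above. Fit stores per-feature `quantiles_` at the
# reference probabilities `references_`, and transform interpolates each value
# onto [0, 1], averaging the ascending and descending interpolations so that
# tied quantiles do not bias the result toward one extreme.
def toy_quantile_map(x, n_quantiles=10):
    import numpy as np
    references = np.linspace(0, 1, n_quantiles, endpoint=True)
    quantiles = np.nanpercentile(x, references * 100)
    forward = np.interp(x, quantiles, references)
    backward = np.interp(-x, -quantiles[::-1], -references[::-1])
    return 0.5 * (forward - backward)
# Example: a skewed sample ends up approximately uniform on [0, 1]:
#   rng = np.random.default_rng(0)
#   u = toy_quantile_map(rng.lognormal(size=100))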
from bokeh.plotting import show, figure, gridplot
from bokeh.models.annotations import Title
from bokeh.models import ColumnDataSource, Label, Range1d
from bokeh.io import show, output_notebook, push_notebook
from bokeh.models.glyphs import Line, Image
from bokeh.models.mappers import LinearColorMapper
import numpy as np
output_notebook()
class NeuronView():
def __init__(self,
input_tokens=None,
keys=None,
queries=None,
layers=None,
step=0,
head=0,
n_tokens=20,
layer_names=None):
self.layers = layers
self.head = head
self.step = step
self.query = 0
self.p = None
self.input_tokens = input_tokens[:n_tokens]
self.n_tokens = n_tokens
self.keys = keys
self.queries = queries
self.layer_names = layer_names
self.key_name = layers[0][0]
self.query_name = layers[0][1]
self.source_key = None
self.source_query = None
self.product = None
self.create()
def update(self):
key = self.keys[self.key_name][self.step][0, self.head, :, :]
self.source_key.data["image"] = [key[:self.n_tokens,:]]
query = self.queries[self.query_name][self.step][0, self.head, :self.n_tokens, :]
query_input = np.zeros((self.n_tokens,query.shape[1]))
query_input[:,:] = np.nan
query_input[self.query,:] = query[self.n_tokens - self.query - 1, :]
self.source_query.data["image"] = [query_input]
product = np.multiply(query[self.n_tokens - self.query - 1, :], key)
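# Illustrative sketch (not part of NeuronView; the shapes and helper name are
# assumptions): summing the elementwise query*key product shown in update()
# over the hidden axis and applying a scaled softmax recovers the ordinary
# attention weights for the selected query token.
def toy_attention_row(query_vec, keys):
    """query_vec: (d,), keys: (n_tokens, d) -> attention weights (n_tokens,)."""
    product = query_vec * keys                  # what the neuron view displays
    scores = product.sum(axis=1) / np.sqrt(keys.shape[1])
    weights = np.exp(scores - scores.max())     # numerically stable softmax
    return weights / weights.sum()
# Example:
#   rng = np.random.default_rng(0)
#   toy_attention_row(rng.normal(size=8), rng.normal(size=(5, 8))).sum()  # ~1.0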
from numpy import (
logspace, linspace, geomspace, dtype, array, sctypes, arange, isnan,
ndarray, sqrt, nextafter, stack, errstate
)
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal, assert_allclose,
)
class PhysicalQuantity(float):
def __new__(cls, value):
return float.__new__(cls, value)
def __add__(self, x):
assert_(isinstance(x, PhysicalQuantity))
return PhysicalQuantity(float(x) + float(self))
__radd__ = __add__
def __sub__(self, x):
assert_(isinstance(x, PhysicalQuantity))
return PhysicalQuantity(float(self) - float(x))
def __rsub__(self, x):
assert_(isinstance(x, PhysicalQuantity))
return PhysicalQuantity(float(x) - float(self))
def __mul__(self, x):
return PhysicalQuantity(float(x) * float(self))
__rmul__ = __mul__
def __div__(self, x):
return PhysicalQuantity(float(self) / float(x))
def __rdiv__(self, x):
return PhysicalQuantity(float(x) / float(self))
class PhysicalQuantity2(ndarray):
__array_priority__ = 10
class TestLogspace:
def test_basic(self):
y = logspace(0, 6)
assert_(len(y) == 50)
y = logspace(0, 6, num=100)
assert_(y[-1] == 10 ** 6)
y = logspace(0, 6, endpoint=False)
assert_(y[-1] < 10 ** 6)
y = logspace(0, 6, num=7)
assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6])
def test_start_stop_array(self):
start = array([0., 1.])
stop = array([6., 7.])
t1 = logspace(start, stop, 6)
t2 = stack([logspace(_start, _stop, 6)
for _start, _stop in zip(start, stop)], axis=1)
assert_equal(t1, t2)
t3 = logspace(start, stop[0], 6)
t4 = stack([logspace(_start, stop[0], 6)
for _start in start], axis=1)
assert_equal(t3, t4)
t5 = logspace(start, stop, 6, axis=-1)
assert_equal(t5, t2.T)
def test_dtype(self):
y = logspace(0, 6, dtype='float32')
assert_equal(y.dtype, dtype('float32'))
y = logspace(0, 6, dtype='float64')
assert_equal(y.dtype, dtype('float64'))
y = logspace(0, 6, dtype='int32')
assert_equal(y.dtype, dtype('int32'))
def test_physical_quantities(self):
a = PhysicalQuantity(1.0)
b = PhysicalQuantity(5.0)
assert_equal(logspace(a, b), logspace(1.0, 5.0))
def test_subclass(self):
a = array(1).view(PhysicalQuantity2)
b = array(7).view(PhysicalQuantity2)
ls = logspace(a, b)
assert type(ls) is PhysicalQuantity2
assert_equal(ls, logspace(1.0, 7.0))
ls = logspace(a, b, 1)
assert type(ls) is PhysicalQuantity2
assert_equal(ls, logspace(1.0, 7.0, 1))
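# Illustrative helper (not part of the NumPy test suite; the name is made up):
# the identity these tests rely on is that logspace(a, b, n, base) is exactly
# base ** linspace(a, b, n).
def _logspace_identity_sketch(a=0.0, b=6.0, n=7, base=10.0):
    assert_allclose(logspace(a, b, n, base=base), base ** linspace(a, b, n))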
class TestGeomspace:
def test_basic(self):
y = geomspace(1, 1e6)
assert_(len(y) == 50)
y = geomspace(1, 1e6, num=100)
assert_(y[-1] == 10 ** 6)
y = geomspace(1, 1e6, endpoint=False)
assert_(y[-1] < 10 ** 6)
y = geomspace(1, 1e6, num=7)
assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6])
y = geomspace(8, 2, num=3)
assert_allclose(y, [8, 4, 2])
assert_array_equal(y.imag, 0)
y = geomspace(-1, -100, num=3)
assert_array_equal(y, [-1, -10, -100])
assert_array_equal(y.imag, 0)
y = geomspace(-100, -1, num=3)
assert_array_equal(y, [-100, -10, -1])
assert_array_equal(y.imag, 0)
def test_boundaries_match_start_and_stop_exactly(self):
# make sure that the boundaries of the returned array exactly
# equal 'start' and 'stop' - this isn't obvious because
# np.exp(np.log(x)) isn't necessarily exactly equal to x
start = 0.3
stop = 20.3
y = geomspace(start, stop, num=1)
assert_equal(y[0], start)
y = geomspace(start, stop, num=1, endpoint=False)
assert_equal(y[0], start)
y = geomspace(start, stop, num=3)
assert_equal(y[0], start)
assert_equal(y[-1], stop)
y = geomspace(start, stop, num=3, endpoint=False)
assert_equal(y[0], start)
def test_nan_interior(self):
with errstate(invalid='ignore'):
y = geomspace(-3, 3, num=4)
assert_equal(y[0], -3.0)
assert_(isnan(y[1:-1]).all())
assert_equal(y[3], 3.0)
with errstate(invalid='ignore'):
y = geomspace(-3, 3, num=4, endpoint=False)
assert_equal(y[0], -3.0)
assert_(isnan(y[1:]).all())
def test_complex(self):
# Purely imaginary
y = geomspace(1j, 16j, num=5)
assert_allclose(y, [1j, 2j, 4j, 8j, 16j])
assert_array_equal(y.real, 0)
y = geomspace(-4j, -324j, num=5)
assert_allclose(y, [-4j, -12j, -36j, -108j, -324j])
assert_array_equal(y.real, 0)
y = geomspace(1+1j, 1000+1000j, num=4)
assert_allclose(y, [1+1j, 10+10j, 100+100j, 1000+1000j])
y = geomspace(-1+1j, -1000+1000j, num=4)
assert_allclose(y, [-1+1j, -10+10j, -100+100j, -1000+1000j])
# Logarithmic spirals
y = geomspace(-1, 1, num=3, dtype=complex)
assert_allclose(y, [-1, 1j, +1])
y = geomspace(0+3j, -3+0j, 3)
assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j])
y = geomspace(0+3j, 3+0j, 3)
assert_allclose(y, [0+3j, 3/sqrt(2)+3j/sqrt(2), 3+0j])
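# Illustrative helper (not part of the NumPy test suite; the name is made up):
# for positive real endpoints geomspace is logspace applied to the log10 of the
# endpoints, i.e. each step multiplies by a constant ratio; the complex cases
# above extend this by interpolating log-magnitude and phase (a log spiral).
def _geomspace_identity_sketch(a=1.0, b=1e6, n=7):
    import numpy as np
    y = geomspace(a, b, n)
    assert_allclose(y, logspace(np.log10(a), np.log10(b), n))
    assert_allclose(y[1:] / y[:-1], (b / a) ** (1.0 / (n - 1)))  # constant ratio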
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `univariate.gaussian_kde` module."""
from unittest import TestCase
from unittest.mock import MagicMock, patch
import numpy as np
from scipy.stats import gaussian_kde
from copulas.univariate.gaussian_kde import GaussianKDE
class TestGaussianKDE(TestCase):
def test__get_model_no_sample_size(self):
self = MagicMock()
self._sample_size = None
self._params = {
'dataset': np.array([1, 2, 3, 4, 5])
}
self.bw_method = None
self.weights = None
model = GaussianKDE._get_model(self)
assert isinstance(model, gaussian_kde)
assert self._sample_size == 5
np.testing.assert_allclose(model.dataset, np.array([[1, 2, 3, 4, 5]]))
def test__get_model_sample_size(self):
self = MagicMock()
self._sample_size = 3
self._params = {
'dataset': np.array([1, 2, 3, 4, 5])
}
self.bw_method = None
self.weights = None
model = GaussianKDE._get_model(self)
assert isinstance(model, gaussian_kde)
assert self._sample_size == 3
np.testing.assert_allclose(model.dataset, np.array([[1, 2, 3, 4, 5]]))
def test__get_bounds(self):
self = MagicMock()
self._params = {
'dataset': np.array([1, 2, 3, 4, 5])
}
lower, upper = GaussianKDE._get_bounds(self)
k = 5 * np.std([1, 2, 3, 4, 5])
assert lower == 1 - k
assert upper == 5 + k
def test__fit_constant(self):
distribution = GaussianKDE()
distribution._fit_constant(np.array([1, 1, 1, 1]))
assert distribution._params == {
'dataset': [1, 1, 1, 1],
}
def test__fit_constant_sample_size(self):
distribution = GaussianKDE(sample_size=3)
distribution._fit_constant(np.array([1, 1, 1, 1]))
assert distribution._params == {
'dataset': [1, 1, 1],
}
def test__fit(self):
distribution = GaussianKDE()
distribution._fit(np.array([1, 2, 3, 4]))
assert distribution._params == {
'dataset': [1, 2, 3, 4],
}
def test__fit_sample_size(self):
distribution = GaussianKDE(sample_size=3)
distribution._fit(np.array([1, 2, 3, 4]))
assert len(distribution._params['dataset']) == 1
assert len(distribution._params['dataset'][0]) == 3
def test__is_constant_true(self):
distribution = GaussianKDE()
distribution.fit(np.array([1, 1, 1, 1]))
assert distribution._is_constant()
def test__is_constant_false(self):
distribution = GaussianKDE()
distribution.fit(np.array([1, 2, 3, 4]))
assert not distribution._is_constant()
@patch('copulas.univariate.gaussian_kde.scalarize', autospec=True)
@patch('copulas.univariate.gaussian_kde.partial', autospec=True)
def test__brentq_cdf(self, partial_mock, scalarize_mock):
"""_brentq_cdf returns a function that computes the cdf of a scalar minus its argument."""
# Setup
instance = GaussianKDE()
def mock_partial_return_value(x):
return x
scalarize_mock.return_value = 'scalar_function'
partial_mock.return_value = mock_partial_return_value
# Run
result = instance._brentq_cdf(0.5)
# Check
assert callable(result)
# result uses the return_value of partial_mock, so every value returned
# is (x - 0.5)
assert result(1.0) == 0.5
assert result(0.5) == 0
assert result(0.0) == -0.5
scalarize_mock.assert_called_once_with(GaussianKDE.cumulative_distribution)
partial_mock.assert_called_once_with('scalar_function', instance)
def test_cumulative_distribution(self):
"""cumulative_distribution evaluates with the model."""
instance = GaussianKDE()
instance.fit(np.array([0.9, 1.0, 1.1]))
cdf = instance.cumulative_distribution(np.array([
0.0, # There is no data below this (cdf = 0.0).
1.0, # Half the data is below this (cdf = 0.5).
2.0, # All the data is below this (cdf = 1.0).
-1.0 # There is no data below this (cdf = 0).
]))
assert np.all(np.isclose(cdf, np.array([0.0, 0.5, 1.0, 0.0]), atol=1e-3))
def test_percent_point(self):
"""percent_point evaluates with the model."""
instance = GaussianKDE()
instance.fit(np.array([0.5, 1.0, 1.5]))
cdf = instance.percent_point(np.array([0.001, 0.5, 0.999]))
assert cdf[0] < 0.0, "The 0.001th percentile should be small."
assert abs(cdf[1] - 1.0) < 0.1, "The 50% percentile should be the median."
assert cdf[2] > 2.0, "The 0.999th percentile should be large."
def test_percent_point_invalid_value(self):
"""Evaluating an invalid value will raise ValueError."""
fit_data = np.array([1, 2, 3, 4, 5])
instance = GaussianKDE()
instance.fit(fit_data)
with self.assertRaises(ValueError):
instance.percent_point(np.array([2.]))
@patch('copulas.univariate.gaussian_kde.gaussian_kde', autospec=True)
def test_sample(self, kde_mock):
"""Sample calls the gaussian_kde.resample method."""
instance = GaussianKDE()
instance.fit(np.array([1, 2, 3, 4]))
model = kde_mock.return_value
model.resample.return_value = np.array([[1, 2, 3]])
samples = instance.sample(3)
instance._model.resample.assert_called_once_with(3)
np.testing.assert_equal(samples, np.array([1, 2, 3]))
@patch('copulas.univariate.gaussian_kde.gaussian_kde', autospec=True)
def test_probability_density(self, kde_mock):
"""Sample calls the gaussian_kde.resample method."""
instance = GaussianKDE()
instance.fit(np.array([1, 2, 3, 4]))
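# Illustrative sketch (assumes scipy is available; the helper name and bracket
# bounds are made up, and this is not the copulas API): a percent_point-style
# inversion of a KDE's CDF can be done by root-finding F(x) - u = 0 with
# scipy.optimize.brentq, where F comes from gaussian_kde.integrate_box_1d,
# mirroring the _brentq_cdf behaviour tested above.
def _kde_ppf_sketch(data, u, lower=-10.0, upper=10.0):
    from scipy.optimize import brentq
    kde = gaussian_kde(data)
    def cdf(x):
        return kde.integrate_box_1d(-np.inf, x)
    return brentq(lambda x: cdf(x) - u, lower, upper)
# Example: the 50% point of a symmetric sample sits near its centre:
#   _kde_ppf_sketch(np.array([0.5, 1.0, 1.5]), 0.5)  # approximately 1.0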
import unittest
from functools import partial
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
## RELIEF ALGORITHM IMPLEMENTATION UNIT TESTS ##########
from algorithms.relief import Relief
class TestRelief(unittest.TestCase):
# Test initialization with default parameters.
def test_init_default(self):
relief = Relief()
self.assertEqual(relief.n_features_to_select, 10)
self.assertEqual(relief.m, -1)
self.assertNotEqual(relief.dist_func, None)
self.assertEqual(relief.learned_metric_func, None)
# Test initialization with explicit parameters.
def test_init_custom(self):
relief = Relief(n_features_to_select=15, m=80, dist_func=lambda x1, x2: np.sum(np.abs(x1-x2), 1), learned_metric_func = lambda x1, x2: np.sum(np.abs(x1-x2), 1))
self.assertEqual(relief.n_features_to_select, 15)
self.assertEqual(relief.m, 80)
self.assertNotEqual(relief.dist_func, None)
self.assertNotEqual(relief.learned_metric_func, None)
# Test update of feature weights.
def test_weights_update(self):
relief = Relief()
# Initialize parameter values.
data = np.array([[2.09525, 0.26961, 3.99627],
[9.86248, 6.22487, 8.77424],
[7.03015, 9.24269, 3.02136],
[8.95009, 8.52854, 0.16166],
[3.41438, 4.03548, 7.88157],
[2.01185, 0.84564, 6.16909],
[2.79316, 1.71541, 2.97578],
[3.22177, 0.16564, 5.79036],
[1.81406, 2.74643, 2.13259],
[4.77481, 8.01036, 7.57880]])
target = np.array([1, 2, 2, 2, 1, 1, 3, 3, 3, 1])
e = data[2, :] # The third example
closest_same = data[3, :] # Closest example from same class
closest_other = data[9, :] # Closest example from different class
weights = np.ones(data.shape[1]) # Current feature weights
m = data.shape[0] # Number of examples to sample
max_f_vals = np.max(data, 0) # Max value of each feature
min_f_vals = np.min(data, 0) # Min value of each feature
# Compute weights update
res = relief._update_weights(data, e, closest_same, closest_other, weights, m, max_f_vals, min_f_vals)
# Compare with results computed by hand.
correct_res = np.array([1.004167277552613, 1.0057086828870614, 1.01971232778099])
assert_array_almost_equal(res, correct_res, decimal=5)
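# Illustrative sketch (a simplified standalone version; the library's internal
# implementation may differ in detail): the classic Relief update exercised
# above rewards features that separate the nearest miss and penalises features
# that differ from the nearest hit, each normalised by the feature range and
# the number of sampled examples m:
#   w_f <- w_f - |e_f - hit_f| / (max_f - min_f) / m
#              + |e_f - miss_f| / (max_f - min_f) / m
def relief_update_sketch(e, closest_same, closest_other, weights, m,
                         max_f_vals, min_f_vals):
    span = max_f_vals - min_f_vals
    penalty = np.abs(e - closest_same) / span / m
    reward = np.abs(e - closest_other) / span / m
    return weights - penalty + reward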
'''Predict OCR images using keras model.
'''
import os
import itertools
import numpy as np
from tensorflow.python import keras
from tensorflow.python.keras.preprocessing import image
from scipy import ndimage
from PIL import Image, ImageDraw, ImageFont
# digit classes
# alphabet = u'0123456789 '
# English characters classes
alphabet = u'abcdefghijklmnopqrstuvwxyz '
OUT_DIR = r"E:\Users\Desktop\download\1"
# Reverse translation of numerical classes back to characters
def labels_to_text(labels):
ret = []
for c in labels:
if c == len(alphabet): # CTC Blank
ret.append("")
else:
ret.append(alphabet[c])
return "".join(ret)
# creates larger "blotches" of noise
def speckle(img):
severity = np.random.uniform(0, 0.6)
blur = ndimage.gaussian_filter(np.random.randn(*img.shape) * severity, 1)
img_speck = (img + blur)
img_speck[img_speck > 1] = 1
img_speck[img_speck <= 0] = 0
return img_speck
# paints text on canvas with random rotation and noise
def paint_text(text, w, h, rotate=True, ud=True, multi_fonts=True, save_path=None):
surface = Image.new('L', (w, h), 255) # "L" represents gray
border_w_h = (2, 2)
box_width = 0
box_height = 0
for font_size in range(32, 20, -1):
if multi_fonts:
fonts = ["simsun.ttc", "simhei.ttf", "msyh.ttc", "msyhbd.ttc", "msyhl.ttc",
"simfang.ttf", "simkai.ttf"]
font = ImageFont.truetype(np.random.choice(fonts), font_size)
else:
font = ImageFont.truetype("msyh.ttc", font_size)
box_width, box_height = font.getsize(text)
if box_width <= (w - 2 * border_w_h[0]) and box_height <= (h - border_w_h[1]):
break
elif font_size <= 21:
raise IOError(('Could not fit string into image. '
'Max char count is too large for given image width.'))
max_shift_x = w - box_width - border_w_h[0]
max_shift_y = h - box_height - border_w_h[1]
top_left_x = np.random.randint(border_w_h[0], int(max_shift_x) + 1)
if ud:
top_left_y = np.random.randint(0, int(max_shift_y) + 1)
else:
top_left_y = max_shift_y // 2
draw = ImageDraw.Draw(surface)
draw.text((top_left_x, top_left_y), text, fill=0, font=font)
a = np.array(surface, np.uint8)
a = a.astype(np.float32) / 255
a = np.expand_dims(a, 0)
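# Illustrative sketch (assumed, not shown in this script): greedy best-path CTC
# decoding of a network's softmax output, reusing `alphabet` and
# `labels_to_text` from above. `itertools.groupby` collapses repeated labels
# before blanks (index len(alphabet)) are dropped by labels_to_text.
def decode_batch_sketch(out_softmax):
    """out_softmax: array of shape (batch, timesteps, len(alphabet) + 1)."""
    results = []
    for sample in out_softmax:
        best_path = np.argmax(sample, axis=1)
        collapsed = [k for k, _ in itertools.groupby(best_path)]
        results.append(labels_to_text(collapsed))
    return results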
from .. import util
from ..probabilities import pulsars, mass
from ..core.data import Observations, Model
import h5py
import numpy as np
import astropy.units as u
import matplotlib.pyplot as plt
import matplotlib.colors as mpl_clr
import astropy.visualization as astroviz
__all__ = ['ModelVisualizer', 'CIModelVisualizer', 'ObservationsVisualizer']
class _ClusterVisualizer:
_MARKERS = ('o', '^', 'D', '+', 'x', '*', 's', 'p', 'h', 'v', '1', '2')
# Default xaxis limits for all profiles. Set by inits, can be reset by user
rlims = None
# -----------------------------------------------------------------------
# Artist setups
# -----------------------------------------------------------------------
def _setup_artist(self, fig, ax, *, use_name=True):
'''setup a plot (figure and ax) with one single ax'''
if ax is None:
if fig is None:
# no figure or ax provided, make one here
fig, ax = plt.subplots()
else:
# Figure provided, no ax provided. Try to grab it from the fig
# if that doesn't work, create it
cur_axes = fig.axes
if len(cur_axes) > 1:
raise ValueError(f"figure {fig} already has too many axes")
elif len(cur_axes) == 1:
ax = cur_axes[0]
else:
ax = fig.add_subplot()
else:
if fig is None:
# ax is provided, but no figure. Grab its figure from it
fig = ax.get_figure()
if hasattr(self, 'name') and use_name:
fig.suptitle(self.name)
return fig, ax
def _setup_multi_artist(self, fig, shape, *, allow_blank=True,
use_name=True, constrained_layout=True,
subfig_kw=None, **sub_kw):
'''setup a subplot with multiple axes'''
if subfig_kw is None:
subfig_kw = {}
def create_axes(base, shape):
'''create the axes of `shape` on this base (fig)'''
# make sure shape is a tuple of at least 1d, at most 2d
if not isinstance(shape, tuple):
# TODO doesn't work on an int
shape = tuple(shape)
if len(shape) == 1:
shape = (shape, 1)
elif len(shape) > 2:
mssg = f"Invalid `shape` for subplots {shape}, must be 2D"
raise ValueError(mssg)
# split into dict of nrows, ncols
shape = dict(zip(("nrows", "ncols"), shape))
# if either of them is also a tuple, means we want columns or rows
# of varying sizes, switch to using subfigures
# TODO what are the chances stuff like `sharex` works correctly?
if isinstance(shape['nrows'], tuple):
subfigs = base.subfigures(ncols=shape['ncols'], nrows=1,
squeeze=False, **subfig_kw)
for ind, sf in enumerate(subfigs.flatten()):
try:
nr = shape['nrows'][ind]
except IndexError:
if allow_blank:
continue
mssg = (f"Number of row entries {shape['nrows']} must "
f"match number of columns ({shape['ncols']})")
raise ValueError(mssg)
sf.subplots(ncols=1, nrows=nr, **sub_kw)
elif isinstance(shape['ncols'], tuple):
subfigs = base.subfigures(nrows=shape['nrows'], ncols=1,
squeeze=False, **subfig_kw)
for ind, sf in enumerate(subfigs.flatten()):
try:
nc = shape['ncols'][ind]
except IndexError:
if allow_blank:
continue
mssg = (f"Number of col entries {shape['ncols']} must "
f"match number of rows ({shape['nrows']})")
raise ValueError(mssg)
sf.subplots(nrows=1, ncols=nc, **sub_kw)
# otherwise just make a simple subplots and return that
else:
base.subplots(**shape, **sub_kw)
return base, base.axes
# ------------------------------------------------------------------
# Create figure, if necessary
# ------------------------------------------------------------------
if fig is None:
fig = plt.figure(constrained_layout=constrained_layout)
# ------------------------------------------------------------------
# If no shape is provided, just return the figure, probably empty
# ------------------------------------------------------------------
if shape is None:
axarr = []
# ------------------------------------------------------------------
# Otherwise attempt to first grab this figures axes, or create them
# ------------------------------------------------------------------
else:
# this fig has axes, check that they match shape
if axarr := fig.axes:
# TODO this won't actually work, cause fig.axes is just a list
if axarr.shape != shape:
mssg = (f"figure {fig} already contains axes with "
f"mismatched shape ({axarr.shape} != {shape})")
raise ValueError(mssg)
else:
fig, axarr = create_axes(fig, shape)
# ------------------------------------------------------------------
# If desired, default to titling the figure based on its "name"
# ------------------------------------------------------------------
if hasattr(self, 'name') and use_name:
fig.suptitle(self.name)
# ------------------------------------------------------------------
# Ensure the axes are always returned in an array
# ------------------------------------------------------------------
return fig, np.atleast_1d(axarr)
# -----------------------------------------------------------------------
# Unit support
# -----------------------------------------------------------------------
def _support_units(method):
import functools
@functools.wraps(method)
def _unit_decorator(self, *args, **kwargs):
# convert based on median distance parameter
eqvs = util.angular_width(self.d)
with astroviz.quantity_support(), u.set_enabled_equivalencies(eqvs):
return method(self, *args, **kwargs)
return _unit_decorator
# -----------------------------------------------------------------------
# Plotting functionality
# -----------------------------------------------------------------------
def _get_median(self, percs):
'''from an array of data percentiles, return the median array'''
return percs[percs.shape[0] // 2] if percs.ndim > 1 else percs
def _get_err(self, dataset, key):
'''gather the error variables corresponding to `key` from `dataset`'''
try:
return dataset[f'Δ{key}']
except KeyError:
try:
return (dataset[f'Δ{key},down'], dataset[f'Δ{key},up'])
except KeyError:
return None
def _plot_model(self, ax, data, intervals=None, *,
x_data=None, x_unit='pc', y_unit=None,
CI_kwargs=None, **kwargs):
CI_kwargs = dict() if CI_kwargs is None else CI_kwargs
# ------------------------------------------------------------------
# Evaluate the shape of the data array to determine confidence
# intervals, if applicable
# ------------------------------------------------------------------
if data is None or data.ndim == 0:
return
elif data.ndim == 1:
data = data.reshape((1, data.size))
if not (data.shape[0] % 2):
mssg = 'Invalid `data`, must have odd-numbered zeroth axis shape'
raise ValueError(mssg)
midpoint = data.shape[0] // 2
if intervals is None:
intervals = midpoint
elif intervals > midpoint:
mssg = f'{intervals}σ is outside stored range of {midpoint}σ'
raise ValueError(mssg)
# ------------------------------------------------------------------
# Convert any units desired
# ------------------------------------------------------------------
x_domain = self.r if x_data is None else x_data
if x_unit:
x_domain = x_domain.to(x_unit)
if y_unit:
data = data.to(y_unit)
# ------------------------------------------------------------------
# Plot the median (assumed to be the middle axis of the intervals)
# ------------------------------------------------------------------
median = data[midpoint]
med_plot, = ax.plot(x_domain, median, **kwargs)
# ------------------------------------------------------------------
# Plot confidence intervals successively from the midpoint
# ------------------------------------------------------------------
output = [med_plot]
CI_kwargs.setdefault('color', med_plot.get_color())
alpha = 0.8 / (intervals + 1)
for sigma in range(1, intervals + 1):
CI = ax.fill_between(
x_domain, data[midpoint + sigma], data[midpoint - sigma],
alpha=(1 - alpha), **CI_kwargs
)
output.append(CI)
alpha += alpha
return output
def _plot_data(self, ax, dataset, y_key, *,
x_key='r', x_unit='pc', y_unit=None,
err_transform=None, **kwargs):
# TODO need to handle colours better
defaultcolour = None
# ------------------------------------------------------------------
# Get data and relevant errors for plotting
# ------------------------------------------------------------------
xdata = dataset[x_key]
ydata = dataset[y_key]
xerr = self._get_err(dataset, x_key)
yerr = self._get_err(dataset, y_key)
# ------------------------------------------------------------------
# Convert any units desired
# ------------------------------------------------------------------
if x_unit is not None:
xdata = xdata.to(x_unit)
if y_unit is not None:
ydata = ydata.to(y_unit)
# ------------------------------------------------------------------
# If given, transform errors based on `err_transform` function
# ------------------------------------------------------------------
if err_transform is not None:
yerr = err_transform(yerr)
# ------------------------------------------------------------------
# Setup default plotting details, style, labels
# ------------------------------------------------------------------
kwargs.setdefault('marker', '.')
kwargs.setdefault('linestyle', 'None')
kwargs.setdefault('color', defaultcolour)
# TODO should try to cite, but if that fails just use raw bibcode?
label = dataset.cite()
if 'm' in dataset.mdata:
label += fr' ($m={dataset.mdata["m"]}\ M_\odot$)'
# ------------------------------------------------------------------
# Plot
# ------------------------------------------------------------------
# TODO not sure if I like the mfc=none style,
# mostly due to https://github.com/matplotlib/matplotlib/issues/3400
return ax.errorbar(xdata, ydata, xerr=xerr, yerr=yerr, mfc='none',
label=label, **kwargs)
def _plot_profile(self, ax, ds_pattern, y_key, model_data, *,
residuals=False, err_transform=None,
**kwargs):
'''figure out what needs to be plotted and call model/data plotters
all **kwargs passed to both _plot_model and _plot_data
model_data dimensions *must* be (mass bins, intervals, r axis)
'''
# TODO we might still want to allow for specific model/data kwargs?
ds_pattern = ds_pattern or ''
strict = kwargs.pop('strict', False)
# Restart marker styles each plotting call
markers = iter(self._MARKERS)
# TODO need to figure out how we handle passed kwargs better
default_clr = kwargs.pop('color', None)
# ------------------------------------------------------------------
# Determine the relevant datasets to the given pattern
# ------------------------------------------------------------------
datasets = self.obs.filter_datasets(ds_pattern)
if strict and ds_pattern and not datasets:
mssg = f"Dataset matching '{ds_pattern}' do not exist in {self.obs}"
# raise DataError
raise KeyError(mssg)
# ------------------------------------------------------------------
# Iterate over the datasets, keeping track of all relevant masses
# and calling `_plot_data`
# ------------------------------------------------------------------
masses = {}
for key, dset in datasets.items():
mrk = next(markers)
# get mass bin of this dataset, for later model plotting
if 'm' in dset.mdata:
m = dset.mdata['m'] * u.Msun
mass_bin = np.where(self.mj == m)[0][0]
else:
mass_bin = self.star_bin
if mass_bin in masses:
clr = masses[mass_bin][0][0].get_color()
else:
clr = default_clr
# plot the data
try:
line = self._plot_data(ax, dset, y_key, marker=mrk, color=clr,
err_transform=err_transform, **kwargs)
except KeyError as err:
if strict:
raise err
else:
# warnings.warn(err.args[0])
continue
masses.setdefault(mass_bin, [])
masses[mass_bin].append(line)
# ------------------------------------------------------------------
# Based on the masses of data plotted, plot the corresponding axes of
# the model data, calling `_plot_model`
# ------------------------------------------------------------------
if model_data is not None:
# ensure that the data is (mass bin, intervals, r domain)
if len(model_data.shape) != 3:
raise ValueError("invalid model data shape")
# No data plotted, use the star_bin
if not masses:
if model_data.shape[0] > 1:
masses = {self.star_bin: None}
else:
masses = {0: None}
res_ax = None
for mbin, errbars in masses.items():
ymodel = model_data[mbin, :, :]
# TODO having model/data be same color is kinda hard to read
# this is why I added mfc=none, but I don't like that either
if errbars is not None:
clr = errbars[0][0].get_color()
else:
clr = default_clr
self._plot_model(ax, ymodel, color=clr, **kwargs)
if residuals:
res_ax = self._add_residuals(ax, ymodel, errbars,
res_ax=res_ax, **kwargs)
if self.rlims is not None:
ax.set_xlim(*self.rlims)
# -----------------------------------------------------------------------
# Plot extras
# -----------------------------------------------------------------------
def _add_residuals(self, ax, ymodel, errorbars, *,
xmodel=None, y_unit=None, res_ax=None, **kwargs):
'''
errorbars : a list of outputs from calls to plt.errorbars
'''
from mpl_toolkits.axes_grid1 import make_axes_locatable
if not errorbars:
mssg = "Cannot compute residuals, no observables data provided"
raise ValueError(mssg)
# ------------------------------------------------------------------
# Get model data and spline
# ------------------------------------------------------------------
if xmodel is None:
xmodel = self.r
if y_unit is not None:
ymodel = ymodel.to(y_unit)
ymedian = self._get_median(ymodel)
yspline = util.QuantitySpline(xmodel, ymedian)
# ------------------------------------------------------------------
# Setup axes, adding a new smaller axe for the residual underneath,
# if it hasn't already been created (and passed to `res_ax`)
# ------------------------------------------------------------------
if res_ax is None:
divider = make_axes_locatable(ax)
res_ax = divider.append_axes('bottom', size="15%", pad=0, sharex=ax)
res_ax.grid()
res_ax.set_xscale(ax.get_xscale())
# ------------------------------------------------------------------
# Plot the model line, hopefully centred on zero
# ------------------------------------------------------------------
self._plot_model(res_ax, ymodel - ymedian, color='k')
# ------------------------------------------------------------------
# Get data from the plotted errorbars
# ------------------------------------------------------------------
for errbar in errorbars:
# --------------------------------------------------------------
# Get the actual datapoints, and the hopefully correct units
# --------------------------------------------------------------
xdata, ydata = errbar[0].get_data()
ydata = ydata.to(ymedian.unit)
# --------------------------------------------------------------
# Grab relevant formatting (colours and markers)
# --------------------------------------------------------------
clr = errbar[0].get_color()
mrk = errbar[0].get_marker()
# --------------------------------------------------------------
# Parse the errors from the size of the errorbar lines (messy)
# --------------------------------------------------------------
xerr = yerr = None
if errbar.has_xerr:
xerr_lines = errbar[2][0]
yerr_lines = errbar[2][1] if errbar.has_yerr else None
elif errbar.has_yerr:
xerr_lines, yerr_lines = None, errbar[2][0]
else:
xerr_lines = yerr_lines = None
if xerr_lines:
xerr = np.array([(np.diff(seg, axis=0) / 2)[..., -1]
for seg in xerr_lines.get_segments()]).T[0]
xerr <<= xdata.unit
if yerr_lines:
yerr = np.array([(np.diff(seg, axis=0) / 2)[..., -1]
for seg in yerr_lines.get_segments()]).T[0]
yerr <<= ydata.unit
# --------------------------------------------------------------
# Compute the residuals and plot them
# --------------------------------------------------------------
res = yspline(xdata) - ydata
res_ax.errorbar(xdata, res, xerr=xerr, yerr=yerr,
color=clr, marker=mrk, linestyle='none')
return res_ax
def _add_hyperparam(self, ax, ymodel, xdata, ydata, yerr):
# TODO this is still a complete mess
yspline = util.QuantitySpline(self.r, ymodel)
if hasattr(ax, 'aeff_text'):
aeff_str = ax.aeff_text.get_text()
aeff = float(aeff_str[aeff_str.rfind('$') + 1:])
else:
# TODO figure out best place to place this at
ax.aeff_text = ax.text(0.1, 0.3, '')
aeff = 0.
aeff += util.hyperparam_effective(ydata, yspline(xdata), yerr)
ax.aeff_text.set_text(fr'$\alpha_{{eff}}=${aeff:.4e}')
# -----------------------------------------------------------------------
# Observables plotting
# -----------------------------------------------------------------------
@_support_units
def plot_LOS(self, fig=None, ax=None,
show_obs=True, residuals=False, *,
x_unit='pc', y_unit='km/s'):
fig, ax = self._setup_artist(fig, ax)
ax.set_title('Line-of-Sight Velocity Dispersion')
ax.set_xscale("log")
if show_obs:
pattern, var = '*velocity_dispersion*', 'σ'
strict = show_obs == 'strict'
else:
pattern = var = None
strict = False
self._plot_profile(ax, pattern, var, self.LOS,
strict=strict, residuals=residuals,
x_unit=x_unit, y_unit=y_unit)
ax.legend()
return fig
@_support_units
def plot_pm_tot(self, fig=None, ax=None,
show_obs=True, residuals=False, *,
x_unit='pc', y_unit='mas/yr'):
fig, ax = self._setup_artist(fig, ax)
ax.set_title("Total Proper Motion")
ax.set_xscale("log")
if show_obs:
pattern, var = '*proper_motion*', 'PM_tot'
strict = show_obs == 'strict'
else:
pattern = var = None
strict = False
self._plot_profile(ax, pattern, var, self.pm_tot,
strict=strict, residuals=residuals,
x_unit=x_unit, y_unit=y_unit)
ax.legend()
return fig
@_support_units
def plot_pm_ratio(self, fig=None, ax=None,
show_obs=True, residuals=False, *,
x_unit='pc'):
fig, ax = self._setup_artist(fig, ax)
ax.set_title("Proper Motion Anisotropy")
ax.set_xscale("log")
if show_obs:
pattern, var = '*proper_motion*', 'PM_ratio'
strict = show_obs == 'strict'
else:
pattern = var = None
strict = False
self._plot_profile(ax, pattern, var, self.pm_ratio,
strict=strict, residuals=residuals,
x_unit=x_unit)
ax.legend()
return fig
@_support_units
def plot_pm_T(self, fig=None, ax=None,
show_obs=True, residuals=False, *,
x_unit='pc', y_unit='mas/yr'):
fig, ax = self._setup_artist(fig, ax)
ax.set_title("Tangential Proper Motion")
ax.set_xscale("log")
if show_obs:
pattern, var = '*proper_motion*', 'PM_T'
strict = show_obs == 'strict'
else:
pattern = var = None
strict = False
# pm_T = self.pm_T.to('mas/yr')
self._plot_profile(ax, pattern, var, self.pm_T,
strict=strict, residuals=residuals,
x_unit=x_unit, y_unit=y_unit)
ax.legend()
return fig
@_support_units
def plot_pm_R(self, fig=None, ax=None,
show_obs=True, residuals=False, *,
x_unit='pc', y_unit='mas/yr'):
fig, ax = self._setup_artist(fig, ax)
ax.set_title("Radial Proper Motion")
ax.set_xscale("log")
if show_obs:
pattern, var = '*proper_motion*', 'PM_R'
strict = show_obs == 'strict'
else:
pattern = var = None
strict = False
# pm_R = self.pm_R.to('mas/yr')
self._plot_profile(ax, pattern, var, self.pm_R,
strict=strict, residuals=residuals,
x_unit=x_unit, y_unit=y_unit)
ax.legend()
return fig
@_support_units
def plot_number_density(self, fig=None, ax=None,
show_obs=True, residuals=False, *,
x_unit='pc'):
def quad_nuisance(err):
return np.sqrt(err**2 + (self.s2 << err.unit**2))
fig, ax = self._setup_artist(fig, ax)
ax.set_title('Number Density')
ax.loglog()
if show_obs:
pattern, var = '*number_density*', 'Σ'
strict = show_obs == 'strict'
kwargs = {'err_transform': quad_nuisance}
else:
pattern = var = None
strict = False
kwargs = {}
self._plot_profile(ax, pattern, var, self.numdens,
strict=strict, residuals=residuals,
x_unit=x_unit, **kwargs)
# bit arbitrary, but probably fine for the most part
ax.set_ylim(bottom=1e-4)
ax.legend()
return fig
@_support_units
def plot_pulsar(self, fig=None, ax=None, show_obs=True):
# TODO this is out of date with the new pulsar probability code
# TODO I don't even think this is what we should use anymore, but the
# new convolved distributions peak
fig, ax = self._setup_artist(fig, ax)
ax.set_title('Pulsar LOS Acceleration')
ax.set_xlabel('R')
ax.set_ylabel(r'$a_{los}$')
maz = u.Quantity(np.empty(self.model.nstep - 1), '1/s')
for i in range(self.model.nstep - 1):
a_domain, Paz = pulsars.cluster_component(self.model, self.model.r[i], -1)
maz[i] = a_domain[Paz.argmax()] << maz.unit
maz = (self.obs['pulsar/P'] * maz).decompose()
if show_obs:
try:
obs_pulsar = self.obs['pulsar']
ax.errorbar(obs_pulsar['r'],
self.obs['pulsar/Pdot'],
yerr=self.obs['pulsar/ΔPdot'],
fmt='k.')
except KeyError as err:
if show_obs != 'attempt':
raise err
model_r = self.model.r.to(u.arcmin, util.angular_width(self.model.d))
upper_az, = ax.plot(model_r[:-1], maz)
ax.plot(model_r[:-1], -maz, c=upper_az.get_color())
return fig
@_support_units
def plot_pulsar_spin_dist(self, fig=None, ax=None, pulsar_ind=0,
show_obs=True, show_conv=False):
import scipy.interpolate as interp
fig, ax = self._setup_artist(fig, ax)
# pulsars = self.obs['pulsar']
puls_obs = self.obs['pulsar/spin']
id_ = puls_obs['id'][pulsar_ind].value.decode()
ax.set_title(f'Pulsar "{id_}" Period Derivative Likelihood')
ax.set_ylabel('Probability')
ax.set_xlabel(r'$\dot{P}/P$ $\left[s^{-1}\right]$')
mass_bin = -1
kde = pulsars.field_Pdot_KDE()
Pdot_min, Pdot_max = kde.dataset[1].min(), kde.dataset[1].max()
R = puls_obs['r'][pulsar_ind].to(u.pc)
P = puls_obs['P'][pulsar_ind].to('s')
Pdot_meas = puls_obs['Pdot'][pulsar_ind]
ΔPdot_meas = np.abs(puls_obs['ΔPdot'][pulsar_ind])
PdotP_domain, PdotP_c_prob = pulsars.cluster_component(self.model,
R, mass_bin)
Pdot_domain = (P * PdotP_domain).decompose()
# linear to avoid effects around asymptote
Pdot_c_spl = interp.UnivariateSpline(
Pdot_domain, PdotP_c_prob, k=1, s=0, ext=1
)
err = util.gaussian(x=Pdot_domain, sigma=ΔPdot_meas, mu=0)
err_spl = interp.UnivariateSpline(Pdot_domain, err, k=3, s=0, ext=1)
lg_P = np.log10(P / P.unit)
P_grid, Pdot_int_domain = np.mgrid[lg_P:lg_P:1j, Pdot_min:Pdot_max:200j]
P_grid, Pdot_int_domain = P_grid.ravel(), Pdot_int_domain.ravel()
Pdot_int_prob = kde(np.vstack([P_grid, Pdot_int_domain]))
Pdot_int_spl = interp.UnivariateSpline(
Pdot_int_domain, Pdot_int_prob, k=3, s=0, ext=1
)
Pdot_int_prob = util.RV_transform(
domain=10**Pdot_int_domain, f_X=Pdot_int_spl,
h=np.log10, h_prime=lambda y: (1 / (np.log(10) * y))
)
Pdot_int_spl = interp.UnivariateSpline(
10**Pdot_int_domain, Pdot_int_prob, k=3, s=0, ext=1
)
lin_domain = np.linspace(0., 1e-18, 5_000 // 2)
lin_domain = np.concatenate((np.flip(-lin_domain[1:]), lin_domain))
conv1 = np.convolve(err_spl(lin_domain), Pdot_c_spl(lin_domain), 'same')
conv2 = np.convolve(conv1, Pdot_int_spl(lin_domain), 'same')
# Normalize
conv2 /= interp.UnivariateSpline(
lin_domain, conv2, k=3, s=0, ext=1
).integral(-np.inf, np.inf)
cluster_μ = self.obs.mdata['μ'] << u.Unit("mas/yr")
PdotP_pm = pulsars.shklovskii_component(cluster_μ, self.model.d)
cluster_coords = (self.obs.mdata['b'], self.obs.mdata['l']) * u.deg
PdotP_gal = pulsars.galactic_component(*cluster_coords, D=self.model.d)
x_total = (lin_domain / P) + PdotP_pm + PdotP_gal
ax.plot(x_total, conv2)
if show_conv:
# Will really mess the scaling up, usually
ax.plot(x_total, Pdot_c_spl(lin_domain))
ax.plot(x_total, conv1)
if show_obs:
ax.axvline((Pdot_meas / P).decompose(), c='r', ls=':')
prob_dist = interp.interp1d(
(lin_domain / P) + PdotP_pm + PdotP_gal, conv2,
assume_sorted=True, bounds_error=False, fill_value=0.0
)
print('prob=', prob_dist((Pdot_meas / P).decompose()))
return fig
@_support_units
def plot_pulsar_orbital_dist(self, fig=None, ax=None, pulsar_ind=0,
show_obs=True, show_conv=False):
import scipy.interpolate as interp
fig, ax = self._setup_artist(fig, ax)
# pulsars = self.obs['pulsar']
puls_obs = self.obs['pulsar/orbital']
id_ = puls_obs['id'][pulsar_ind].value.decode()
ax.set_title(f'Pulsar "{id_}" Period Derivative Likelihood')
ax.set_ylabel('Probability')
ax.set_xlabel(r'$\dot{P}/P$ $\left[s^{-1}\right]$')
mass_bin = -1
R = puls_obs['r'][pulsar_ind].to(u.pc)
P = puls_obs['Pb'][pulsar_ind].to('s')
Pdot_meas = puls_obs['Pbdot'][pulsar_ind]
ΔPdot_meas = np.abs(puls_obs['ΔPbdot'][pulsar_ind])
PdotP_domain, PdotP_c_prob = pulsars.cluster_component(self.model,
R, mass_bin)
Pdot_domain = (P * PdotP_domain).decompose()
Pdot_c_spl = interp.UnivariateSpline(
Pdot_domain, PdotP_c_prob, k=1, s=0, ext=1
)
err = util.gaussian(x=Pdot_domain, sigma=ΔPdot_meas, mu=0)
err_spl = interp.UnivariateSpline(Pdot_domain, err, k=3, s=0, ext=1)
lin_domain = np.linspace(0., 1e-11, 5_000 // 2)
lin_domain = np.concatenate((np.flip(-lin_domain[1:]), lin_domain))
conv = np.convolve(err_spl(lin_domain), Pdot_c_spl(lin_domain), 'same')
# conv = np.convolve(err, PdotP_c_prob, 'same')
# Normalize
conv /= interp.UnivariateSpline(
lin_domain, conv, k=3, s=0, ext=1
).integral(-np.inf, np.inf)
cluster_μ = self.obs.mdata['μ'] << u.Unit("mas/yr")
PdotP_pm = pulsars.shklovskii_component(cluster_μ, self.model.d)
cluster_coords = (self.obs.mdata['b'], self.obs.mdata['l']) * u.deg
PdotP_gal = pulsars.galactic_component(*cluster_coords, D=self.model.d)
x_total = (lin_domain / P) + PdotP_pm + PdotP_gal
ax.plot(x_total, conv)
if show_conv:
# Will really mess the scaling up, usually
ax.plot(x_total, PdotP_c_prob)
ax.plot(x_total, conv)
if show_obs:
ax.axvline((Pdot_meas / P).decompose(), c='r', ls=':')
prob_dist = interp.interp1d(
x_total, conv,
assume_sorted=True, bounds_error=False, fill_value=0.0
)
print('prob=', prob_dist((Pdot_meas / P).decompose()))
return fig
@_support_units
def plot_all(self, fig=None, show_obs='attempt'):
'''Plots all the primary profiles (numdens, LOS, PM)
but *not* the mass function, pulsars, or any secondary profiles
(cum-mass, remnants, etc)
'''
fig, axes = self._setup_multi_artist(fig, (3, 2))
axes = axes.reshape((3, 2))
fig.suptitle(str(self.obs))
kw = {}
self.plot_number_density(fig=fig, ax=axes[0, 0], **kw)
self.plot_LOS(fig=fig, ax=axes[1, 0], **kw)
self.plot_pm_ratio(fig=fig, ax=axes[2, 0], **kw)
self.plot_pm_tot(fig=fig, ax=axes[0, 1], **kw)
self.plot_pm_T(fig=fig, ax=axes[1, 1], **kw)
self.plot_pm_R(fig=fig, ax=axes[2, 1], **kw)
for ax in axes.flatten():
ax.set_xlabel('')
return fig
# ----------------------------------------------------------------------
# Mass Function Plotting
# ----------------------------------------------------------------------
@_support_units
def plot_mass_func(self, fig=None, show_obs=True, show_fields=False, *,
colours=None, PI_legend=False, logscaled=False,
field_kw=None):
# ------------------------------------------------------------------
# Setup axes, splitting into two columns if necessary and adding the
# extra ax for the field plot if desired
# ------------------------------------------------------------------
N_rbins = sum([len(d) for d in self.mass_func.values()])
shape = ((int(np.ceil(N_rbins / 2)), int(np.floor(N_rbins / 2))), 2)
# If adding the fields, include an extra column on the left for it
if show_fields:
shape = ((1, *shape[0]), shape[1] + 1)
fig, axes = self._setup_multi_artist(fig, shape, sharex=True)
axes = axes.T.flatten()
ax_ind = 0
# ------------------------------------------------------------------
# If desired, use the `plot_MF_fields` method to show the fields
# ------------------------------------------------------------------
if show_fields:
ax = axes[ax_ind]
if field_kw is None:
field_kw = {}
field_kw.setdefault('radii', [])
# TODO need to figure out a good size and how to do it, for this ax
self.plot_MF_fields(fig, ax, **field_kw)
ax_ind += 1
# ------------------------------------------------------------------
# Iterate over each PI, gathering data to plot
# ------------------------------------------------------------------
for PI in sorted(self.mass_func,
key=lambda k: self.mass_func[k][0]['r1']):
bins = self.mass_func[PI]
# Get data for this PI
mf = self.obs[PI]
mbin_mean = (mf['m1'] + mf['m2']) / 2.
mbin_width = mf['m2'] - mf['m1']
N = mf['N'] / mbin_width
ΔN = mf['ΔN'] / mbin_width
# --------------------------------------------------------------
# Iterate over radial bin dicts for this PI
# --------------------------------------------------------------
for rind, rbin in enumerate(bins):
ax = axes[ax_ind]
clr = rbin.get('colour', None)
# ----------------------------------------------------------
# Plot observations
# ----------------------------------------------------------
if show_obs:
r_mask = ((mf['r1'] == rbin['r1'])
& (mf['r2'] == rbin['r2']))
N_data = N[r_mask].value
err_data = ΔN[r_mask].value
err = self.F * err_data
pnts = ax.errorbar(mbin_mean[r_mask], N_data, yerr=err,
fmt='o', color=clr)
clr = pnts[0].get_color()
# ----------------------------------------------------------
# Plot model. Doesn't utilize the `_plot_profile` method, as
# this is *not* a profile, but does use similar, but simpler,
# logic
# ----------------------------------------------------------
dNdm = rbin['dNdm']
midpoint = dNdm.shape[0] // 2
m_domain = self.mj[:dNdm.shape[-1]]
median = dNdm[midpoint]
med_plot, = ax.plot(m_domain, median, '--', c=clr)
alpha = 0.8 / (midpoint + 1)
for sigma in range(1, midpoint + 1):
ax.fill_between(
m_domain,
dNdm[midpoint + sigma],
dNdm[midpoint - sigma],
alpha=1 - alpha, color=clr
)
alpha += alpha
if logscaled:
ax.set_xscale('log')
ax.set_xlabel(None)
# ----------------------------------------------------------
# "Label" each bin with it's radial bounds.
# Uses fake text to allow for using loc='best' from `legend`.
# Really this should be a part of plt (see matplotlib#17946)
# ----------------------------------------------------------
r1 = rbin['r1'].to_value('arcmin')
r2 = rbin['r2'].to_value('arcmin')
fake = plt.Line2D([], [], label=f"r = {r1:.2f}'-{r2:.2f}'")
handles = [fake]
leg_kw = {'handlelength': 0, 'handletextpad': 0}
# If this is the first bin, also add a PI tag
if PI_legend and not rind and not show_fields:
pi_fake = plt.Line2D([], [], label=PI)
handles.append(pi_fake)
leg_kw['labelcolor'] = ['k', clr]
ax.legend(handles=handles, **leg_kw)
ax_ind += 1
# ------------------------------------------------------------------
# Put labels on subfigs
# ------------------------------------------------------------------
for sf in fig.subfigs[show_fields:]:
sf.supxlabel(r'Mass [$M_\odot$]')
fig.subfigs[show_fields].supylabel('dN/dm')
return fig
@_support_units
def plot_MF_fields(self, fig=None, ax=None, *, radii=("rh",),
cmap=None, grid=True):
'''plot all mass function fields in this observation
'''
import shapely.geometry as geom
fig, ax = self._setup_artist(fig, ax)
# Centre dot
ax.plot(0, 0, 'kx')
# ------------------------------------------------------------------
# Iterate over each PI and it's radial bins
# ------------------------------------------------------------------
for PI, bins in self.mass_func.items():
for rbin in bins:
# ----------------------------------------------------------
# Plot the field using this `Field` slice's own plotting method
# ----------------------------------------------------------
clr = rbin.get("colour", None)
rbin['field'].plot(ax, fc=clr, alpha=0.7, ec='k', label=PI)
# make this label private so it's only added once to legend
PI = f'_{PI}'
# ------------------------------------------------------------------
# If desired, add a "pseudo" grid in the polar projection, at 2
# arcmin intervals, up to the rt
# ------------------------------------------------------------------
# Ensure the gridlines don't affect the axes scaling
ax.autoscale(False)
if grid:
rt = self.rt if hasattr(self, 'rt') else (20 << u.arcmin)
ticks = np.arange(2, rt.to_value('arcmin'), 2)
# make sure this grid matches normal grids
grid_kw = {
'color': plt.rcParams.get('grid.color'),
'linestyle': plt.rcParams.get('grid.linestyle'),
'linewidth': plt.rcParams.get('grid.linewidth'),
'alpha': plt.rcParams.get('grid.alpha'),
'zorder': 0.5
}
for gr in ticks:
circle = np.array(geom.Point(0, 0).buffer(gr).exterior).T
gr_line, = ax.plot(*circle, **grid_kw)
ax.annotate(f"{gr:.0f}'", xy=(circle[0].max(), 0),
color=grid_kw['color'])
# ------------------------------------------------------------------
# Try to plot the various radii quantities from this model, if desired
# ------------------------------------------------------------------
# TODO for CI this could be a CI of rh, ra, rt actually (60)
for r_type in radii:
# This is to explicitly avoid very ugly exceptions from geom
if r_type not in {'rh', 'ra', 'rt'}:
mssg = f'radii must be one of {{rh, ra, rt}}, not `{r_type}`'
raise TypeError(mssg)
radius = getattr(self, r_type).to_value('arcmin')
circle = np.array(geom.Point(0, 0).buffer(radius).exterior).T
ax.plot(*circle, ls='--')
ax.text(0, circle[1].max(), r_type)
# ------------------------------------------------------------------
# Add plot labels and legends
# ------------------------------------------------------------------
ax.set_xlabel('RA [arcmin]')
ax.set_ylabel('DEC [arcmin]')
# TODO figure out a better way of handling this always using best? (75)
ax.legend(loc='upper left' if grid else 'best')
return fig
# -----------------------------------------------------------------------
# Model plotting
# -----------------------------------------------------------------------
@_support_units
def plot_density(self, fig=None, ax=None, kind='all', *,
x_unit='pc'):
if kind == 'all':
kind = {'MS', 'tot', 'BH', 'WD', 'NS'}
fig, ax = self._setup_artist(fig, ax)
# ax.set_title('Surface Mass Density')
# Total density
if 'tot' in kind:
kw = {"label": "Total", "color": "tab:cyan"}
self._plot_profile(ax, None, None, self.rho_tot,
x_unit=x_unit, **kw)
# Total Remnant density
if 'rem' in kind:
kw = {"label": "Remnants", "color": "tab:purple"}
self._plot_profile(ax, None, None, self.rho_rem,
x_unit=x_unit, **kw)
# Main sequence density
if 'MS' in kind:
kw = {"label": "Main-sequence stars", "color": "tab:orange"}
self._plot_profile(ax, None, None, self.rho_MS,
x_unit=x_unit, **kw)
if 'WD' in kind:
kw = {"label": "White Dwarfs", "color": "tab:green"}
self._plot_profile(ax, None, None, self.rho_WD,
x_unit=x_unit, **kw)
if 'NS' in kind:
kw = {"label": "Neutron Stars", "color": "tab:red"}
self._plot_profile(ax, None, None, self.rho_NS,
x_unit=x_unit, **kw)
# Black hole density
if 'BH' in kind:
kw = {"label": "Black Holes", "color": "tab:gray"}
self._plot_profile(ax, None, None, self.rho_BH,
x_unit=x_unit, **kw)
ax.set_yscale("log")
ax.set_xscale("log")
ax.set_ylabel(r'Mass Density $[M_\odot / pc^3]$')
# ax.set_xlabel('arcsec')
# ax.legend()
fig.legend(loc='upper center', ncol=6,
bbox_to_anchor=(0.5, 1.), fancybox=True)
return fig
@_support_units
def plot_surface_density(self, fig=None, ax=None, kind='all', *,
x_unit='pc'):
if kind == 'all':
kind = {'MS', 'tot', 'BH', 'WD', 'NS'}
fig, ax = self._setup_artist(fig, ax)
# ax.set_title('Surface Mass Density')
# Total density
if 'tot' in kind:
kw = {"label": "Total", "color": "tab:cyan"}
self._plot_profile(ax, None, None, self.Sigma_tot,
x_unit=x_unit, **kw)
# Total Remnant density
if 'rem' in kind:
kw = {"label": "Remnants", "color": "tab:purple"}
self._plot_profile(ax, None, None, self.Sigma_rem,
x_unit=x_unit, **kw)
# Main sequence density
if 'MS' in kind:
kw = {"label": "Main-sequence stars", "color": "tab:orange"}
self._plot_profile(ax, None, None, self.Sigma_MS,
x_unit=x_unit, **kw)
if 'WD' in kind:
kw = {"label": "White Dwarfs", "color": "tab:green"}
self._plot_profile(ax, None, None, self.Sigma_WD,
x_unit=x_unit, **kw)
if 'NS' in kind:
kw = {"label": "Neutron Stars", "color": "tab:red"}
self._plot_profile(ax, None, None, self.Sigma_NS,
x_unit=x_unit, **kw)
# Black hole density
if 'BH' in kind:
kw = {"label": "Black Holes", "color": "tab:gray"}
self._plot_profile(ax, None, None, self.Sigma_BH,
x_unit=x_unit, **kw)
ax.set_yscale("log")
ax.set_xscale("log")
ax.set_ylabel(r'Surface Density $[M_\odot / pc^2]$')
# ax.set_xlabel('arcsec')
# ax.legend()
fig.legend(loc='upper center', ncol=6,
bbox_to_anchor=(0.5, 1.), fancybox=True)
return fig
@_support_units
def plot_cumulative_mass(self, fig=None, ax=None, kind='all', *,
x_unit='pc'):
if kind == 'all':
kind = {'MS', 'tot', 'BH', 'WD', 'NS'}
fig, ax = self._setup_artist(fig, ax)
# ax.set_title('Cumulative Mass')
# Total density
if 'tot' in kind:
kw = {"label": "Total", "color": "tab:cyan"}
self._plot_profile(ax, None, None, self.cum_M_tot,
x_unit=x_unit, **kw)
# Main sequence density
if 'MS' in kind:
kw = {"label": "Main-sequence stars", "color": "tab:orange"}
self._plot_profile(ax, None, None, self.cum_M_MS,
x_unit=x_unit, **kw)
if 'WD' in kind:
kw = {"label": "White Dwarfs", "color": "tab:green"}
self._plot_profile(ax, None, None, self.cum_M_WD,
x_unit=x_unit, **kw)
if 'NS' in kind:
kw = {"label": "Neutron Stars", "color": "tab:red"}
self._plot_profile(ax, None, None, self.cum_M_NS,
x_unit=x_unit, **kw)
# Black hole density
if 'BH' in kind:
kw = {"label": "Black Holes", "color": "tab:gray"}
self._plot_profile(ax, None, None, self.cum_M_BH,
x_unit=x_unit, **kw)
ax.set_yscale("log")
ax.set_xscale("log")
# ax.set_ylabel(rf'$M_{{enc}} ({self.cum_M_tot.unit})$')
ax.set_ylabel(rf'$M_{{enc}}$ $[M_\odot]$')
# ax.set_xlabel('arcsec')
# ax.legend()
fig.legend(loc='upper center', ncol=5,
bbox_to_anchor=(0.5, 1.), fancybox=True)
return fig
@_support_units
def plot_remnant_fraction(self, fig=None, ax=None, *, x_unit='pc'):
'''Fraction of mass in remnants vs MS stars, like in Baumgardt'''
fig, ax = self._setup_artist(fig, ax)
ax.set_title("Remnant Fraction")
ax.set_xscale("log")
self._plot_profile(ax, None, None, self.frac_M_MS,
x_unit=x_unit, label="Main-sequence stars")
self._plot_profile(ax, None, None, self.frac_M_rem,
x_unit=x_unit, label="Remnants")
ax.set_ylabel(r"Mass fraction $M_{MS}/M_{tot}$, $M_{remn.}/M_{tot}$")
ax.set_ylim(0.0, 1.0)
ax.legend()
return fig
# --------------------------------------------------------------------------
# Visualizers
# --------------------------------------------------------------------------
class ModelVisualizer(_ClusterVisualizer):
'''
class for making, showing, saving all the plots related to a single model
'''
@classmethod
def from_chain(cls, chain, observations, method='median'):
'''
create a Visualizer instance based on a chain, by taking the median
of the chain parameters
'''
reduc_methods = {'median': np.median, 'mean': np.mean}
# if 3d (Niters, Nwalkers, Nparams)
# if 2d (Nwalkers, Nparams)
# if 1d (Nparams)
chain = chain.reshape((-1, chain.shape[-1]))
theta = reduc_methods[method](chain, axis=0)
return cls(Model(theta, observations), observations)
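# Hypothetical usage sketch (not part of the class): `chain` is assumed to
# be an emcee-style sample array and `observations` an Observations object
# accepted by `Model`; the plotted quantities follow the methods above.
#
#   viz = ModelVisualizer.from_chain(chain, observations, method='median')
#   fig = viz.plot_cumulative_mass(kind={'tot', 'BH'})
#   fig.savefig('cumulative_mass.png')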
@classmethod
def from_theta(cls, theta, observations):
'''
create a Visualizer instance based on a theta, see `Model` for allowed
theta types
'''
return cls(Model(theta, observations), observations)
def __init__(self, model, observations=None):
self.model = model
self.obs = observations if observations else model.observations
self.rh = model.rh
self.ra = model.ra
self.rt = model.rt
self.F = model.F
self.s2 = model.s2
self.d = model.d
self.r = model.r
self.rlims = (9e-3, self.r.max() + (5 << self.r.unit))
self._2πr = 2 * np.pi * model.r
self.star_bin = model.nms - 1
self.mj = model.mj
self.LOS = np.sqrt(self.model.v2pj)[:, np.newaxis, :]
self.pm_T = np.sqrt(model.v2Tj)[:, np.newaxis, :]
self.pm_R = np.sqrt(model.v2Rj)[:, np.newaxis, :]
self.pm_tot = np.sqrt(0.5 * (self.pm_T**2 + self.pm_R**2))
self.pm_ratio = self.pm_T / self.pm_R
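# pm_tot is the rms of the tangential and radial proper-motion dispersions,
# and pm_ratio (sigma_T / sigma_R) is a simple tracer of velocity anisotropy
# (isotropic orbits give a ratio of 1).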
self._init_numdens(model, observations)
self._init_massfunc(model, observations)
self._init_surfdens(model, observations)
self._init_dens(model, observations)
self._init_mass_frac(model, observations)
self._init_cum_mass(model, observations)
# TODO a lot of these init functions could be more homogeneous
@_ClusterVisualizer._support_units
def _init_numdens(self, model, observations):
# TODO make this more robust and cleaner
model_nd = model.Sigmaj / model.mj[:, np.newaxis]
nd = np.empty(model_nd.shape)[:, np.newaxis, :] << model_nd.unit
# If have nd obs, apply scaling factor K
for mbin in range(model_nd.shape[0]):
try:
obs_nd = observations['number_density']
obs_r = obs_nd['r'].to(model.r.unit)
nd_interp = util.QuantitySpline(model.r, model_nd[mbin, :])
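# K below is the analytic weighted least-squares amplitude matching the
# model profile to the observed number density: minimising
# sum_i w_i * (S_obs_i - K * n_mod(r_i))**2 with w_i = 1 / S_obs_i**2
# gives K = sum(w * S_obs * n_mod) / sum(w * n_mod**2), as computed here.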
K = (np.nansum(obs_nd['Σ'] * nd_interp(obs_r) / obs_nd['Σ']**2)
/ np.nansum(nd_interp(obs_r)**2 / obs_nd['Σ']**2))
except KeyError:
K = 1
nd[mbin, 0, :] = K * model_nd[mbin, :]
self.numdens = nd
@_ClusterVisualizer._support_units
def _init_massfunc(self, model, observations, *, cmap=None):
'''
sets self.mass_func as a dict of PI's, where each PI has a list of
subdicts. Each subdict represents a single radial slice (within this PI)
and contains the radii, the mass func values, and the field slice
'''
cmap = cmap or plt.cm.rainbow
self.mass_func = {}
cen = (observations.mdata['RA'], observations.mdata['DEC'])
PI_list = observations.filter_datasets('*mass_function*')
densityj = [util.QuantitySpline(model.r, model.Sigmaj[j])
for j in range(model.nms)]
for i, (key, mf) in enumerate(PI_list.items()):
self.mass_func[key] = []
# TODO same colour for each PI or different for each slice?
clr = cmap(i / len(PI_list))
field = mass.Field.from_dataset(mf, cen=cen)
rbins = np.unique(np.c_[mf['r1'], mf['r2']], axis=0)
rbins.sort(axis=0)
for r_in, r_out in rbins:
this_slc = {'r1': r_in, 'r2': r_out}
field_slice = field.slice_radially(r_in, r_out)
this_slc['field'] = field_slice
this_slc['colour'] = clr
this_slc['dNdm'] = np.empty((1, model.nms))
sample_radii = field_slice.MC_sample(300).to(u.pc)
for j in range(model.nms):
Nj = field_slice.MC_integrate(densityj[j], sample_radii)
widthj = (model.mj[j] * model.mes_widths[j])
this_slc['dNdm'][0, j] = (Nj / widthj).value
self.mass_func[key].append(this_slc)
@_ClusterVisualizer._support_units
def _init_dens(self, model, observations):
shp = (np.newaxis, np.newaxis, slice(None))
self.rho_tot = np.sum(model.rhoj, axis=0)[shp]
self.rho_MS = np.sum(model.rhoj[model._star_bins], axis=0)[shp]
self.rho_rem = np.sum(model.rhoj[model._remnant_bins], axis=0)[shp]
self.rho_BH = np.sum(model.BH_rhoj, axis=0)[shp]
self.rho_WD = np.sum(model.WD_rhoj, axis=0)[shp]
self.rho_NS = np.sum(model.NS_rhoj, axis=0)[shp]
@_ClusterVisualizer._support_units
def _init_surfdens(self, model, observations):
shp = (np.newaxis, np.newaxis, slice(None))
self.Sigma_tot = np.sum(model.Sigmaj, axis=0)[shp]
self.Sigma_MS = np.sum(model.Sigmaj[model._star_bins], axis=0)[shp]
self.Sigma_rem = np.sum(model.Sigmaj[model._remnant_bins], axis=0)[shp]
self.Sigma_BH = np.sum(model.BH_Sigmaj, axis=0)[shp]
self.Sigma_WD = np.sum(model.WD_Sigmaj, axis=0)[shp]
self.Sigma_NS = np.sum(model.NS_Sigmaj, axis=0)[shp]
@_ClusterVisualizer._support_units
def _init_mass_frac(self, model, observations):
int_MS = util.QuantitySpline(self.r, self._2πr * self.Sigma_MS)
int_rem = util.QuantitySpline(self.r, self._2πr * self.Sigma_rem)
int_tot = util.QuantitySpline(self.r, self._2πr * self.Sigma_tot)
mass_MS = np.empty((1, 1, self.r.size))
mass_rem = np.empty((1, 1, self.r.size))
mass_tot = np.empty((1, 1, self.r.size))
# TODO the rbins at the end always mess up fractional stuff, drop to 0
mass_MS[0, 0, 0] = mass_rem[0, 0, 0] = mass_tot[0, 0, 0] = np.nan
for i in range(1, self.r.size - 2):
mass_MS[0, 0, i] = int_MS.integral(self.r[i], self.r[i + 1]).value
mass_rem[0, 0, i] = int_rem.integral(self.r[i], self.r[i + 1]).value
mass_tot[0, 0, i] = int_tot.integral(self.r[i], self.r[i + 1]).value
self.frac_M_MS = mass_MS / mass_tot
self.frac_M_rem = mass_rem / mass_tot
@_ClusterVisualizer._support_units
def _init_cum_mass(self, model, observations):
int_tot = util.QuantitySpline(self.r, self._2πr * self.Sigma_tot)
int_MS = util.QuantitySpline(self.r, self._2πr * self.Sigma_MS)
int_BH = util.QuantitySpline(self.r, self._2πr * self.Sigma_BH)
int_WD = util.QuantitySpline(self.r, self._2πr * self.Sigma_WD)
int_NS = util.QuantitySpline(self.r, self._2πr * self.Sigma_NS)
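# Integrating each 2*pi*r*Sigma(r) spline from the innermost model radius
# out to r gives the projected enclosed mass, M(<r) = int 2*pi*r' Sigma(r') dr',
# which the loop below accumulates for every radial grid point.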
cum_tot = np.empty((1, 1, self.r.size)) << u.Msun
cum_MS = np.empty((1, 1, self.r.size)) << u.Msun
cum_BH = np.empty((1, 1, self.r.size)) << u.Msun
cum_WD = np.empty((1, 1, self.r.size)) << u.Msun
cum_NS = np.empty((1, 1, self.r.size)) << u.Msun
for i in range(0, self.r.size):
cum_tot[0, 0, i] = int_tot.integral(model.r[0], model.r[i])
cum_MS[0, 0, i] = int_MS.integral(model.r[0], model.r[i])
cum_BH[0, 0, i] = int_BH.integral(model.r[0], model.r[i])
cum_WD[0, 0, i] = int_WD.integral(model.r[0], model.r[i])
cum_NS[0, 0, i] = int_NS.integral(model.r[0], model.r[i])
self.cum_M_tot = cum_tot
self.cum_M_MS = cum_MS
self.cum_M_WD = cum_WD
self.cum_M_NS = cum_NS
self.cum_M_BH = cum_BH
class CIModelVisualizer(_ClusterVisualizer):
'''
class for making, showing, saving all the plots related to a bunch of models
in the form of confidence intervals
'''
@_ClusterVisualizer._support_units
def plot_BH_mass(self, fig=None, ax=None, bins='auto', color='b'):
fig, ax = self._setup_artist(fig, ax)
color = mpl_clr.to_rgb(color)
facecolor = color + (0.33, )
ax.hist(self.BH_mass, histtype='stepfilled',
bins=bins, ec=color, fc=facecolor, lw=2)
return fig
@_ClusterVisualizer._support_units
def plot_BH_num(self, fig=None, ax=None, bins='auto', color='b'):
fig, ax = self._setup_artist(fig, ax)
color = mpl_clr.to_rgb(color)
facecolor = color + (0.33, )
ax.hist(self.BH_num, histtype='stepfilled',
bins=bins, ec=color, fc=facecolor, lw=2)
return fig
@classmethod
def from_chain(cls, chain, observations, N=100, *, verbose=True, pool=None):
import functools
viz = cls()
viz.N = N
viz.obs = observations
# ------------------------------------------------------------------
# Get info about the chain and set of models
# ------------------------------------------------------------------
# Flatten walkers, if not already
chain = chain.reshape((-1, chain.shape[-1]))[-N:]
median_chain = np.median(chain, axis=0)
# TODO get these indices more dynamically
viz.F = median_chain[7]
viz.s2 = median_chain[6]
viz.d = median_chain[12] << u.kpc
# Setup the radial domain to interpolate everything onto
# We estimate the maximum radius needed will be given by the model with
# the largest value of the truncation parameter "g". This should be a
# valid enough assumption for our needs. While we have it, we'll also
# use this model to grab the other values we need, which shouldn't
# change much between models, so using this extreme model is okay.
# warning: in very large N samples, this g might be huge, and lead to a
# very large rt. I'm not really sure yet how that might affect the CIs
# or plots
huge_model = Model(chain[np.argmax(chain[:, 4])], viz.obs)
viz.rt = huge_model.rt
viz.r = np.r_[0, np.geomspace(1e-5, viz.rt.value, num=99)] << u.pc
viz.rlims = (9e-3, viz.r.max() + (5 << viz.r.unit))
# Assume that this example model has same nms, mj[:nms] as all models
# This approximation isn't exactly correct, but close enough for plots
# viz.star_bin = huge_model.nms - 1
viz.star_bin = 0
mj_MS = huge_model.mj[:huge_model.nms]
mj_tracer = huge_model.mj[huge_model._tracer_bins]
viz.mj = np.r_[mj_MS, mj_tracer]
# ------------------------------------------------------------------
# Setup the final full parameters arrays with dims of
# [mass bins, intervals (from percentile of models), radial bins] for
# all "profile" datasets
# ------------------------------------------------------------------
# velocities
vel_unit = np.sqrt(huge_model.v2Tj).unit
Nm = 1 + len(mj_tracer)
vpj = np.empty((Nm, N, viz.r.size)) << vel_unit
vTj, vRj, vtotj = vpj.copy(), vpj.copy(), vpj.copy()
vaj = np.empty((Nm, N, viz.r.size)) << u.dimensionless_unscaled
# mass density
rho_unit = huge_model.rhoj.unit
rho_tot = np.empty((1, N, viz.r.size)) << rho_unit
rho_MS, rho_BH = rho_tot.copy(), rho_tot.copy()
rho_WD, rho_NS = rho_tot.copy(), rho_tot.copy()
# surface density
Sigma_unit = huge_model.Sigmaj.unit
Sigma_tot = np.empty((1, N, viz.r.size)) << Sigma_unit
Sigma_MS, Sigma_BH = Sigma_tot.copy(), Sigma_tot.copy()
Sigma_WD, Sigma_NS = Sigma_tot.copy(), Sigma_tot.copy()
# Cumulative mass
mass_unit = huge_model.M.unit
cum_M_tot = np.empty((1, N, viz.r.size)) << mass_unit
cum_M_MS, cum_M_BH = cum_M_tot.copy(), cum_M_tot.copy()
cum_M_WD, cum_M_NS = cum_M_tot.copy(), cum_M_tot.copy()
# Mass Fraction
frac_M_MS = np.empty((1, N, viz.r.size)) << u.dimensionless_unscaled
frac_M_rem = frac_M_MS.copy()
# number density
numdens = np.empty((1, N, viz.r.size)) << u.arcmin**-2
# mass function
massfunc = viz._prep_massfunc(viz.obs)
# massfunc = np.empty((N, N_rbins, huge_model.nms))
for rbins in massfunc.values():
for rslice in rbins:
rslice['dNdm'] = np.empty((N, huge_model.nms))
# BH mass
BH_mass = np.empty(N) << u.Msun
BH_num = np.empty(N) << u.dimensionless_unscaled
# ------------------------------------------------------------------
# Setup iteration and pooling
# ------------------------------------------------------------------
# TODO currently does nothing
# if verbose:
# import tqdm
# chain_loader = tqdm.tqdm(chain)
# else:
# chain_loader = chain
# TODO assuming that chain always converges, might err if not the case
get_model = functools.partial(Model, observations=viz.obs)
try:
_map = map if pool is None else pool.imap_unordered
except AttributeError:
mssg = ("Invalid pool, currently only support pools with an "
"`imap_unordered` method")
raise ValueError(mssg)
# ------------------------------------------------------------------
# iterate over all models in the sample and compute/store their
# relevant parameters
# ------------------------------------------------------------------
for model_ind, model in enumerate(_map(get_model, chain)):
equivs = util.angular_width(model.d)
# Velocities
# convoluted way of going from a slice to a list of indices
tracers = list(range(len(model.mj))[model._tracer_bins])
for i, mass_bin in enumerate([model.nms - 1] + tracers):
slc = (i, model_ind, slice(None))
vTj[slc], vRj[slc], vtotj[slc], \
vaj[slc], vpj[slc] = viz._init_velocities(model, mass_bin)
slc = (0, model_ind, slice(None))
# Mass Densities
rho_MS[slc], rho_tot[slc], rho_BH[slc], \
rho_WD[slc], rho_NS[slc] = viz._init_dens(model)
# Surface Densities
Sigma_MS[slc], Sigma_tot[slc], Sigma_BH[slc], \
Sigma_WD[slc], Sigma_NS[slc] = viz._init_surfdens(model)
# Cumulative Mass distribution
cum_M_MS[slc], cum_M_tot[slc], cum_M_BH[slc], \
cum_M_WD[slc], cum_M_NS[slc] = viz._init_cum_mass(model)
# Number Densities
numdens[slc] = viz._init_numdens(model, equivs=equivs)
# Mass Functions
for rbins in massfunc.values():
for rslice in rbins:
mf = rslice['dNdm']
mf[model_ind, ...] = viz._init_dNdm(model, rslice, equivs)
# Mass Fractions
frac_M_MS[slc], frac_M_rem[slc] = viz._init_mass_frac(model)
# Black holes
BH_mass[model_ind] = np.sum(model.BH_Mj)
BH_num[model_ind] = np.sum(model.BH_Nj)
# ------------------------------------------------------------------
# compute and store the percentiles and medians
# ------------------------------------------------------------------
# TODO get sigmas dynamically based on an arg
q = [97.72, 84.13, 50., 15.87, 2.28]
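# These percentiles bracket the median with the Gaussian +/-1 sigma
# (15.87 / 84.13) and +/-2 sigma (2.28 / 97.72) equivalent intervals.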
axes = (1, 0, 2)  # `np.percentile` puts the percentile axis first; move it back to axis 1
viz.pm_T = np.transpose(np.percentile(vTj, q, axis=1), axes)
viz.pm_R = np.transpose(np.percentile(vRj, q, axis=1), axes)
viz.pm_tot = np.transpose(np.percentile(vtotj, q, axis=1), axes)
viz.pm_ratio = np.transpose(np.nanpercentile(vaj, q, axis=1), axes)
viz.LOS = np.transpose(np.percentile(vpj, q, axis=1), axes)
viz.rho_MS = np.transpose(np.percentile(rho_MS, q, axis=1), axes)
viz.rho_tot = np.transpose(np.percentile(rho_tot, q, axis=1), axes)
viz.rho_BH = np.transpose(np.percentile(rho_BH, q, axis=1), axes)
viz.rho_WD = np.transpose(np.percentile(rho_WD, q, axis=1), axes)
viz.rho_NS = np.transpose(np.percentile(rho_NS, q, axis=1), axes)
viz.Sigma_MS = np.transpose(np.percentile(Sigma_MS, q, axis=1), axes)
viz.Sigma_tot = np.transpose(np.percentile(Sigma_tot, q, axis=1), axes)
viz.Sigma_BH = np.transpose(np.percentile(Sigma_BH, q, axis=1), axes)
viz.Sigma_WD = np.transpose(np.percentile(Sigma_WD, q, axis=1), axes)
viz.Sigma_NS = np.transpose(np.percentile(Sigma_NS, q, axis=1), axes)
viz.cum_M_MS = np.transpose(np.percentile(cum_M_MS, q, axis=1), axes)
viz.cum_M_tot = np.transpose(np.percentile(cum_M_tot, q, axis=1), axes)
viz.cum_M_BH = np.transpose(np.percentile(cum_M_BH, q, axis=1), axes)
viz.cum_M_WD = np.transpose(np.percentile(cum_M_WD, q, axis=1), axes)
viz.cum_M_NS = np.transpose(np.percentile(cum_M_NS, q, axis=1), axes)
viz.numdens = np.transpose(np.percentile(numdens, q, axis=1), axes)
viz.mass_func = massfunc
for rbins in viz.mass_func.values():
for rslice in rbins:
rslice['dNdm'] = np.percentile(rslice['dNdm'], q, axis=0)
viz.frac_M_MS = np.transpose(np.percentile(frac_M_MS, q, axis=1), axes)
viz.frac_M_rem = np.transpose(np.percentile(frac_M_rem, q, axis=1), axes)
import os
import copy
import datetime
import numpy as np
import xarray as xr
import pandas as pd
from collections import Counter
from ahh.ext import (round_to, get_order_mag, report_err, lonw2e)
from ahh.sci import get_stats, get_norm_anom, get_anom, get_norm
from ahh.era import td2dict
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import matplotlib.patches as mpatches
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.dates import YearLocator, MonthLocator, DayLocator,\
HourLocator, MinuteLocator, AutoDateLocator, \
DateFormatter, AutoDateFormatter
from matplotlib.ticker import MultipleLocator, \
FormatStrFormatter
import matplotlib.dates as mdates
__author__ = '<EMAIL>'
__copyright__ = '<NAME>'
class MissingInput(Exception):
pass
class Unsupported(Exception):
pass
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
DEFAULT = {
'scale': 1,
'projection': None,
'dpi': 105,
'sizes': {
'figure': {'smallest': 6,
'smaller': 9,
'small': 12,
'medium': 14,
'large': 16,
'larger': 20,
'largest': 24
},
'text': {'smallest': 5.5,
'smaller': 7.5,
'small': 12,
'medium': 14,
'large': 16,
'larger': 20,
'largest': 24
},
'line': {'smallest': 0.4,
'smaller': 0.65,
'small': 1,
'medium': 1.15,
'large': 1.3,
'larger': 1.5,
'largest': 2
},
'tick': {'smallest': 0.05,
'smaller': 0.15,
'small': 0.2,
'medium': 0.55,
'large': 1.0,
'larger': 1.25,
'largest': 1.5
},
'bar': {'smallest': 6,
'smaller': 9,
'small': 12,
'medium': 14,
'large': 16,
'larger': 20,
'largest': 24
},
'marker': {'smallest': 6,
'smaller': 9,
'small': 12,
'medium': 14,
'large': 16,
'larger': 20,
'largest': 24
},
'title pad': {'smallest': 0.985,
'smaller': 0.995,
'small': 1.0,
'medium': 1.01,
'large': 1.03,
'larger': 1.05,
'largest': 1.07
},
'pad': {'smallest': 0.15,
'smaller': 0.2,
'small': 0.3,
'medium': 0.45,
'large': 0.6,
'larger': 0.85,
'largest': 1.0
}
},
'styles': {
'color': {'green': '#145222',
'red': '#DF0909',
'orange': '#E68D00',
'pink': '#CE5F5F',
'magenta': '#9E005D',
'teal': '#66A7C5',
'yellow': '#E0D962',
'stone': '#6462E0',
'blue': '#2147B1',
'purple': '#630460',
'black': '#202020',
'light gray': '#DADADA',
'gray': '#5B5B5B',
'white': '#FFFFFF',
},
'tc_color': {'dep': '#7EC6FF',
'storm': '#00F9F3',
'one': '#FFFFC6',
'two': '#FFFF5A',
'three': '#FFD97E',
'four': '#FF9C00',
'five': '#FF5454'
},
'alpha': {'transparent': 0.2,
'translucid': 0.3,
'translucent': 0.5,
'semi opaque': 0.75,
'opaque': 0.95,
}
},
'figtext': {'loc': 'bottom right',
'center bottom': {
'xy_loc': (0.5, 0.05),
'ha': 'center',
'va': 'center',
'lef_marg': 0.05,
'rig_marg': 0.95,
'bot_marg': 0.15,
'top_marg': 0.95
},
'center left': {'xy_loc': (0.1, 0.5),
'ha': 'right',
'va': 'center',
'lef_marg': 0.175,
'rig_marg': 0.95,
'bot_marg': 0.15,
'top_marg': 0.95
},
'center right': {'xy_loc': (0.9, 0.5),
'ha': 'left',
'va': 'center',
'lef_marg': 0.05,
'rig_marg': 0.85,
'bot_marg': 0.05,
'top_marg': 0.95
},
'bottom left': {'xy_loc': (0.1, 0.075),
'ha': 'right',
'va': 'bottom',
'lef_marg': 0.175,
'rig_marg': 0.95,
'bot_marg': 0.05,
'top_marg': 0.95
},
'bottom right': {'xy_loc': (0.9, 0.075),
'ha': 'left',
'va': 'bottom',
'lef_marg': 0.05,
'rig_marg': 0.85,
'bot_marg': 0.05,
'top_marg': 0.95
},
'upper left': {'xy_loc': (0.1, 0.925),
'ha': 'right',
'va': 'top',
'lef_marg': 0.175,
'rig_marg': 0.95,
'bot_marg': 0.05,
'top_marg': 0.95
},
'upper right': {'xy_loc': (0.9, 0.925),
'ha': 'left',
'va': 'top',
'lef_marg': 0.05,
'rig_marg': 0.85,
'bot_marg': 0.05,
'top_marg': 0.95
},
}
}
SIZES = DEFAULT['sizes']
STYLES = DEFAULT['styles']
COLORS = STYLES['color']
ALPHAS = STYLES['alpha']
COLOR_LIST = [COLORS['red'], COLORS['teal'], COLORS['magenta'],
COLORS['stone'], COLORS['green'], COLORS['purple'],
COLORS['blue'], COLORS['light gray'], COLORS['pink'],
COLORS['orange'], COLORS['gray'], COLORS['yellow'],
COLORS['black']]
MISC_COLOR_LIST = [
'#fb2424',
'#24d324',
'#2139d5',
'#21bdbd',
'#cf0974',
'#f96710',
'#ccc506',
'#780e96',
'#32a26e',
'#f89356'
]
WARM_COLOR_LIST = [
'#82050b',
'#d50303',
'#f33f00',
'#f38f00',
'#f0d073'
]
COOL_COLOR_LIST = [
'#b9ddb4',
'#65c2a5',
'#3287bd',
'#4f32bd',
'#84038c'
]
HOT_COLOR_LIST = [
'#641502',
'#ab0b0b',
'#c03210',
'#e27123',
'#ffbb3e',
'#f6cb7b'
]
WET_COLOR_LIST = [
'#badbee',
'#6cb8d0',
'#59ba85',
'#3d9e3a',
'#008000',
'#003333'
]
DRY_COLOR_LIST = [
'#480505',
'#7d3e14',
'#ac6434',
'#cf9053',
'#c9c85b',
'#ebe696'
]
NEON_COLOR_LIST = [
'#7bfc73',
'#b0cd42',
'#cd7842',
'#9a3d5a',
'#46224b'
]
DIV_COLOR_LIST = (WARM_COLOR_LIST + COOL_COLOR_LIST)[::-1]
# https://www.ncl.ucar.edu/Document/Graphics/color_tables.shtml
NCL_CMAPS = pd.read_pickle(os.path.join(THIS_DIR, 'data', 'ncl_cmaps.pkl'))
NCL_CMAP_NAMES = NCL_CMAPS.columns.tolist()
def prettify_ax(ax,
alpha=0.75,
xlabel=None,
ylabel=None,
title=None,
suptitle=False,
matchcolor=True,
legend='best',
title_pad=1.025,
length_scale=False,
ticks=True):
"""
Beautify a plot axis.
:param ax: (matplotlib.axes) - original axis
:param alpha: (float) - how transparent it is
:param xlabel: (str) - label of x axis
:param ylabel: (str) - label of y axis
:param title: (str) - title of subplot
:param suptitle: (boolean) - whether to make a figure title
:param matchcolor: (boolean) - whether to match edgecolor with facecolor
:param legend: (str) - location of legend
:param title_pad: (scalar) - distance between box and title
:param length_scale: (scalar) - whether to scale the labels based on length
:param ticks: (boolean) - whether to modify ticks
:return ax: (matplotlib.axes) - prettified axis
"""
if xlabel is None:
xlabel = plt.getp(ax, 'xlabel')
if ylabel is None:
ylabel = plt.getp(ax, 'ylabel')
if title is None:
title = plt.getp(ax, 'title')
set_labels(ax, xlabel=xlabel, ylabel=ylabel, suptitle=suptitle,
title=title, title_pad=title_pad, length_scale=length_scale)
plots = plt.getp(ax, 'children')
for plot in plots:
if plot.axes is not None:
try:
if matchcolor:
edgecolor = plt.getp(plot, 'facecolor')
plt.setp(plot,
edgecolor=edgecolor,
alpha=alpha)
except:
plt.setp(plot, alpha=alpha)
set_legend(ax, loc=legend)
set_borders(ax)
if ticks:
set_major_grid(ax)
set_major_ticks(ax)
set_major_tick_labels(ax)
set_minor_grid(ax)
set_minor_ticks(ax)
set_minor_tick_labels(ax)
return ax
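# Hypothetical usage sketch (assumes an already-populated matplotlib Axes;
# figure-size scaling may affect the exact label sizes):
#
# import matplotlib.pyplot as plt
# fig, ax = plt.subplots()
# ax.plot(range(10), label='demo')
# prettify_ax(ax, title='Demo', xlabel='x', ylabel='y', legend='best')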
def prettify_bokeh(p,
title_size=15,
xlabel_size=15,
ylabel_size=15,
ytick_label_size=10,
xtick_label_size=10,
legend_size=10,
font='century gothic'):
"""
Scales bokeh plot's label sizes based on figure size
:param p: (bokeh.figure) - bokeh figure
:param title_size: (scalar) - title size
:param xlabel_size: (scalar) - x label size
:param ylabel_size: (scalar) - y label size
:param xtick_label_size: (scalar) - x tick label size
:param ytick_label_size: (scalar) - y tick label size
:param legend_size: (scalar) - size of legend labels
:param font: (str) - font of labels
:return p: (bokeh.figure) - bokeh figure
"""
title_size = str(scale_it_bokeh(p, title_size, 1)) + 'pt'
xlabel_size = str(scale_it_bokeh(p, xlabel_size, 1)) + 'pt'
ylabel_size = str(scale_it_bokeh(p, ylabel_size, 1)) + 'pt'
xtick_label_size = str(scale_it_bokeh(p, xtick_label_size, 1)) + 'pt'
ytick_label_size = str(scale_it_bokeh(p, ytick_label_size, 1)) + 'pt'
legend_size = str(scale_it_bokeh(p, legend_size, 1)) + 'pt'
p.title.text_font_size = title_size
p.title.text_font_style = 'normal'
p.title.text_font = font
p.title.align = 'left'
p.title.offset = 5
p.xaxis.axis_label_text_font_style = 'normal'
p.xaxis.axis_label_text_font = font
p.xaxis.axis_label_text_font_size = xlabel_size
p.xaxis.major_tick_line_color = 'white'
p.xaxis.major_label_text_font_size = xtick_label_size
p.xaxis.axis_line_width = 0.01
p.xaxis.minor_tick_line_color = 'white'
p.yaxis.axis_label_standoff = 16
p.yaxis.axis_label_text_font_style = 'normal'
p.yaxis.axis_label_text_font = font
p.yaxis.axis_label_text_font_size = ylabel_size
p.yaxis.major_tick_line_color = 'white'
p.yaxis.major_label_text_font_size = ytick_label_size
p.yaxis.minor_tick_line_color = 'white'
p.yaxis.axis_line_width = 0.01
p.grid.grid_line_dash = 'solid'
p.legend.location = 'top_left'
p.legend.background_fill_alpha = 0
p.legend.border_line_alpha = 0
p.legend.label_text_font_size = legend_size
return p
def plot_map(data, lats=None, lons=None, figsize=None, ax=None, stipple=None,
cmap='BlueWhiteOrangeRed', orientation='horizontal', wrap=True,
data_lim=None, vmin=None, vmax=None, balance=True,
lat1=-90, lat2=90, lon1=-180, lon2=180,
latlim=None, lonlim=None, region=None,
title='', title_pad=1.025, suptitle=False,
lat_labels='auto', lon_labels='auto', length_scale=True,
rows=1, cols=1, pos=1, fmt=None,
cbar=True, cbar_label='', shrink=0.25,
contourf=True, interval=None, tick_locs=None,
data2=None, lats2=None, lons2=None,
contour=None, contour2=None,
clabel=True, clabel2=True,
mask_land=False, mask_ocean=False,
land=False, ocean=False, coastlines=True, rivers=False,
countries=False, states=False, lakes=False,
projection=None, central_longitude=0, tight_layout='auto',
dpi=DEFAULT['dpi'], save='', close=True, returnplot=False,
**kwargs
):
"""
Makes a map on a subplot.
:param data: (array) - data to be mapped
:param lats: (array) - array of latitudes
:param lons: (array) - array of longitudes
:param figsize: (str/tup) - wide/tall/auto or tuple width x height of fig
:param ax: (mpl.axes) - plot axis
:param stipple: (array) - array of values to be stippled
:param cmap: (str) - color map
:param orientation: (str) - orientation of color bar
:param wrap: (boolean) - fill missing data at prime meridian
:param data_lim: (tup) - shortcut for vmin and vmax
:param vmin: (scalar) - lower limit of color bar
:param vmax: (scalar) - upper limit of color bar
:param lat1: (scalar) lower limit of latitude
:param lat2: (scalar) upper limit of latitude
:param lon1: (scalar) left limit of longitude
:param lon2: (scalar) right limit of longitude
:param latlim: (tuple) shortcut for lat1 and lat2
:param lonlim: (tuple) shortcut for lon1 and lon2
:param region: (str) region to quickly subset lat and lon extent (na or us)
:param title: (str) - title of subplot
:param title_pad: (scalar) - distance between box and title
:param suptitle: (boolean) - whether to make a figure title
:param lat_labels: (array) - list of latitudes to show on map
:param lon_labels: (array) - list of longitudes to show on map
:param length_scale: (scalar) - whether to scale the labels based on length
:param rows: (int) - number of rows for subplots
:param cols: (int) - number of columns for subplots
:param pos: (int) - position of current subplot
:param fmt: (str) - format of color bar labels
:param cbar: (boolean) - whether to show color bar
:param cbar_label: (str) - label of color bar
:param shrink: (scalar) - how much to shrink the color bar
:param contourf: (boolean) - whether to cartoonize colormap
:param interval: (scalar) - interval of tick marks on color bar
:param tick_locs: (array) - input own tick marks on color bar
:param data2: (array) - contours to be mapped
:param lats2: (array) - array of contour latitudes
:param lons2: (array) - array of contour longitudes
:param contour: (array) - list of values to contour with solid line
:param contour2: (array) - list of values to contour with dashed line
:param clabel: (boolean) - whether to show value on solid contours
:param clabel2: (boolean) - whether to show value on dashed contours
:param mask_land: (boolean) - whether to mask land
:param mask_ocean: (boolean) - whether to mask ocean
:param land: (boolean) - whether to color fill land
:param ocean: (boolean) - whether to color fill ocean
:param coastlines: (boolean) - whether to draw coastline
:param rivers: (boolean) - whether to draw rivers
:param countries: (boolean) - whether to draw country borders
:param states: (boolean) - whether to draw state borders
:param lakes: (boolean) - whether to color fill lakes
:param projection: (cartopy.crs) - projection of map
:param central_longitude: (scalar) - longitude to center the map on
:param tight_layout: (str) - on or auto adjust layout of subplots
:param dpi: (int) - dots per inch to save the figure
:param save: (str) - if filename is input, will save an image file
:param close: (boolean) - whether to close figure after saving
:param returnplot: (boolean) - whether to return plotted line
:param kwargs: (kwargs) - additional keyword arguments
:return ax: (mpl.axes) - plot axis
:return plot: (mpl.axes) - optional image plot
"""
from ahh.ext import get_ocean_mask
import cartopy.util
if isinstance(data, xr.Dataset):
raise Exception('Please subselect a variable from xr.Dataset!')
if isinstance(data, xr.DataArray):
if lats is None:
lats = data.lat.values
if lons is None:
lons = data.lon.values
data = data.to_masked_array()
if isinstance(lons, xr.DataArray):
lons = lons.values
if isinstance(lats, xr.DataArray):
lats = lats.values
if lons is None or lats is None:
raise Exception('Missing lats and lons!')
if data2 is None:
data2 = data
ndim = data.ndim
if ndim > 2:
raise Exception('Data must be 2D, {0}D data was input!'.format(ndim))
if mask_ocean:
data, lons = get_ocean_mask(data, lats, lons, apply_mask=True)
elif mask_land:
data, lons = get_ocean_mask(data, lats, lons,
reverse=True, apply_mask=True)
projection = _get_projection_logic(projection, lons, central_longitude)
if lons2 is None and lats2 is None:
lats2, lons2 = lats, lons
else:
lons2 -= central_longitude
lat1, lat2, lon1, lon2 = _get_lat_lon_lim_logic(latlim, lonlim,
lat1, lat2, lon1, lon2,
region=region,
central_longitude=
central_longitude)
_set_figsize_logic(figsize=figsize, rows=rows,
cols=cols, pos=pos, dpi=dpi)
if ax is None:
ax = plt.subplot(rows, cols, pos, projection=projection)
if wrap:
try:
data, lons = cartopy.util.add_cyclic_point(data, coord=lons)
except:
print('Unable to wrap!')
ax.set_extent([lon1, lon2, lat1, lat2], projection)
_add_features(ax, land, ocean, coastlines,
states, countries, lakes, rivers)
set_latlons(ax, central_longitude=central_longitude,
lat_labels=lat_labels, lon_labels=lon_labels)
if contourf:
try:
contourf[0]
base, base2 = _get_bases_logic(contourf)
vmin, vmax = _get_vmin_vmax_logic(data=contourf,
base=base2,
vmin=vmin,
vmax=vmax,
data_lim=data_lim)
if tick_locs is None:
tick_locs = contourf
except:
base, base2 = _get_bases_logic(data)
vmin, vmax = _get_vmin_vmax_logic(data=data,
base=base2,
vmin=vmin,
vmax=vmax,
data_lim=data_lim)
vmin, vmax = _balance_logic(balance, vmin, vmax)
if interval is None:
interval = base
oom = get_order_mag(np.abs(vmax) - np.abs(vmin))
interval = _get_interval_logic(interval=interval,
vmin=vmin, vmax=vmax,
base=base, oom=oom)
try:
contourf[0]
except:
contourf = np.arange(vmin, vmax + interval, interval)
vmin, vmax = _fix_vmin_vmax_logic(vmin=vmin,
vmax=vmax,
data=contourf,
interval=interval)
contourf, interval = _fix_contourf_logic(contourf=contourf,
interval=interval,
vmin=vmin,
vmax=vmax)
fmt = _get_fmt_logic(fmt=fmt, interval=interval)
cmap = get_cmap(cmap, n=len(contourf))
(tick_locs,
cbar_count) = _get_tick_locs_cbar_count_logic(tick_locs=tick_locs,
vmin=vmin,
vmax=vmax,
interval=interval)
im = ax.contourf(lons, lats, data, levels=contourf, extend='both',
transform=projection, cmap=cmap,
vmin=vmin, vmax=vmax, **kwargs)
drawedges = True
else:
base, base2 = _get_bases_logic(data)
vmin, vmax = _get_vmin_vmax_logic(data=data,
base=base2,
vmin=vmin,
vmax=vmax,
data_lim=data_lim)
vmin, vmax = _balance_logic(balance, vmin, vmax)
cmap = get_cmap(cmap, n=100)
im = ax.pcolormesh(lons, lats, data, transform=projection,
cmap=cmap, vmin=vmin, vmax=vmax, **kwargs)
drawedges = False
if cbar:
set_cbar(ax, im, label=cbar_label, drawedges=drawedges,
shrink=shrink, orientation=orientation,
fmt=fmt, tick_locs=tick_locs)
if stipple:
ax.contourf(lons2, lats2, data2, stipple, colors='none',
hatches=['.', '.', ' '],
transform=projection, **kwargs)
_set_contour_logic(ax, lons2, lats2, data2, contour,
projection, fmt, clabel)
_set_contour_logic(ax, lons2, lats2, data2, contour2,
projection, fmt, clabel2)
set_labels(ax, title=title, title_pad=title_pad,
length_scale=length_scale, suptitle=suptitle)
set_borders(ax)
_save_logic(save=save, tight_layout=tight_layout, close=close,
dpi=dpi, pos=pos, rows=rows, cols=cols)
if returnplot:
return ax, im
else:
return ax
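# Hypothetical usage sketch (requires cartopy; `data` is assumed to be a 2D
# array on a regular lat/lon grid):
#
# lats = np.linspace(-90, 90, 73)
# lons = np.linspace(-180, 177.5, 144)
# data = np.random.randn(lats.size, lons.size)
# plot_map(data, lats=lats, lons=lons, title='Random field',
#          cbar_label='arbitrary units')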
def plot_bounds(ax, lat1=-90, lat2=90, lon1=-180, lon2=180,
latlim=None, lonlim=None,
color='k', linestyle='solid', linewidth=1.25,
fill=False, alpha=0.75, projection=None,
tight_layout='on', dpi=DEFAULT['dpi'], save='',
close=True, **kwargs):
"""
Plot a bounded region on a map. Default is a rectangle with black outlines.
:param ax: (matplotlib.axes) - original axis
:param lat1: (float) - a latitudinal bound (can be any order)
:param lat2: (float) - another latitudinal bound (can be any order)
:param lon1: (float) - a longitudinal bound (can be any order)
:param lon2: (float) - another longitudinal bound (can be any order)
:param latlim: (tuple) shortcut for lat1 and lat2
:param lonlim: (tuple) shortcut for lon1 and lon2
:param color: (str) - matplotlib abbreviation of color
:param linestyle: (str) - solid, dashed, dashdot, or dotted linestyle
:param linewidth: (scalar) - how thick line is
:param fill: (boolean) - whether to color in the region
:param alpha: (float) - how transparent it is
:param projection: (cartopy.crs) - map projection
:param tight_layout: (str) - on or auto adjust layout of subplots
:param dpi: (int) - dots per inch to save the figure
:param save: (str) - save figure if string is specified
:param kwargs: (kwargs) - additional keyword arguments
:param close: (boolean) - whether to close figure after saving
"""
projection = _get_projection_logic(projection)
lat1, lat2, lon1, lon2 = _get_lat_lon_lim_logic(latlim, lonlim,
lat1, lat2, lon1, lon2)
width = lon2 - lon1
height = lat2 - lat1
ax.add_patch(mpatches.Rectangle(xy=[lon1, lat1],
width=width,
height=height,
facecolor=color,
edgecolor=color,
linestyle=linestyle,
linewidth=linewidth,
alpha=alpha,
transform=projection,
fill=fill, **kwargs
)
)
_save_logic(save=save, tight_layout=tight_layout, close=close,
dpi=dpi, pos=1, rows=1, cols=1)
def plot_line(x, y=None, figsize=None,
ax=None, xlim=None, ylim=None,
stats=False,
norm=False, anom=False, norm_anom=False, cumsum=False,
color=COLORS['red'], alpha=ALPHAS['translucent'],
inherit=True, label='', xlabel='', ylabel='', title='',
suptitle=False,
title_pad=0.965, length_scale=True, linewidth=1, linestyle='-',
xscale='linear', yscale='linear', minor_date_ticks=True,
rows=1, cols=1, pos=1, label_inline=False,
sharex=None, sharey=None,
twinx=None, twiny=None, aligned=True,
xinvert=False, yinvert=False, legend=None,
projection=DEFAULT['projection'],
tight_layout='auto', dpi=DEFAULT['dpi'],
save='', close=True, returnplot=False, **kwargs):
"""
Draw a line on a subplot. Use other functions for full customizability.
:param x: (arr) - input x array
:param y: (arr) - input y array
:param figsize: (str/tup) - wide/tall/auto or tuple width x height of fig
:param ax: (mpl.axes) - plot axis
:param xlim: (tup) - left and right x axis limit in a tuple, respectively
:param ylim: (tup) - left and right y axis limit in a tuple, respectively
:param stats: (boolean/str) - whether to show stats and if str, the loc
:param norm: (boolean) - whether to normalize the y
:param anom: (boolean) - whether to subtract the average of y from y
:param norm_anom: (boolean) - whether to get the normalized anomaly of y
:param cumsum: (boolean) - whether to take the cumulative sum of y
:param color: (str) - color of the plotted line
:param alpha: (scalar/str) - transparency of the plotted line
:param inherit: (boolean) - whether to inherit previous labels
:param label: (str) - label of line to be used in legend
:param xlabel: (str) - label of x axis
:param ylabel: (str) - label of y axis
:param title: (str) - title of subplot
:param title_pad: (scalar) - distance between box and title
:param suptitle: (boolean) - whether to make a figure title
:param length_scale: (scalar) - whether to scale the labels based on length
:param linewidth: (scalar) - width of the plotted line
:param linestyle: (str) - style of the plotted line
:param xscale: (str) - linear or log scale of x axis
:param yscale: (str) - linear or log scale of y axis
:param minor_date_ticks: (str) - whether to have date ticks on top axis
:param rows: (int) - number of rows for subplots
:param cols: (int) - number of columns for subplots
:param pos: (int) - position of current subplot
:param label_inline: (scalar) - whether to label in line; x-value of label
:param sharex: (mpl.axes) - share x axis ticks with another subplot
:param sharey: (mpl.axes) - share y axis ticks with another subplot
:param twinx: (mpl.axes) - share x axis and have another y axis
:param twiny: (mpl.axes) - share x axis and have another x axis
:param aligned: (boolean) - whether to keep left and right ticks aligned
:param xinvert: (boolean) - whether to flip x axis
:param yinvert: (boolean) - whether to flip y axis
:param legend: (str) - location of legend
:param projection: (cartopy.crs) - projection of plotted line
:param tight_layout: (str) - on or auto adjust layout of subplots
:param dpi: (int) - dots per inch to save the figure
:param save: (str) - if filename is input, will save an image file
:param close: (boolean) - whether to close figure after saving
:param returnplot: (boolean) - whether to return plotted line
:param kwargs: (kwargs) - additional keyword arguments
:return ax: (mpl.axes) - plot axis
:return plot: (mpl.axes) - optional line plot
"""
_set_figsize_logic(figsize=figsize, rows=rows,
cols=cols, pos=pos, dpi=dpi)
x = _get_dt_from_pd_logic(x)
x, xtext, xticklabels = _get_xtext_logic(x=x)
x, y = _get_x_to_y_logic(x=x, y=y)
y = _get_stats_logic(ax, y, norm=norm, anom=anom,
norm_anom=norm_anom, cumsum=cumsum)
origin_xlim, xlim = _get_xlim_logic(x, xlim)
origin_ylim, ylim = _get_ylim_logic(y, ylim)
ax, rows, cols = _get_ax_logic(ax=ax, twinx=twinx, twiny=twiny,
rows=rows, cols=cols, pos=pos,
projection=projection)
plot = ax.plot(x, y, **kwargs)
if inherit:
ax, xlabel, ylabel, title, xlim, ylim = \
set_inherited(ax, xlabel, ylabel, title,
xlim, ylim, origin_xlim, origin_ylim)
linewidth = scale_it(ax, linewidth, 0.2)
plt.setp(plot, color=color, alpha=alpha, label=label,
linewidth=linewidth, linestyle=linestyle,
solid_capstyle='round', solid_joinstyle='round',
dash_capstyle='round', dash_joinstyle='round')
# must be after label
if label is not None and label_inline:
if not isinstance(label_inline, bool):
set_inline_label(ax, plot, xval=label_inline)
else:
set_inline_label(ax, plot)
if projection is not None:
plt.setp(plot, transform=projection)
set_axes(ax, xlim=xlim, ylim=ylim,
xscale=xscale, yscale=yscale,
xinvert=xinvert, yinvert=yinvert)
# need ax and ylim set
_show_stats_logic(ax, y, stats)
_settings_logic(ax=ax,
x=x,
twinx=twinx,
twiny=twiny,
xticks=None,
xlabel=xlabel,
ylabel=ylabel,
title=title,
title_pad=title_pad,
suptitle=suptitle,
aligned=aligned,
length_scale=length_scale,
xtext=xtext,
xticklabels=xticklabels,
minor_date_ticks=minor_date_ticks)
set_legend(ax, loc=legend)
rows, cols = _set_share_logic(ax=ax, rows=rows, cols=cols,
sharex=sharex, sharey=sharey,
xlabel=xlabel, ylabel=ylabel)
_save_logic(save=save, tight_layout=tight_layout, close=close,
dpi=dpi, pos=pos, rows=rows, cols=cols)
if returnplot:
return ax, plot
else:
return ax
def plot_bar(x, y=None, figsize=None, ax=None, xlim=None, ylim=None,
stats=False,
norm=False, anom=False, norm_anom=False, cumsum=False,
matchcolor=True, color=None, facecolor=COLORS['red'],
edgecolor=COLORS['red'], alpha=ALPHAS['semi opaque'],
linewidth=0.25, linestyle='-', title_pad=0.965, length_scale=True,
inherit=True, label='', xlabel='', ylabel='', title='',
suptitle=False,
width='auto', height=None, align='edge',
xscale='linear', yscale='linear', minor_date_ticks=True,
rows=1, cols=1, pos=1, orientation='vertical',
sidebar_count=0, sidebar_pos=1, bar_vals=None,
sharex=None, sharey=None,
twinx=None, twiny=None, aligned=True,
xinvert=False, yinvert=False, legend=None,
tight_layout='auto', dpi=DEFAULT['dpi'],
save='', close=True, returnplot=False, **kwargs):
"""
Draw bars on a subplot. Use other functions for full customizability.
:param x: (arr) - input x array
:param y: (arr) - input y array
:param xlim: (tup) - left and right x axis limit in a tuple, respectively
:param ylim: (tup) - left and right y axis limit in a tuple, respectively
:param figsize: (str/tup) - wide/tall/auto or tuple width x height of fig
:param ax: (mpl.axes) - plot axis
:param stats: (boolean/str) - whether to show stats and if str, the loc
:param norm: (boolean) - whether to normalize the y
:param anom: (boolean) - whether to subtract the average of y from y
:param norm_anom: (boolean) - whether to get the normalized anomaly of y
:param cumsum: (boolean) - whether to take the cumulative sum of y
:param matchcolor: (boolean) - whether to match edgecolor with facecolor
:param color: (str) - facecolor and edgecolor of plotted bar
:param facecolor: (str) - facecolor of plotted bar
:param edgecolor: (str) - edgecolor of plotted bar
:param alpha: (scalar/str) - transparency of the plotted bar
:param linewidth: (scalar) - width of plotted bar edges
:param linestyle: (str) - style of the plotted bar edges
:param title_pad: (scalar) - distance between box and title
:param suptitle: (boolean) - whether to make a figure title
:param inherit: (boolean) - whether to inherit previous labels
:param length_scale: (scalar) - whether to scale the labels based on length
:param label: (str) - label of line to be used in legend
:param xlabel: (str) - label of x axis
:param ylabel: (str) - label of y axis
:param title: (str) - title of subplot
:param width: (str/scalar) - width of plotted bars when vertical
:param height: (str/scalar) - height of plotted bars when horizontal
:param align: (str) - whether to align plotted bar on center or edge
:param xscale: (str) - linear or log scale of x axis
:param yscale: (str) - linear or log scale of y axis
:param minor_date_ticks: (str) - whether to have date ticks on top axis
:param rows: (int) - number of rows for subplots
:param cols: (int) - number of columns for subplots
:param pos: (int) - position of current subplot
:param orientation: (str) - whether to have horizontal or vertical bars
:param sidebar_count: (int) - how many bars per x
:param sidebar_pos: (int) - the location of the side bar
:param bar_vals: (str) - format of bar vals
:param sharex: (mpl.axes) - share x axis ticks with another subplot
:param sharey: (mpl.axes) - share y axis ticks with another subplot
:param twinx: (mpl.axes) - share x axis and have another y axis
:param twiny: (mpl.axes) - share x axis and have another x axis
:param aligned: (boolean) - whether to keep left and right ticks aligned
:param xinvert: (boolean) - whether to flip x axis
:param yinvert: (boolean) - whether to flip y axis
:param legend: (str) - location of legend
:param tight_layout: (str) - on or auto adjust layout of subplots
:param dpi: (int) - dots per inch to save the figure
:param save: (str) - if filename is input, will save an image file
:param close: (boolean) - whether to close figure after saving
:param returnplot: (boolean) - whether to return plotted bar
:return ax: (mpl.axes) - plot axis
:return plot: (mpl.axes) - optional bar plot
"""
_set_figsize_logic(figsize=figsize, rows=rows,
cols=cols, pos=pos, dpi=dpi,
sidebar_pos=sidebar_pos)
x = _get_dt_from_pd_logic(x)
x, xtext, xticklabels = _get_xtext_logic(x=x)
x, y = _get_x_to_y_logic(x=x, y=y)
y = _get_stats_logic(ax, y, norm=norm, anom=anom,
norm_anom=norm_anom, cumsum=cumsum)
origin_ylim, ylim = _get_ylim_logic(y, ylim)
facecolor, edgecolor = _get_color_logic(color,
facecolor,
edgecolor,
matchcolor)
if width == 'auto':
width = _get_width_logic(x)
if sidebar_count > 1:
if facecolor is not COLORS['red']:
(width, align, x_list) = get_side_bars_recs(x,
sidebar_count,
colors=False)
else:
(width, align,
x_list, colors) = get_side_bars_recs(x,
sidebar_count,
colors=True)
if facecolor is COLORS['red']:
color = colors[sidebar_pos - 1]
x = x_list[sidebar_pos - 1]
ax, rows, cols = _get_ax_logic(ax=ax, twinx=twinx, twiny=twiny,
rows=rows, cols=cols, pos=pos)
# set width first
if xtext:
align = 'center'
origin_xlim, xlim = _get_xlim_logic(x, xlim, pad=width, align=align)
if sidebar_count > 1 and sidebar_count % 2 == 0:
xlim = (xlim[0] - width * sidebar_count,
xlim[1] + width * (sidebar_count - 1))
elif sidebar_count > 1 and sidebar_count % 2 != 0:
xlim = (xlim[0] - width * sidebar_count,
xlim[1])
if 'vertical' in orientation:
plot = ax.bar(x, y, align=align, label=label, **kwargs)
elif 'horizontal' in orientation:
plot = ax.barh(x, y, height=height, align=align,
label=label, **kwargs)
if inherit:
ax, xlabel, ylabel, title, xlim, ylim = \
set_inherited(ax, xlabel, ylabel, title,
xlim, ylim, origin_xlim, origin_ylim)
linewidth = scale_it(ax, linewidth, 0.2)
plt.setp(plot, facecolor=facecolor, edgecolor=edgecolor, alpha=alpha,
linestyle=linestyle, width=width, linewidth=linewidth)
set_axes(ax,
xlim=xlim,
ylim=ylim,
xscale=xscale,
yscale=yscale,
xinvert=xinvert,
yinvert=yinvert)
if bar_vals != False:
if sidebar_count == 0:
sidebar_count = 1
if (len(x) < (50 / sidebar_count * 1.7) and
sidebar_pos == sidebar_count):
if bar_vals is None:
interval = np.median(y)
bar_vals = _get_fmt_logic(fmt=bar_vals, interval=interval)
set_bar_vals(ax, fmt=bar_vals, orientation='auto',
yinvert=yinvert)
_settings_logic(ax=ax,
x=x,
twinx=twinx,
twiny=twiny,
xticks=None,
xlabel=xlabel,
ylabel=ylabel,
title=title,
title_pad=title_pad,
suptitle=suptitle,
aligned=aligned,
length_scale=length_scale,
xtext=xtext,
xticklabels=xticklabels,
minor_date_ticks=minor_date_ticks)
rows, cols = _set_share_logic(ax=ax, rows=rows, cols=cols,
sharex=sharex, sharey=sharey,
xlabel=xlabel, ylabel=ylabel)
set_legend(ax, loc=legend)
# need ax and ylim set and bar vals shifted
_show_stats_logic(ax, y, stats)
_save_logic(save=save, tight_layout=tight_layout, close=close,
dpi=dpi, pos=pos, rows=rows, cols=cols)
if returnplot:
return ax, plot
else:
return ax
def plot_scatter(x, y=None, figsize=None, ax=None,
xlim=None, ylim=None,
stats=False,
norm=False, anom=False, norm_anom=False, cumsum=False,
matchcolor=True,
data_lim=None, vmin=None, vmax=None,
color=None, facecolor=COLORS['red'], edgecolor=COLORS['red'],
alpha=ALPHAS['translucent'],
linewidth=0.25, size=5, marker='o', s=None,
c=None, cbar=True, cbar_label='', shrink=0.35, cmap=None,
orientation='horizontal', interval=None, tick_locs=None,
inherit=True, label='', xlabel='', ylabel='',
title='', title_pad=0.965, suptitle=False, length_scale=True,
xscale='linear', yscale='linear', minor_date_ticks=True,
rows=1, cols=1, pos=1, fmt=None, pad=0.225,
sharex=None, sharey=None,
twinx=None, twiny=None, aligned=True,
xinvert=False, yinvert=False, legend=None,
projection=DEFAULT['projection'],
tight_layout='auto', dpi=DEFAULT['dpi'],
save='', close=True, returnplot=False, **kwargs):
"""
Draw markers on a subplot. Use other functions for full customizability.
:param x: (arr) - input x array
:param y: (arr) - input y array
:param figsize: (str/tup) - wide/tall/auto or tuple width x height of fig
:param ax: (mpl.axes) - plot axis
:param stats: (boolean/str) - whether to show stats and if str, the loc
:param xlim: (tup) - left and right x axis limit in a tuple, respectively
:param ylim: (tup) - left and right y axis limit in a tuple, respectively
:param norm: (boolean) - whether to normalize the y
:param anom: (boolean) - whether to subtract the average of y from y
:param norm_anom: (boolean) - whether to get the normalized anomaly of y
:param cumsum: (boolean) - whether to take the cumulative sum of y
:param data_lim: (tup) - shortcut for vmin and vmax
:param vmin: (scalar) - lower limit of color bar
:param vmax: (scalar) - upper limit of color bar
:param matchcolor: (boolean) - whether to match edgecolor with facecolor
:param color: (str) - facecolor and edgecolor of plotted scatter marker
:param facecolor: (str) - facecolor of plotted scatter marker
:param edgecolor: (str) - edgecolor of plotted scatter marker
:param alpha: (scalar/str) - transparency of the plotted scatter marker
:param linewidth: (scalar) - width of plotted scatter marker edges
:param size: (scalar) - size of plotted scatter marker
:param marker: (scalar) - style of plotted scatter marker
:param s: (arr) - array to map size to
:param c: (arr) - array to map color to
:param cbar: (boolean) - whether to show color bar
:param cbar_label: (str) - label of color bar
:param shrink: (scalar) - size of color bar
:param cmap: (str) - color map
:param orientation: (str) - orientation of color bar
:param interval: (scalar) - interval of tick marks on color bar
:param tick_locs: (array) - input own tick marks on color bar
:param inherit: (boolean) - whether to inherit previous labels
:param label: (str) - label of line to be used in legend
:param xlabel: (str) - label of x axis
:param ylabel: (str) - label of y axis
:param title: (str) - title of subplot
:param title_pad: (scalar) - distance between box and title
:param suptitle: (boolean) - whether to make a figure title
:param length_scale: (scalar) - whether to scale the labels based on length
:param xscale: (str) - linear or log scale of x axis
:param yscale: (str) - linear or log scale of y axis
:param minor_date_ticks: (str) - whether to have date ticks on top axis
:param rows: (int) - number of rows for subplots
:param cols: (int) - number of columns for subplots
:param pos: (int) - position of current subplot
:param fmt: (str) - format of color bar labels
:param pad: (scalar) - padding of color bar from plot
:param sharex: (mpl.axes) - share x axis ticks with another subplot
:param sharey: (mpl.axes) - share y axis ticks with another subplot
:param twinx: (mpl.axes) - share x axis and have another y axis
:param twiny: (mpl.axes) - share x axis and have another x axis
:param aligned: (boolean) - whether to keep left and right ticks aligned
:param xinvert: (boolean) - whether to flip x axis
:param yinvert: (boolean) - whether to flip y axis
:param legend: (str) - location of legend
:param projection: (cartopy.crs) - projection of plotted scatter
:param tight_layout: (str) - on or auto adjust layout of subplots
:param dpi: (int) - dots per inch to save the figure
:param save: (str) - if filename is input, will save an image file
:param close: (boolean) - whether to close figure after saving
:param returnplot: (boolean) - whether to return plotted scatter
:param kwargs: (kwargs) - additional keyword arguments
:return ax: (mpl.axes) - plot axis
:return plot: (mpl.axes) - optional scatter plot
"""
_set_figsize_logic(figsize=figsize, rows=rows,
cols=cols, pos=pos, dpi=dpi)
x = _get_dt_from_pd_logic(x)
x, xtext, xticklabels = _get_xtext_logic(x=x)
x, y = _get_x_to_y_logic(x, y)
y = _get_stats_logic(ax, y, norm=norm, anom=anom,
norm_anom=norm_anom, cumsum=cumsum)
origin_ylim, ylim = _get_ylim_logic(y, ylim)
origin_xlim, xlim = _get_xlim_logic(x, xlim)
ax, rows, cols = _get_ax_logic(ax=ax, twinx=twinx, twiny=twiny,
rows=rows, cols=cols, pos=pos,
projection=projection)
if c is not None:
base, base2 = _get_bases_logic(c)
vmin, vmax = _get_vmin_vmax_logic(data=c, base=base2,
vmin=vmin, vmax=vmax,
data_lim=data_lim)
oom = get_order_mag(vmax - vmin)
interval = _get_interval_logic(interval=interval,
vmin=vmin, vmax=vmax,
base=base, oom=oom)
fmt = _get_fmt_logic(fmt=fmt, interval=interval)
vmin, vmax = _fix_vmin_vmax_logic(vmin=vmin, vmax=vmax, data=c,
interval=interval)
(tick_locs,
cbar_count) = _get_tick_locs_cbar_count_logic(tick_locs=tick_locs,
vmin=vmin, vmax=vmax,
interval=interval)
if cmap is None:
cmap = 'viridis'
cmap = get_cmap(cmap, cbar_count)
edgecolor = None
facecolor = COLORS['gray']
if s is not None:
size = np.abs(s)
else:
size = scale_it(ax, np.abs(size), 25, exp=False)
plot = ax.scatter(x, y, marker=marker,
linewidths=linewidth,
s=size, c=c, cmap=cmap,
vmin=vmin, vmax=vmax,
**kwargs
)
if cbar and cmap is not None:
set_cbar(ax, plot, label=cbar_label, fmt=fmt,
pad=pad, shrink=shrink,
tick_size=8, label_size=10,
orientation=orientation,
tick_locs=tick_locs)
else:
if color is not None:
facecolor = color
edgecolor = color
if matchcolor:
edgecolor = facecolor
if inherit:
ax, xlabel, ylabel, title, xlim, ylim = \
set_inherited(ax, xlabel, ylabel, title,
xlim, ylim, origin_xlim, origin_ylim)
linewidth = scale_it(ax, linewidth, 0.2)
if projection is not None:
plt.setp(plot, transform=projection)
plt.setp(plot, facecolor=facecolor, edgecolor=edgecolor,
alpha=alpha, label=label)
set_axes(ax, xlim=xlim, ylim=ylim,
xscale=xscale, yscale=yscale,
xinvert=xinvert, yinvert=yinvert)
# need ax and ylim set
_show_stats_logic(ax, y, stats)
_settings_logic(ax=ax,
x=x,
twinx=twinx,
twiny=twiny,
xticks=None,
xlabel=xlabel,
ylabel=ylabel,
title=title,
title_pad=title_pad,
suptitle=suptitle,
aligned=aligned,
length_scale=length_scale,
xtext=xtext,
xticklabels=xticklabels,
minor_date_ticks=minor_date_ticks)
rows, cols = _set_share_logic(ax=ax, rows=rows, cols=cols,
sharex=sharex, sharey=sharey,
xlabel=xlabel, ylabel=ylabel)
set_legend(ax, loc=legend)
_save_logic(save=save, tight_layout=tight_layout, close=close,
dpi=dpi, pos=pos, rows=rows, cols=cols)
if returnplot:
return ax, plot
else:
return ax
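# Hypothetical usage sketch (colour the markers by a third variable `c`,
# which also drives the colour bar):
#
# x = np.arange(50)
# y = np.random.randn(50)
# plot_scatter(x, y, c=np.abs(y), cbar_label='|y|', cmap='viridis')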
def plot(*plot_args, **plot_kwargs):
"""
Plot multiple line/bar/scatter plots at once using this syntax
x, y, 'label', 'ptype/color/linestyle/marker'
Example - plot a red dashed line with circle marker and a black bar plot
plot(x, y, 'line plot', 'line/red/--/o', x2, y2, 'bar plot', 'bar/black')
Equivalent shorthand
plot(x, y, 'line plot', 'l/r/--/o', x2, y2, 'bar plot', 'b/k')
Example 2 - plot a green solid line, blue bar plot, yellow scatter plot
with a title, ylabel, and xlabel
plot(x, y, 'labl', 'l/r', x2, y2, 'labl2', 'b/b', x3, y3, 'labl3', 's/y',
title='title', ylabel='a ylabel', xlabel='one xlabel')
Example 3 - adjust figsize while still stacking all the plots
plot(x, y, 'labl', 'l', x2, y2, 'labl2', 'b', figsize=(8, 5), stack=True)
Example 4 - plot two separate figures
plot(x, y, 'labl', 'l', x2, y2, 'labl2', 'b', stack=False)
:param stack: (bool) whether to keep stacking if figsize input is provided
:return ax_list: (list) - list of axes
"""
plot_inputs = zip(plot_args[::4],
plot_args[1::4],
plot_args[2::4],
plot_args[3::4])
figsize = plot_kwargs.get('figsize', 'na')
stack = plot_kwargs.get('stack', True)
if figsize == 'na':
set_figsize()
ax_list = []
for i, plot_input in enumerate(plot_inputs):
if stack and i > 0:
plot_kwargs['figsize'] = 'na'
x, y, label, style = plot_input
ptype, color, linestyle, marker = _parse_style(style)
vis_dict = dict(label=label, color=color,
linestyle=linestyle, marker=marker,
**plot_kwargs)
if ptype in ['b', 'bar']:
_pop_keys(vis_dict, 'bar')
ax = plot_bar(x, y, **vis_dict)
elif ptype in ['s', 'scatter']:
_pop_keys(vis_dict, 'scatter')
if vis_dict['marker'] == '':
vis_dict['marker'] = 'o'
ax = plot_scatter(x, y, **vis_dict)
else:
_pop_keys(vis_dict, 'line')
ax = plot_line(x, y, **vis_dict)
ax_list.append(ax)
return ax_list
def plot_hist(x=None, y=None, ptype='bar', align='edge', bar_vals=None,
width='auto', norm=False, cumsum=False, **kwargs):
"""
Plot histogram using plot line/bar/scatter.
:param x: (int/arr) - number of bins or array of bin edges
:param y: (arr) - array of items
:param ptype: (str) - whether to plot line, bar, or scatter
:param align: (str) - whether to align bars on edge or center
:param bar_vals: (str) - format of bar vals
:param width: (str/scalar) - width of plotted bars when vertical
:param norm: (boolean) - whether to normalize the y
:param cumsum: (boolean) - whether to take the cumulative sum of y
:param kwargs: (kwargs) - additional keyword arguments
:return ax: (mpl.axes) - plot axis
"""
if y is None:
y = x
x = None
try:
int(x)
refresh_x = x + 1
except:
refresh_x = 0
if norm:
weights = np.ones_like(y) / float(len(y))
normed = 0
else:
weights = None
normed = False
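# Weighting every sample by 1/N turns raw bin counts into relative
# frequencies, so a "normalized" histogram sums to one over all bins.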
try:
if x is None or refresh_x:
if not refresh_x:
ymin = np.min(y)
ymax = np.max(y)
oom = get_order_mag(ymax - ymin)
base = np.power(5, oom)
ymin = round_to(ymin, base=base)
ymax = round_to(ymax, base=base)
x = np.arange(ymin, ymax, base)
if ymin == ymax or refresh_x:
ymin = np.min(y) # refresh it
ymax = np.max(y)
if refresh_x == 0:
refresh_x += 7
x = np.linspace(ymin, ymax, refresh_x)
y = np.clip(y, np.min(x), np.max(x))
hist_counts, bin_edges = np.histogram(y, x,
normed=normed,
weights=weights)
x, y = bin_edges[:-1], hist_counts
if width == 'auto':
width = np.average(np.diff(x))
except:
text_hist = Counter(y)
y = list(text_hist.values())
x = list(text_hist.keys())
align = 'center'
if bar_vals is None:
if not norm:
bar_vals = '%1d'
else:
bar_vals = '%.2f'
if ptype == 'bar':
plot_bar(x, y, align=align, width=width, bar_vals=bar_vals,
cumsum=cumsum, **kwargs)
elif ptype == 'scatter':
plot_scatter(x, y, cumsum=cumsum, **kwargs)
else:
plot_line(x, y, cumsum=cumsum, **kwargs)
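# Hypothetical usage sketch (either let the bin edges be chosen
# automatically, or pass them explicitly as the first argument):
#
# samples = np.random.normal(size=1000)
# plot_hist(samples, norm=True, title='Normalized histogram')
# plot_hist(np.linspace(-4, 4, 17), samples)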
def plot_heatmap(df, figsize=None, ax=None, mask=None, mask2=None,
size=12, cmap='RdBu_r', orientation='vertical',
edgecolor=COLORS['black'],
xrotation=0, yrotation=0,
data_lim=None, vmin=None, vmax=None,
inherit=True, label='', xlabel='', ylabel='',
title='', title_pad=1.025, suptitle=False, length_scale=True,
xticklabels=None, yticklabels=None,
rows=1, cols=1, pos=1, fmt=None, pad=0.3,
cbar=True, cbar_label='', shrink=0.2,
interval=None, tick_locs=None,
xinvert=False, yinvert=True,
tight_layout='auto', dpi=DEFAULT['dpi'],
save='', close=True, returnplot=False, **kwargs):
"""
Draw a heatmap on a subplot. Use other functions for full customizability.
:param df: (pd.DataFrame) - dataframe to be converted into heatmap
:param mask: (pd.DataFrame) - dataframe containing booleans to show text
:param mask2: (pd.DataFrame) - dataframe containing booleans to show text
:param size: (scalar) - size of text over masks
:param figsize: (str/tup) - wide/tall/auto or tuple width x height of fig
:param ax: (mpl.axes) - plot axis
:param cmap: (str) - color map
:param orientation: (str) - orientation of color bar
:param data_lim: (tup) - shortcut for vmin and vmax
:param vmin: (scalar) - lower limit of color bar
:param vmax: (scalar) - upper limit of color bar
:param xrotation: (scalar) - degrees to rotate x major tick labels
:param yrotation: (scalar) - degrees to rotate y major tick labels
:param inherit: (boolean) - whether to inherit previous labels
:param label: (str) - label of line to be used in legend
:param xlabel: (str) - label of x axis
:param ylabel: (str) - label of y axis
:param title: (str) - title of subplot
:param title_pad: (scalar) - distance between box and title
:param suptitle: (boolean) - whether to make a figure title
:param length_scale: (scalar) - whether to scale the labels based on length
:param xticklabels: (list) - manually set x major tick labels
:param yticklabels: (list) - manually set y major tick labels
:param rows: (int) - number of rows for subplots
:param cols: (int) - number of columns for subplots
:param pos: (int) - position of current subplot
:param fmt: (str) - format of color bar labels
:param pad: (scalar) - padding of color bar
:param cbar: (boolean) - whether to show color bar
:param cbar_label: (str) - label of color bar
:param shrink: (scalar) - size of color bar
:param interval: (scalar) - interval of tick marks on color bar
:param tick_locs: (array) - input own tick marks on color bar
:param xinvert: (boolean) - whether to flip x axis
:param yinvert: (boolean) - whether to flip y axis
:param tight_layout: (str) - on or auto adjust layout of subplots
:param dpi: (int) - dots per inch to save the figure
:param save: (str) - if filename is input, will save an image file
:param close: (boolean) - whether to close figure after saving
:param returnplot: (boolean) - whether to return plotted heatmap
:param kwargs: (kwargs) - additional keyword arguments
:return ax: (mpl.axes) - plot axis
:return plot: (mpl.axes) - optional line plot
"""
_set_figsize_logic(figsize=figsize, rows=rows,
cols=cols, pos=pos, dpi=dpi)
if ax is None:
ax = plt.subplot(rows, cols, pos)
base, base2 = _get_bases_logic(df)
vmin, vmax = _get_vmin_vmax_logic(data=df,
base=base2,
vmin=vmin,
vmax=vmax,
data_lim=data_lim)
oom = get_order_mag(vmax - vmin)
interval = _get_interval_logic(interval=interval,
vmin=vmin, vmax=vmax,
base=base, oom=oom)
fmt = _get_fmt_logic(fmt=fmt, interval=interval)
vmin, vmax = _fix_vmin_vmax_logic(vmin=vmin, vmax=vmax, data=df,
interval=interval)
(tick_locs,
cbar_count) = _get_tick_locs_cbar_count_logic(tick_locs=tick_locs,
vmin=vmin, vmax=vmax,
interval=interval)
cmap = get_cmap(cmap, cbar_count)
im = ax.pcolor(df,
cmap=cmap,
vmin=vmin,
vmax=vmax,
edgecolors=edgecolor,
**kwargs)
ax.set_yticks(np.arange(df.shape[0]) + 0.5, minor=False)
ax.set_xticks(np.arange(df.shape[1]) + 0.5, minor=False)
ax.patch.set(hatch='+',
edgecolor=COLORS['gray'],
color=COLORS['gray'],
alpha=0.45, lw=0.25)
    if xinvert:
        ax.invert_xaxis()
    if yinvert:
        ax.invert_yaxis()
if xticklabels is None:
xticklabels = df.columns
if yticklabels is None:
yticklabels = df.index
set_major_tick_labels(ax,
xticklabels=xticklabels,
yticklabels=yticklabels,
xrotation=xrotation,
yrotation=yrotation)
set_labels(ax, xlabel=xlabel, ylabel=ylabel, suptitle=suptitle,
title=title, title_pad=title_pad, length_scale=length_scale)
ax.grid(False)
if cbar:
set_cbar(ax, im, label=cbar_label, fmt=fmt,
pad=pad, shrink=shrink,
tick_size=8, label_size=10,
orientation=orientation,
tick_locs=tick_locs)
df_nan = np.ma.masked_invalid(df)
if mask is not None:
_set_heatmap_mask(ax, df_nan, mask, size)
if mask2 is not None:
_set_heatmap_mask(ax, df_nan, mask2, size)
_save_logic(save=save, tight_layout=tight_layout, close=close,
dpi=dpi, pos=pos, rows=rows, cols=cols)
if returnplot:
return ax, im
else:
return ax
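# Hedged usage sketch for plot_heatmap: a small random DataFrame; assumes the
# pandas/numpy imports (pd/np) used elsewhere in this module are in scope.
def _demo_plot_heatmap():
    """Minimal example: 5x4 heatmap with a labeled color bar."""
    data = pd.DataFrame(np.random.rand(5, 4),
                        index=list('abcde'),
                        columns=['w', 'x', 'y', 'z'])
    return plot_heatmap(data, cbar_label='value', title='Demo heatmap')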
def plot_cbar(cmap,
fig=None,
left=0.05,
bottom=0.95,
width=0.95,
height=0.05,
label='',
fmt='%1.0f',
label_size=12,
drawedges=True,
label_color=COLORS['gray'],
ticks=None,
boundaries=None,
tick_size=8,
tick_color=COLORS['gray'],
color=COLORS['black'],
pad=0.075,
aspect=25.5,
shrink=0.2,
length=0,
tick_width=0.25,
direction='out',
orientation='horizontal',
cax=None,
**kwargs):
"""
Plot lone color bar.
:param cmap: (list/str) - a list containing RGB or Python/NCL cmap name
    :param fig: (mpl.figure) - figure to draw the color bar on; created if None
:param left: (scalar) - left padding from figure edge
:param bottom: (scalar) - bottom padding from figure left edge
:param width: (scalar) - percent width of figure
:param height: (scalar) - percent height of figure
:param fmt: (str) - format of color bar labels
:param label_size: (scalar) - size of color bar label
:param label_color: (scalar) - color of color bar label
:param ticks: (array) - input own tick marks on color bar
:param tick_size: (scalar) - size of color bar tick labels
:param tick_color: (scalar) - color of color bar tick labels
:param color: (scalar) - color of color bar tick marks
    :param drawedges: (boolean) - whether to draw color edges
:param pad: (scalar) - padding of color bar from plot
:param aspect: (int) - aspect ratio of color bar
:param shrink: (scalar) - size of color bar
:param length: (scalar) - length of color bar tick marks
:param tick_width: (scalar) - width of color bar tick marks
:param direction: (str) - direction of color bar tick marks
:param orientation: (str) - orientation of color bar
:param cax: (mpl.axes) - plot axis to attach to
:param kwargs: (kwargs) - additional keyword arguments
:return cbar: (mpl.ColorBar) - matplotlib color bar
"""
if fig is None:
fig = set_figsize(8, 4)
if boundaries is None and ticks is not None:
boundaries = ticks
ax = fig.add_axes([left, bottom, width, height])
cmap = get_cmap(cmap)
cbar = mpl.colorbar.ColorbarBase(ax, ticks=ticks,
boundaries=boundaries,
cmap=cmap,
orientation=orientation)
cbar.ax.tick_params(labelsize=tick_size,
direction=direction,
length=length,
width=tick_width,
tick2On=True,
labelcolor=label_color,
color=color)
cbar.set_label(label,
size=label_size,
color=label_color)
return cbar
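# Hedged usage sketch for plot_cbar: draws a standalone horizontal color bar
# for a registered matplotlib cmap; the tick values are arbitrary.
def _demo_plot_cbar():
    """Minimal example: lone color bar for the 'viridis' cmap."""
    return plot_cbar('viridis', label='demo scale',
                     ticks=np.linspace(0, 1, 6).tolist())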
def init_map(lat1=-90, lat2=90, lon1=-180, lon2=180,
latlim=None, lonlim=None, region=None,
rows=1, cols=1, pos=1, figsize=None, ax=None,
title='', suptitle=False,
length_scale=True, lat_labels='auto', lon_labels='auto',
projection=DEFAULT['projection'], central_longitude=0,
land=False, ocean=False, lakes=True,
coastlines=True, states=True, countries=True, rivers=False,
tight_layout='auto', dpi=DEFAULT['dpi'], save='', close=True):
"""
Initialize a projected map.
:param lat1: (scalar) lower limit of latitude
:param lat2: (scalar) upper limit of latitude
:param lon1: (scalar) left limit of longitude
:param lon2: (scalar) right limit of longitude
:param latlim: (tuple) shortcut for lat1 and lat2
:param lonlim: (tuple) shortcut for lon1 and lon2
:param region: (str) region to quickly subset lat and lon extent (na or us)
:param rows: (int) - number of rows for subplots
:param cols: (int) - number of columns for subplots
:param pos: (int) - position of current subplot
:param figsize: (str/tup) - wide/tall/auto or tuple width x height of fig
:param ax: (mpl.axes) - plot axis
:param title: (str) - title of subplot
:param length_scale: (scalar) - whether to scale the labels based on length
:param lat_labels: (array) - list of latitudes to show on map
:param lon_labels: (array) - list of longitudes to show on map
:param projection: (cartopy.crs) - projection of map
:param central_longitude: (scalar) - longitude to center the map on
:param land: (boolean) - whether to color fill land
    :param ocean: (boolean) - whether to color fill ocean
:param lakes: (boolean) - whether to color fill lakes
:param coastlines: (boolean) - whether to draw coastline
:param states: (boolean) - whether to draw state borders
:param countries: (boolean) - whether to draw country borders
:param rivers: (boolean) - whether to draw rivers
:param tight_layout: (str) - on or auto adjust layout of subplots
:param dpi: (int) - dots per inch to save the figure
:param save: (str) - if filename is input, will save an image file
:param close: (boolean) - whether to close figure after saving
:return ax: (mpl.axes) - plot axis
"""
_set_figsize_logic(figsize=figsize, rows=rows,
cols=cols, pos=pos, dpi=dpi)
projection = _get_projection_logic(projection)
if ax is None:
ax = plt.subplot(rows, cols, pos, projection=projection)
lat1, lat2, lon1, lon2 = _get_lat_lon_lim_logic(latlim, lonlim,
lat1, lat2, lon1, lon2,
region=region,
central_longitude=
central_longitude)
ax.set_extent([lon1, lon2, lat1, lat2], projection)
_add_features(ax, land, ocean, coastlines,
states, countries, lakes, rivers)
set_latlons(ax,
lat_labels=lat_labels, lon_labels=lon_labels,
central_longitude=central_longitude)
set_labels(ax, title=title, length_scale=length_scale)
_save_logic(save=save, tight_layout=tight_layout, close=close,
dpi=dpi, pos=pos, rows=rows, cols=cols)
return ax
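# Hedged usage sketch for init_map: assumes cartopy is installed and that
# DEFAULT['projection'] resolves to a valid cartopy CRS; the extent is an
# arbitrary window over Europe.
def _demo_init_map():
    """Minimal example: coastline-only map of Europe."""
    return init_map(lat1=35, lat2=60, lon1=-10, lon2=30,
                    title='Demo map', states=False)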
def get_side_bars_recs(x, sidebar_count, colors=True):
"""
Output some recommended values to show side by side bars.
:param x: (arr) - input x array
:param sidebar_count: (int) - how many bars side by side
:param colors: (boolean) - whether to return colors
:return width: (scalar) - adjusted width of color bars
:return align: (str) - edge or center based on sidebar_count
:return x_list: (list) - adjusted x values
:return colors: (list) - list of colors
"""
if sidebar_count == 0:
        raise ValueError('Unable to have 0 side bars per x!')
if sidebar_count == 1:
if colors:
return 0.833333333, 'center', [x], [COLOR_LIST[0]]
else:
return 0.833333333, 'center', [x]
if sidebar_count % 2 == 0:
align = 'edge'
else:
align = 'center'
width = _get_width_logic(x) / sidebar_count
x_shift_end = sidebar_count // 2
x_shift_start = -(sidebar_count - x_shift_end)
x_shifts = np.arange(x_shift_start, x_shift_end)
    if align == 'center':
extra_x_shift = len(x_shifts) // 2 + 1
x_shifts += extra_x_shift
x_list = []
for x_shift in x_shifts:
try:
x_list.append(mdates.date2num(x) + width * x_shift)
except:
x_list.append(x + width * x_shift)
if colors:
colors = COLOR_LIST[0:sidebar_count]
return width, align, x_list, colors
else:
return width, align, x_list
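# Hedged usage sketch for get_side_bars_recs: plots three series side by side.
# The y values are random and the call assumes plot_bar above accepts the
# width/align/color keywords, as it does when called from plot_hist.
def _demo_side_bars():
    """Minimal example: three grouped bar series over five x positions."""
    x = np.arange(5)
    width, align, x_list, colors = get_side_bars_recs(x, 3)
    for x_shifted, color in zip(x_list, colors):
        plot_bar(x_shifted, np.random.rand(5),
                 width=width, align=align, color=color)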
def set_bar_vals(ax, size=7.5,
color=COLORS['black'],
alpha=ALPHAS['translucent'],
orientation='auto',
inherit_color=False,
pad_remover=1,
fmt='%d',
yinvert=False):
"""
Label the rectangles in bar plots with its respective values.
Adaptation of: "http://composition.al/blog/2015/11/29/a-better-way-to-\
add-labels-to-bar-charts-with-matplotlib/"
:param ax: (mpl.axes) - plot axis
:param size: (scalar) - size of bar labels
:param color: (str) - color of bar labels
:param alpha: (scalar/str) - transparency of bar labels
:param orientation: (str) - orientation of the labels
:param inherit_color: (boolean) - whether to inherit color for labels
:param pad_remover: (scalar): - space to remove between ylim and labels
:param fmt: (str) - format of color bar labels
    :param yinvert: (boolean) - whether to invert the y values of labels
:return ax: (mpl.axes) - plot axis
"""
try:
pad_remover = scale_it(ax, pad_remover, 0.1, exp=True)
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
if xmin > xmax:
xmin, xmax = xmax, xmin
if ymin > ymax:
ymin, ymax = ymax, ymin
y_height = ymax - ymin
rects = ax.patches
size = scale_it(ax, size, 1, exp=True) / np.log(len(rects))
if len(rects) > 5:
size *= 3
        if orientation == 'auto':
            if len(str(int(ymax))) > 2:
                orientation = 'vertical'
            else:
                orientation = 'horizontal'
        if orientation == 'vertical':
rotation = 90
height_mult = 0.02
limit_mult = 2
else:
rotation = 0
height_mult = 0.015
limit_mult = 1
pos_ct = 1 # to dampen future
neg_ct = 1 # ylim increases
orient_add = 0
for rect in rects:
x = plt.getp(rect, 'x')
y = rect.get_height()
            if plt.getp(ax, 'yscale') == 'log':
                label_position = y
                if orientation == 'vertical':
                    label_position += y / 50
            else:
                label_position = y + (y_height * height_mult)
            if y < 0:
                va = 'top'
                if orientation == 'horizontal':
orient_add = label_position / 60
label_position += (y_height * -2 * height_mult)
else:
va = 'bottom'
if label_position >= (ymax - ymax / 5):
ymax += (ymax * pad_remover / 6.5 /
pos_ct * limit_mult + orient_add)
pos_ct += 15
if label_position <= (ymin - ymin / 5):
ymin += (ymin * pad_remover / 8 /
neg_ct * limit_mult + orient_add)
neg_ct += 15
if inherit_color:
color = plt.getp(rect, 'facecolor')
ax.set_ylim(ymin, ymax)
if yinvert:
label_position *= -1
if (ymin <= y < ymax) and (xmin < x < xmax):
ax.text(rect.get_x() + rect.get_width() / 2., label_position,
fmt % y, size=size, alpha=alpha, color=color,
ha='center', va=va, rotation=rotation)
except:
print('Unable to set bar vals!')
return ax
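# Hedged usage sketch for set_bar_vals: labels each bar of an existing bar
# plot with its height; assumes plot_bar above returns the axis it drew on.
def _demo_set_bar_vals():
    """Minimal example: bar plot with a value label on top of each bar."""
    ax = plot_bar(np.arange(4), [3, 7, 2, 5])
    return set_bar_vals(ax, fmt='%d')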
def set_inline_label(ax, line, label=None,
xval=None, size=6, alpha=ALPHAS['translucent'],
color=None, ha='center', va='center',
bbox=dict(facecolor=COLORS['white'],
edgecolor=COLORS['white'],
alpha=ALPHAS['transparent']),
**kwargs):
"""
Automatically adds an inline label to line
https://github.com/cphyc/matplotlib-label-lines
:param ax: (mpl.axes) - plot axis
:param line: (mpl.Line2D) - line to be labeled
:param label: (str) - label of line
:param xval: (scalar) - x value of label; defaults to median
:param size: (scalar) - size of label
:param alpha: (scalar) - opacity of label
:param ha: (str) - horizontal alignment of label
:param va: (str) - vertical alignment of label
:param bbox: (dict) - dictionary of box surrounding label
:param kwargs: (kwargs) - additional keyword arguments
"""
if isinstance(line, list):
line = line[0]
xdata = line.get_xdata()
ydata = line.get_ydata()
try:
if xval is None:
xval = np.median(xdata)
except:
xval = xdata[int(len(xdata) / 2)]
if isinstance(xval, datetime.datetime):
xdata = pd.to_datetime(xdata).to_pydatetime()
elif isinstance(xval, str):
xval = pd.to_datetime(xval).to_pydatetime()
xdata = pd.to_datetime(xdata).to_pydatetime()
x_idx = np.where(xdata == xval)[0]
    if x_idx.size == 0:
        print('xval outside range of x in set_inline_label!')
        return
    yval = ydata[x_idx[0]]
if not label:
label = line.get_label()
size = scale_it(ax, size, 2, exp=True)
try:
if xval is None:
xval = np.median(xdata)
except:
xval = xdata[int(len(xdata) / 2)]
if color is None:
color = plt.getp(line, 'color')
ax.text(xval, yval, label,
color=color,
alpha=alpha,
size=size,
ha=ha,
va=va,
bbox=bbox,
**kwargs
)
def annotate_point(ax, x, y, label='', xytext=(0, 0),
size=SIZES['marker']['smaller'],
textcoords='offset points', transform=False,
projection=DEFAULT['projection'],
bbox=dict(boxstyle='round, pad=0.3',
facecolor=COLORS['black'],
alpha=ALPHAS['transparent']),
**kwargs
):
"""
Annotate a point on a subplot.
:param ax: (mpl.axes) - plot axis
:param x: (scalar) - input x location to annotate
:param y: (scalar) - input y location to annotate
:param label: (str) - label of line to be used in legend
:param xytext: (tup) - x, y offset from input x and y for annotation
:param size: (scalar) - size of annotation
:param textcoords: (str) - type of coordinates
:param transform: (boolean) - whether to use input projection
:param projection: (cartopy.crs) - projection of plotted scatter
:param bbox: (dict) - dictionary of boxstyle, facecolor, and alpha of box
:param kwargs: (kwargs) - additional keyword arguments
:return ax: (mpl.axes) - plot axis
"""
if transform:
x, y = ax.projection.transform_point(x, y, src_crs=projection)
ax.annotate(label, xy=(x, y), xytext=xytext, ha='left', va='center',
textcoords=textcoords, size=size, bbox=bbox, **kwargs)
return ax
def set_figsize(width=None, height=None, figsize='wide',
rows=1, cols=1, pos=1, dpi=DEFAULT['dpi'], **kwargs):
"""
Set figure size; can be wide, tall, auto, or input tuple.
:param width: (scalar) - width of figure
:param height: (scalar) - height of figure
:param figsize: (str/tup) - wide/tall/auto or tuple width x height of fig
:param rows: (int) - number of rows for subplots
:param cols: (int) - number of columns for subplots
:param pos: (int) - position of current subplot
:param dpi: (int) - dots per inch to save the figure
:param kwargs: (kwargs) - additional keyword arguments
"""
if width is not None and height is not None:
figsize = (width, height)
else:
        if figsize == 'wide' and pos == 1:
            fig_width = 10 + rows * 1.75
            fig_height = 3.5 + cols * 1.25
            figsize = (fig_width, fig_height)
        elif figsize == 'tall' and pos == 1:
            fig_width = 3.5 + rows * 1.25
            fig_height = 12 + cols * 1.75
            figsize = (fig_width, fig_height)
        elif figsize == 'auto' and pos == 1:
            fig_width = 8 + rows * 1.5
            fig_height = 4.5 + cols * 1.5
            figsize = (fig_width, fig_height)
if isinstance(figsize, tuple):
fig = plt.figure(figsize=figsize, dpi=dpi, **kwargs)
return fig
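# Hedged usage sketch for set_figsize: the sizes below are arbitrary examples.
def _demo_set_figsize():
    """Minimal example: explicit 8x4 inch figure vs. the 'wide' preset."""
    fig_explicit = set_figsize(8, 4)          # width/height given directly
    fig_preset = set_figsize(figsize='wide')  # preset sized from rows/cols
    return fig_explicit, fig_preset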
def set_ax(rows=1, cols=1, pos=1, **kwargs):
"""
Create plot axis
:param rows: (int) - number of rows for subplots
:param cols: (int) - number of columns for subplots
:param pos: (int) - position of current subplot
:param kwargs: (kwargs) - additional keyword arguments
:return ax: (mpl.axes) - plot axis
"""
return plt.subplot(rows, cols, pos, **kwargs)
def set_date_ticks(ax, minor_date_ticks=True):
"""
Use logic on the length of date range to decide the tick marks.
:param ax: (mpl.axes) - plot axis
:param minor_date_ticks: (boolean) - whether to show the top date ticks
:return major_xlocator: (str) - locator of major tick
:return major_xinterval: (str) - interval between each major tick
:return major_xformatter: (str) - formatter of major tick
:return minor_xlocator: (str) - locator of minor tick
:return minor_xinterval: (str) - interval between each minor tick
:return minor_xformatter: (str) - formatter of minor tick
:return dt_bool: (boolean) - whether the x axis is datetimes
"""
geom = plt.getp(ax, 'geometry')
nrows = geom[0]
ncols = geom[1]
xlim = plt.getp(ax, 'xlim')
if xlim[0] < 700000:
dt_bool = False
return [None] * 6 + [dt_bool]
else:
dt_bool = True
xlim_dts = mdates.num2date(xlim)
dt_dict = td2dict(xlim_dts[-1] - xlim_dts[0])
ndays = dt_dict['days']
if ndays < 0:
dt_dict = td2dict(xlim_dts[0] - xlim_dts[-1])
ndays = dt_dict['days']
if ndays > 10950:
major_xlocator = 'years'
major_xformatter = '%Y'
major_xinterval = int(ndays / 2000)
major_xlocator2 = None
major_xformatter2 = None
major_xinterval2 = None
minor_xlocator = 'years'
minor_xformatter = '\'%y'
minor_xinterval = int(ndays / 8000)
minor_xshow = int(ndays / 8000)
for i in range(0, 10):
if major_xinterval % minor_xinterval != 0:
major_xinterval += 1
else:
break
if minor_xshow >= minor_xinterval / 2:
minor_xshow -= int(minor_xinterval / 1.75)
if minor_xshow <= minor_xinterval:
minor_xshow += 1
elif 3000 < ndays <= 10950:
major_xlocator = 'years'
major_xformatter = '%Y'
major_xinterval = 1 + int(ndays / 3000)
major_xlocator2 = None
major_xformatter2 = None
major_xinterval2 = None
minor_xlocator = 'years'
minor_xformatter = '\'%y'
minor_xinterval = 1 + int(ndays / 3300)
minor_xshow = 1 + int(ndays / 3300)
if major_xinterval >= minor_xinterval:
minor_xinterval -= 1
for i in range(0, 10):
if major_xinterval % minor_xinterval != 0:
major_xinterval += 1
else:
break
if minor_xshow >= minor_xinterval / 2:
minor_xshow -= int(minor_xshow / 1.3)
if minor_xshow == 0:
minor_xshow = 1
elif 1825 < ndays <= 3000:
major_xlocator = 'months'
major_xformatter = '%B'
major_xinterval = 10 + int(ndays / 1850)
major_xlocator2 = 'months'
major_xformatter2 = '%Y'
major_xinterval2 = 8
minor_xlocator = 'months'
minor_xformatter = '%b'
minor_xinterval = 1 + int(ndays / 600)
minor_xshow = 1 + int(ndays / 725)
if minor_xshow >= minor_xinterval / 2:
minor_xshow -= int(minor_xshow / 1.25)
for i in range(0, 10):
if major_xinterval % minor_xinterval != 0:
major_xinterval += 1
else:
break
for i in range(0, 10):
if (major_xinterval2 % major_xinterval != 0
or major_xinterval2 == 0):
major_xinterval2 += 1
else:
break
elif 217 < ndays <= 1825:
major_xlocator = 'months'
major_xformatter = '%b %d'
major_xinterval = 3 + int(ndays / 1000) * 2
major_xlocator2 = 'months'
major_xformatter2 = '%Y'
major_xinterval2 = 4 + int(ndays / 800)
minor_xlocator = 'months'
minor_xformatter = '%b'
minor_xinterval = 1 + int(ndays / 600)
minor_xshow = 1 + int(ndays / 725)
if minor_xshow >= minor_xinterval / 2:
minor_xshow -= int(minor_xshow / 1.5)
for i in range(0, 10):
if major_xinterval % minor_xinterval != 0:
major_xinterval += 1
else:
break
for i in range(0, 10):
if (major_xinterval2 % major_xinterval != 0
or major_xinterval2 == 0):
major_xinterval2 += 1
else:
break
elif 6 < ndays <= 217:
major_xlocator = 'days'
major_xformatter = '%b %d'
major_xinterval = 2 + int(ndays / 15) * 2
major_xlocator2 = None
major_xformatter2 = None
major_xinterval2 = None
minor_xlocator = 'days'
minor_xformatter = '%d'
minor_xinterval = 1 + int(ndays / 50)
minor_xshow = 1 + int(ndays / 35)
if minor_xshow >= minor_xinterval:
minor_xshow -= int(minor_xshow / 2.25)
for i in range(0, 10):
if major_xinterval % minor_xinterval != 0:
major_xinterval += 1
else:
break
elif 1 < ndays <= 6:
major_xlocator = 'hours'
major_xformatter = '%H:%M'
major_xinterval = ndays * 5
major_xlocator2 = 'hours'
major_xformatter2 = '%m/%d'
major_xinterval2 = 24
minor_xlocator = 'hours'
minor_xformatter = '%H'
minor_xinterval = int(ndays / 1.5)
minor_xshow = 1 + int(minor_xinterval / 2)
if minor_xshow >= minor_xinterval:
minor_xshow -= int(minor_xshow / 2.25)
for i in range(0, 10):
if major_xinterval % minor_xinterval != 0:
major_xinterval += 1
else:
break
for i in range(0, 25):
if (major_xinterval2 % major_xinterval != 0
or major_xinterval2 == 0):
major_xinterval2 -= 1
else:
break
if minor_xshow <= minor_xinterval:
minor_xshow += 1
elif 0 <= ndays <= 1:
nminutes = (dt_dict['days'] * 1440
+ dt_dict['hours'] * 60
+ dt_dict['minutes']
)
        # max(1, ...) guards against zero intervals for spans of a few minutes
        major_xlocator = 'minutes'
        major_xformatter = '%I:%M %p'
        major_xinterval = max(1, int(nminutes / 3))
        major_xlocator2 = 'minutes'
        major_xformatter2 = '%b %d'
        major_xinterval2 = max(1, int(nminutes / 1.5))
        minor_xlocator = 'minutes'
        minor_xformatter = '%H:%M'
        minor_xinterval = max(1, int(nminutes / 12))
        minor_xshow = 1
if minor_xshow >= 3 and major_xlocator != 'years':
minor_xshow = int(minor_xshow / 1.5)
elif minor_xshow >= 3 and major_xlocator == 'years':
minor_xshow -= int(minor_xshow / 1.5)
if nminutes > 360:
major_xinterval = round_to(major_xinterval, base=15)
minor_xinterval = round_to(minor_xinterval, base=15)
major_xinterval2 = round_to(major_xinterval2, base=15)
if major_xinterval % minor_xinterval != 0:
minor_xinterval = int(major_xinterval / 3)
for i in range(0, 60):
if major_xinterval % minor_xinterval != 0:
minor_xinterval += 1
else:
break
if major_xinterval2 % major_xinterval != 0:
major_xinterval2 = major_xinterval
if minor_xshow <= 0:
minor_xshow = 1
if major_xinterval2 is not None:
if major_xinterval2 <= 0:
major_xinterval2 = major_xinterval
set_major_ticks(ax,
xlocator=major_xlocator,
xformatter=major_xformatter,
xinterval=major_xinterval)
set_major_tick_labels(ax, size=8)
ax2 = ax.twiny()
ax2.set_xlim(ax.get_xlim())
prettify_ax(ax2, ticks=False)
if major_xlocator2 is not None and nrows == 1:
set_major_ticks(ax2,
xlocator=major_xlocator2,
xformatter=major_xformatter2,
xinterval=major_xinterval2)
set_major_tick_labels(ax2, bottom=True, top=False,
pad=24, size=6)
else:
set_major_tick_labels(ax2, xticklabels=[])
set_major_ticks(ax2, xticks=[])
if minor_date_ticks:
set_minor_ticks(ax2,
xlocator=minor_xlocator,
xformatter=minor_xformatter,
xinterval=minor_xinterval,
top=True, bottom=False)
set_minor_tick_labels(ax2, top=True, size=7.5)
set_minor_grid(ax2, xalpha=0.25)
for label in ax2.get_xminorticklabels():
label.set_visible(False) # find a better way?
for label in ax2.get_xminorticklabels()[0::minor_xshow * ncols]:
label.set_visible(True)
return (major_xlocator, major_xinterval, major_xformatter,
minor_xlocator, minor_xinterval, minor_xformatter, dt_bool)
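# Hedged usage sketch for set_date_ticks: assumes plot_line above accepts a
# pandas DatetimeIndex for x and that pd/np are imported in this module; the
# daily series itself is random demo data.
def _demo_set_date_ticks():
    """Minimal example: a year of daily data with auto-chosen date ticks."""
    dates = pd.date_range('2020-01-01', periods=365, freq='D')
    ax = plot_line(dates, np.random.rand(365))
    return set_date_ticks(ax)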
def set_cbar(ax, im,
fig=False,
label='',
fmt='%1.0f',
label_size=7.5,
drawedges=True,
label_color=COLORS['gray'],
tick_locs=None,
tick_size=5,
tick_color=COLORS['gray'],
color=COLORS['black'],
pad=0.1,
aspect=25.5,
shrink=0.2,
length=0,
width=0.25,
direction='out',
orientation='horizontal',
cax=None,
**kwargs):
"""
Set color bar for a map.
:param ax: (mpl.axes) - plot axis
:param im: (mpl.collections/contour) - plotted map
:param fig: (boolean) - whether to plot a figure wide colorbar
:param fmt: (str) - format of color bar labels
:param label_size: (scalar) - size of color bar label
:param label_color: (scalar) - color of color bar label
:param tick_locs: (array) - input own tick marks on color bar
:param tick_size: (scalar) - size of color bar tick labels
:param tick_color: (scalar) - color of color bar tick labels
:param color: (scalar) - color of color bar tick marks
    :param drawedges: (boolean) - whether to draw color edges
:param pad: (scalar) - padding of color bar from plot
:param aspect: (int) - aspect ratio of color bar
:param shrink: (scalar) - size of color bar
:param length: (scalar) - length of color bar tick marks
:param width: (scalar) - width of color bar tick marks
:param direction: (str) - direction of color bar tick marks
:param orientation: (str) - orientation of color bar
:param cax: (mpl.axes) - plot axis to attach to
:param kwargs: (kwargs) - additional keyword arguments
:return cbar: (mpl.ColorBar) - matplotlib color bar
"""
try:
pad = scale_it(ax, pad, 0.00075, exp=True)
label_size = scale_it(ax, label_size, 1.25, exp=True)
tick_size = scale_it(ax, tick_size, 1.25, exp=True)
width = scale_it(ax, width, 0.05, exp=True)
shrink = scale_it(ax, shrink, 0.075)
aspect = scale_it(ax, aspect, 1.25)
geom = plt.getp(plt.getp(ax, 'subplotspec'), 'geometry')
nrows = geom[0]
ncols = geom[1]
shrink *= (nrows + 0.5) / 1.5
tick_size += (nrows + ncols)
if orientation == 'vertical':
shrink *= 2
pad /= 3
if fmt == '%.2f':
rotation = 45
else:
rotation = 0
try:
if not fig:
cbar = plt.colorbar(im, orientation=orientation,
pad=pad,
drawedges=drawedges,
shrink=shrink,
format=fmt,
ticks=tick_locs,
aspect=aspect,
cax=cax,
**kwargs)
else:
figure = plt.getp(ax, 'figure')
cbar = figure.colorbar(im, ax=plt.getp(figure, 'axes'),
orientation=orientation,
pad=pad,
drawedges=drawedges,
shrink=shrink * 1.75,
format=fmt,
ticks=tick_locs,
aspect=aspect,
cax=cax,
**kwargs)
except:
cbar = plt.colorbar(im,
orientation=orientation,
drawedges=drawedges,
format=fmt,
ticks=tick_locs,
cax=cax,
**kwargs)
cbar.ax.tick_params(labelsize=tick_size,
rotation=rotation,
direction=direction,
length=length,
width=width,
tick2On=True,
labelcolor=label_color,
color=color)
cbar.set_label(label, size=label_size, color=label_color)
return cbar
except:
report_err(comment='Could not set color bar; please set manually!')
def get_cmap(colors, n=None, r=False, start=0, stop=1, **kwargs):
"""
Converts a list of colors into a color map or discretizes a registered cmap
http://matplotlib.org/examples/color/colormaps_reference.html
http://www.ncl.ucar.edu/Document/Graphics/color_table_gallery.shtml
:param colors: (list/str) - a list containing RGB or Python/NCL cmap name
:param n: (int) - number of colors in cmap
:param r: (boolean) - reverse colormap
:param start: (scalar) - value to start on the cmap between 0 and 1
:param stop: (scalar) - value to end on the cmap between 0 and 1
:param kwargs: (kwargs) - additional keyword arguments
:return cmap: (mpl.cmap) - color map
"""
try:
if '_r' in colors:
colors = colors[:-2]
r = True
except:
pass
if colors in NCL_CMAP_NAMES:
if r:
color_list = get_color_list(NCL_CMAPS[colors].values[0])[::-1]
cmap = LinearSegmentedColormap.from_list('cmap',
colors=color_list)
else:
cmap = NCL_CMAPS[colors].values[0]
if n is None:
n = NCL_CMAPS[colors].values[1]
else:
if isinstance(colors, str):
if r:
colors += '_r'
if n is None:
n = 10
cmap = plt.get_cmap(colors, **kwargs)
elif isinstance(colors, mpl.colors.LinearSegmentedColormap):
return colors
else:
if r:
colors = colors[::-1]
if n is None and len(colors) > 2:
n = len(colors)
elif n is None:
n = 10
if not isinstance(colors[0], str):
                if (np.array(colors)
import pandas as pd
import numpy as np
import itertools
import os
from scipy import interpolate
from matplotlib import pyplot as plt
import matplotlib.ticker as ticker
from mpl_toolkits.mplot3d import Axes3D
''' Get directory '''
dir_config = '/home/hector/ros/ual_ws/src/upat_follower/config/'
dir_data = '/home/hector/ros/ual_ws/src/upat_follower/data/'
experiment_name = 'jint2020/2UAV-4D-1Conflict'
case_name = 'uav_1-2020-09-23_11-37-23'
dir_experiment = dir_data + 'log/' + experiment_name + '/' + case_name + '/'
dir_save_data = dir_data + 'img/' + experiment_name + '/' + case_name + '/'
''' Create folder to save data '''
if not os.path.exists(dir_save_data):
os.makedirs(dir_save_data)
''' Get csv files '''
try:
default_init_path = pd.read_csv(
dir_experiment + 'init_waypoints.csv', names=['x', 'y', 'z', 'Times'])
except FileNotFoundError:
    print('init_waypoints.csv not found!')
try:
generated_trajectory_m0 = pd.read_csv(
dir_experiment + 'generated_trajectory_m0.csv', names=['x', 'y', 'z'])
except FileNotFoundError:
print('generated_trajectory_m0.csv not found!')
try:
generated_trajectory_m1 = pd.read_csv(
dir_experiment + 'generated_trajectory_m1.csv', names=['x', 'y', 'z'])
except FileNotFoundError:
print('generated_trajectory_m1.csv not found!')
try:
generated_trajectory_m2 = pd.read_csv(
dir_experiment + 'generated_trajectory_m2.csv', names=['x', 'y', 'z'])
except FileNotFoundError:
print('generated_trajectory_m2.csv not found!')
try:
normal_dist_trajectory_m0 = pd.read_csv(
dir_experiment + 'normal_dist_trajectory_m0.csv', names=['curTime', 'desTime', 'Spline', 'Linear', 'PosX', 'PosY', 'PosZ', 'curVelx', 'curVely', 'curVelz', 'desVelx', 'desVely', 'desVelz'])
except FileNotFoundError:
print('normal_dist_trajectory_m0.csv not found!')
try:
normal_dist_trajectory_m1 = pd.read_csv(
dir_experiment + 'normal_dist_trajectory_m1.csv', names=['curTime', 'desTime', 'Spline', 'Linear', 'PosX', 'PosY', 'PosZ', 'curVelx', 'curVely', 'curVelz', 'desVelx', 'desVely', 'desVelz'])
except FileNotFoundError:
print('normal_dist_trajectory_m1.csv not found!')
try:
normal_dist_trajectory_m2 = pd.read_csv(
dir_experiment + 'normal_dist_trajectory_m2.csv', names=['curTime', 'desTime', 'Spline', 'Linear', 'PosX', 'PosY', 'PosZ', 'curVelx', 'curVely', 'curVelz', 'desVelx', 'desVely', 'desVelz'])
except FileNotFoundError:
print('normal_dist_trajectory_m2.csv not found!')
try:
current_trajectory_m0 = pd.read_csv(
dir_experiment + 'current_trajectory_m0.csv', names=['x', 'y', 'z'])
except FileNotFoundError:
print('current_trajectory_m0.csv not found!')
try:
current_trajectory_m1 = pd.read_csv(
dir_experiment + 'current_trajectory_m1.csv', names=['x', 'y', 'z'])
except FileNotFoundError:
print('current_trajectory_m1.csv not found!')
try:
current_trajectory_m2 = pd.read_csv(
dir_experiment + 'current_trajectory_m2.csv', names=['x', 'y', 'z'])
except FileNotFoundError:
print('current_trajectory_m2.csv not found!')
try:
reach_times_trajectory_m0 = pd.read_csv(
dir_experiment + 'reach_times_trajectory_m0.csv', names=['curTime'])
except FileNotFoundError:
print('reach_times_trajectory_m0.csv not found!')
try:
    reach_times_trajectory_m1 = pd.read_csv(
        dir_experiment + 'reach_times_trajectory_m1.csv', names=['curTime'])
except FileNotFoundError:
    print('reach_times_trajectory_m1.csv not found!')
try:
    reach_times_trajectory_m2 = pd.read_csv(
        dir_experiment + 'reach_times_trajectory_m2.csv', names=['curTime'])
except FileNotFoundError:
    print('reach_times_trajectory_m2.csv not found!')
def getTimesWPsReached(_init_path, _normal_dist_trajectory):
min_dist = 1000000
times_wps_reached = []
idx = jdx = 0
for i in _init_path.values:
p1 = np.asarray([_init_path.values[idx, 0],
_init_path.values[idx, 1], _init_path.values[idx, 2]])
min_dist = 1000000
jdx = 0
for j in _normal_dist_trajectory.values:
p2 = np.asarray([_normal_dist_trajectory.values[jdx, 4],
_normal_dist_trajectory.values[jdx, 5], _normal_dist_trajectory.values[jdx, 6]])
temp_dist = np.linalg.norm(p2 - p1)
if temp_dist < min_dist:
min_dist = temp_dist
t_wp_reached = _normal_dist_trajectory.values[jdx, 0]
if jdx < _normal_dist_trajectory.shape[0]-1:
jdx += 1
times_wps_reached.append(t_wp_reached)
idx += 1
return times_wps_reached
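# Hedged alternative sketch: the nested loops above can be collapsed into one
# broadcasted distance computation. The column indices (0 for curTime, 4:7 for
# PosX..PosZ, 0:3 for waypoint x/y/z) follow the CSV layouts read above; this
# helper is illustrative and not part of the original script.
def getTimesWPsReachedVectorized(_init_path, _normal_dist_trajectory):
    waypoints = _init_path.values[:, 0:3]                # (n_wp, 3)
    positions = _normal_dist_trajectory.values[:, 4:7]   # (n_samples, 3)
    times = _normal_dist_trajectory.values[:, 0]         # (n_samples,)
    # pairwise distances between every waypoint and every trajectory sample
    dists = np.linalg.norm(waypoints[:, None, :] - positions[None, :, :], axis=2)
    return times[np.argmin(dists, axis=1)].tolist()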
def getModVelocity(_normal_dist_trajectory):
mod_cur_vel = []
mod_des_vel = []
idx = 0
for i in _normal_dist_trajectory.values:
mod_cur_vel.append(np.sqrt(_normal_dist_trajectory.values[idx, 7] * _normal_dist_trajectory.values[idx, 7] +
_normal_dist_trajectory.values[idx, 8] * _normal_dist_trajectory.values[idx, 8] +
_normal_dist_trajectory.values[idx, 9] * _normal_dist_trajectory.values[idx, 9]))
mod_des_vel.append(np.sqrt(_normal_dist_trajectory.values[idx, 10] * _normal_dist_trajectory.values[idx, 10] +
_normal_dist_trajectory.values[idx, 11] * _normal_dist_trajectory.values[idx, 11] +
_normal_dist_trajectory.values[idx, 12] * _normal_dist_trajectory.values[idx, 12]))
idx += 1
return mod_cur_vel, mod_des_vel
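# Hedged alternative sketch: speed magnitudes computed with numpy instead of
# the Python loop above; columns 7:10 (current vx/vy/vz) and 10:13 (desired
# vx/vy/vz) follow the CSV layout assumed earlier. Illustrative only.
def getModVelocityVectorized(_normal_dist_trajectory):
    values = _normal_dist_trajectory.values
    mod_cur_vel = np.linalg.norm(values[:, 7:10], axis=1)
    mod_des_vel = np.linalg.norm(values[:, 10:13], axis=1)
    return mod_cur_vel.tolist(), mod_des_vel.tolist()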
def getDesiredTimesForNonTrajectory(_default_times, _normal_dist_trajectory):
generated_times = []
array_len_ndist = []
array_len_times = []
def_times = []
idx = 0
for i in _normal_dist_trajectory.values:
array_len_ndist.append(idx)
idx += 1
idx = 0
for i in _default_times.values:
array_len_times.append(idx)
idx += 1
idx = 0
for i in _default_times.values:
def_times.append(_default_times.values[idx, 0])
idx += 1
# generated_times = np.interp(array_len_ndist, array_len_times, def_times)
def_times = [0.0, 30.0, 14.0, 42.0]
test_array = np.linspace(0.0, 42.0, 1118)
array_len_times = np.linspace(0.0, 42, 4)
# test_array = np.arange(0.0, 42.0, 42.0/1118.0)
print(len(test_array))
# print (array_len_ndist, len(array_len_ndist))
print(array_len_times, len(array_len_times))
print(def_times, len(def_times))
# x2 = def_times
# y2 = array_len_times
# xinterp = np.arange(len(_normal_dist_trajectory))
# yinterp1 = np.interp(xinterp, x2, y2)
generated_times = np.interp(test_array, array_len_times, def_times)
plt.plot(generated_times)
plt.show()
print(generated_times)
    # generated_times = interpolate.interp1d(array_len_times, def_times, array_len_ndist)
# generated_times = yinterp1
return generated_times
def plot3DFigure(_compare_path, _current_trajectory, _num):
figN = plt.figure(num='Mode ' + str(_num) +
' 3D behaviour', figsize=(6, 4))
    axN = figN.add_subplot(projection='3d')
axN.plot(default_init_path.x, default_init_path.y, default_init_path.z, 'ko',
# color="0.5"
)
# ax1.plot(default_init_path.x, default_init_path.y, default_init_path.z, 'y')
axN.plot(_compare_path.x, _compare_path.y, _compare_path.z, 'r--',
# color="0"
)
axN.plot(_current_trajectory.x, _current_trajectory.y,
_current_trajectory.z, 'b', alpha=0.9
# color="0.4"
)
axN.legend(['Waypoints', 'Generated trajectory', 'Actual trajectory'])
axN.set_xlim(35, 45)
axN.set_ylim(-60, 40)
axN.set_zlim(3, 7)
axN.set_xlabel('X axis')
axN.set_ylabel('Y axis')
axN.set_zlabel('Z axis')
figN.savefig(dir_save_data + '3D_traj_m' +
str(_num) + '.eps', format='eps', dpi=1200, bbox_inches="tight")
return figN
def plot2DFigures(_normal_dist_trajectory, _times_wps_reached, _default_times, _num):
plt.figure(num='Mode ' + str(_num) + ' normal distance', figsize=(6, 3))
plt.plot(_normal_dist_trajectory.curTime,
_normal_dist_trajectory.Spline, 'b', label="Normal distance to trajectory")
plt.xlabel('Time (s)')
plt.ylabel('Distance (m)')
# plt.ylim(top=2)
# plt.axes().xaxis.set_major_locator(ticker.MultipleLocator(5))
# idx = 0
# for xc in _default_times:
# plt.axvline(x=xc, color='r', linestyle='--', alpha=0.7,
# label='WP ' + str(idx+1) + ' desired time ' +
# str(_default_times[idx])
# )
# idx += 1
idx = 0
for xc in _times_wps_reached:
plt.axvline(x=xc, color='grey', linestyle='--', alpha=0.7,
label='WP ' + str(idx+1) + ' reached at ' +
str(_times_wps_reached[idx])
)
idx += 1
# str_times_wps_reached = "WPs reached at " + str(_times_wps_reached[0]) + "s, " + str(_times_wps_reached[1]) + "s, " + str(_times_wps_reached[2]) + "s, " + str(_times_wps_reached[3]) + "s"
# str_default_times = "WPs desired time " + str(_default_times[0, 0]) + "s, " + str(_default_times[1, 0]) + "s, " + str(_default_times[2, 0]) + "s, " + str(_default_times[3, 0]) + "s"
# plt.legend(['Normal distance to trajectory', str_default_times,
# str_times_wps_reached], fontsize='small')
# plt.gca().get_legend().legendHandles[0].set_color('blue')
# plt.gca().get_legend().legendHandles[1].set_color('green')
# plt.gca().get_legend().legendHandles[2].set_color('red')
plt.legend(['Normal distance', 'WP reached'])
# plt.legend(fontsize='x-small')
plt.savefig(dir_save_data + 'ndist_traj_m' +
str(_num)+'.eps', format='eps', dpi=1200, bbox_inches='tight')
plt.show(block=False)
''' Plot Velocities trajectory m0 '''
mod_cur_vel = []
mod_des_vel = []
mod_cur_vel, mod_des_vel = getModVelocity(_normal_dist_trajectory)
plt.figure(num='Velocities trajectory mode '+str(_num), figsize=(6, 3))
plt.plot(_normal_dist_trajectory.curTime,
mod_cur_vel, label="Current |v|", color='b')
plt.plot(_normal_dist_trajectory.curTime, mod_des_vel,
label="Desired |v|", color='r', alpha=0.7)
idx = 0
for xc in _times_wps_reached:
plt.axvline(x=xc, color='grey', linestyle='--', alpha=0.7, label='WP ' + str(idx+1) + ' reached: ' +
str(_times_wps_reached[idx]))
idx += 1
plt.xlabel('Time (s)')
plt.ylabel('Velocity (m/s)')
plt.legend(['Current |v|', 'Desired |v|', 'WP reached'])
plt.savefig(dir_save_data + 'vel_traj_m' +
str(_num)+'.eps', format='eps', dpi=1200, bbox_inches='tight')
plt.show(block=False)
plt.figure(num='Delta of Times mode '+str(_num), figsize=(6, 3))
plt.plot(_normal_dist_trajectory.curTime,
_normal_dist_trajectory.desTime - _normal_dist_trajectory.curTime, color='b')
idx = 0
for xc in _times_wps_reached:
plt.axvline(x=xc, color='grey', linestyle='--', alpha=0.7)
idx += 1
plt.xlabel('Current time (s)')
plt.ylabel('Desired time - Current time (s)')
# plt.ylim(bottom=-6)
plt.legend(['Difference of times', 'WP reached'])
plt.savefig(dir_save_data + 'deltaT_traj_m' +
str(_num)+'.eps', format='eps', dpi=1200, bbox_inches='tight')
plt.show(block=False)
if 'default_init_path' in globals():
if 'normal_dist_trajectory_m0' in globals():
times_wps_reached_m0 = getTimesWPsReached(
default_init_path, normal_dist_trajectory_m0)
plot3DFigure(generated_trajectory_m0, current_trajectory_m0, 0)
plot2DFigures(normal_dist_trajectory_m0,
times_wps_reached_m0, default_init_path.Times, 0)
plt.show(block=False)
''' Print results of the normal distance through path '''
print(
'-----------------------------------------------------------------------------')
print('Trajectory m0 -> max: {:.3f}, min: {:.3f}, mean: {:.3f}, std: {:.3f}, var: {:.3f}'.format(np.max(normal_dist_trajectory_m0.Linear), np.min(
normal_dist_trajectory_m0.Linear), np.mean(normal_dist_trajectory_m0.Linear), np.std(normal_dist_trajectory_m0.Linear), np.var(normal_dist_trajectory_m0.Linear)))
if 'normal_dist_trajectory_m1' in globals():
times_wps_reached_m1 = getTimesWPsReached(
default_init_path, normal_dist_trajectory_m1)
plot3DFigure(generated_trajectory_m1, current_trajectory_m1, 1)
plot2DFigures(normal_dist_trajectory_m1,
times_wps_reached_m1, default_init_path.Times, 1)
plt.show(block=True)
print(
'-----------------------------------------------------------------------------')
print('Trajectory m1 -> max: {:.3f}, min: {:.3f}, mean: {:.3f}, std: {:.3f}, var: {:.3f}'.format(np.max(normal_dist_trajectory_m1.Spline), np.min(
normal_dist_trajectory_m1.Spline), np.mean(normal_dist_trajectory_m1.Spline), np.std(normal_dist_trajectory_m1.Spline), np.var(normal_dist_trajectory_m1.Spline)))
if 'normal_dist_trajectory_m2' in globals():
times_wps_reached_m2 = getTimesWPsReached(
default_init_path, normal_dist_trajectory_m2)
plot3DFigure(generated_trajectory_m2, current_trajectory_m2, 2)
plot2DFigures(normal_dist_trajectory_m2,
times_wps_reached_m2, default_init_path.Times, 2)
plt.show(block=True)
print(
'-----------------------------------------------------------------------------')
        print('Trajectory m2 -> max: {:.3f}, min: {:.3f}, mean: {:.3f}, std: {:.3f}, var: {:.3f}'.format(np.max(normal_dist_trajectory_m2.Spline), np.min(
            normal_dist_trajectory_m2.Spline), np.mean(normal_dist_trajectory_m2.Spline), np.std(normal_dist_trajectory_m2.Spline), np.var(normal_dist_trajectory_m2.Spline)))
# -*- coding: utf-8 -*-
# Copyright 2019 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# IBM-Review-Requirement: Art30.3
# Please note that the following code was developed for the project VaVeL at IBM Research
# -- Ireland, funded by the European Union under the Horizon 2020 Program.
# The project started on December 1st, 2015 and was completed by December 1st,
# 2018. Thus, in accordance with Article 30.3 of the Multi-Beneficiary General
# Model Grant Agreement of the Program, the above limitations are in force.
# For further details please contact <NAME> (<EMAIL>),
# or <NAME> (<EMAIL>).
# If you use this code, please cite our paper:
# @inproceedings{kozdoba2018,
# title={On-Line Learning of Linear Dynamical Systems: Exponential Forgetting in Kalman Filters},
# author={Kozdoba, <NAME> Marecek, <NAME> and <NAME>},
# booktitle = {The Thirty-Third AAAI Conference on Artificial Intelligence (AAAI-19)},
# note={arXiv preprint arXiv:1809.05870},
# year={2019}
#}
from __future__ import print_function
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import scipy.optimize as opt
import numpy as np
import rlcompleter
from sklearn.metrics import f1_score
import time
import timeit
import math
# debugging
import pdb
pdb.Pdb.complete=rlcompleter.Completer(locals()).complete
import traceback
# Matlab loading
import tables
from scipy.io import loadmat
verbose = False
from onlinelds import *
from inputlds import *
def close_all_figs():
plt.close('all')
def testIdentification(sys, filenameStub = "test", noRuns = 2, T = 100, k = 5, etaZeros = None, ymin = None, ymax = None, sequenceLabel = None, haveSpectral = True):
""" noRuns is the number of runs, T is the time horizon, k is the number of filters, """
if k>T:
print("Number of filters (k) must be less than or equal to the number of time-steps (T).")
exit()
if not etaZeros:
etaZeros = [1.0, 2500.0]
print("etaZeros:")
print(etaZeros)
filename = './outputs/' + filenameStub+'.pdf'
pp = PdfPages(filename)
error_AR_data = None
error_spec_data = None
error_persist_data = None
for i in range(noRuns):
print("run %i" % i)
inputs = np.zeros(T)
sys.solve([[1],[0]],inputs,T)
if haveSpectral:
predicted_spectral, M, error_spec, error_persist = wave_filtering_SISO_ftl(sys, T, k)
if error_spec_data is None: error_spec_data = error_spec
else: error_spec_data = np.vstack((error_spec_data, error_spec))
if error_persist_data is None: error_persist_data = error_persist
else: error_persist_data = np.vstack((error_persist_data, error_persist))
for etaZero in etaZeros:
error_AR = np.zeros(T)
predicted_AR = np.zeros(T)
s=2
D=1.
theta = [0 for i in range(s)]
for t in range(s,T):
eta = pow(float(t),-0.5) / etaZero
Y = sys.outputs[t]
loss = cost_AR(theta, Y, list(reversed(sys.outputs[t-s:t])))
error_AR[t] = pow(loss, 0.5)
grad = gradient_AR(theta, Y, list(reversed(sys.outputs[t-s:t])))
#print("Loss: at time step %d :" % (t), loss)
theta = [theta[i] -eta*grad[i] for i in range(len(theta))] #gradient step
norm_theta = np.linalg.norm(theta)
if norm_theta>D: theta = [D*i/norm_theta for i in theta] #projection step
predicted_AR[t] = np.dot(list(reversed(sys.outputs[t-s:t])),theta)
if error_AR_data is None: error_AR_data = error_AR
else: error_AR_data = np.vstack((error_AR_data, error_AR))
p1 = plt.figure()
if ymax and ymin: plt.ylim(ymin, ymax)
if sum(inputs[1:]) > 0: plt.plot(inputs[1:], label='Input')
if sequenceLabel: plt.plot([float(i) for i in sys.outputs][1:], label=sequenceLabel, color='#000000', linewidth=2, antialiased = True)
else: plt.plot([float(i) for i in sys.outputs][1:], label='Output', color='#000000', linewidth=2, antialiased = True)
#plt.plot([-i for i in predicted_output], label='Predicted output') #for some reason, usual way produces -ve estimate
if haveSpectral:
plt.plot([i for i in predicted_spectral], label='Spectral')
#lab = 'AR(3) / OGD, c_0 = ' + str(etaZero)
lab = "AR(" + str(s) + "), c = " + str(int(etaZero))
plt.plot(predicted_AR, label = lab)
plt.legend()
plt.xlabel('Time')
plt.ylabel('Output')
p1.show()
p1.savefig(pp, format='pdf')
p2 = plt.figure()
plt.ylim(0, 20)
if haveSpectral:
plt.plot(error_spec, label='Spectral')
plt.plot(error_persist, label='Persistence')
plt.plot(error_AR, label=lab)
plt.legend()
p2.show()
plt.xlabel('Time')
plt.ylabel('Error')
p2.savefig(pp, format='pdf')
error_AR_mean = np.mean(error_AR_data, 0)
error_AR_std = np.std(error_AR_data, 0)
if haveSpectral:
error_spec_mean = np.mean(error_spec_data, 0)
error_spec_std = np.std(error_spec_data, 0)
error_persist_mean = np.mean(error_persist_data, 0)
error_persist_std = np.std(error_persist_data, 0)
p3 = plt.figure()
if ymax and ymin: plt.ylim(ymin, ymax)
if haveSpectral:
plt.plot(error_spec_mean, label='Spectral', color='#1B2ACC', linewidth=2, antialiased = True)
plt.fill_between(range(0,T-1), error_spec_mean-error_spec_std, error_spec_mean+error_spec_std, alpha=0.2, edgecolor='#1B2ACC', facecolor='#089FFF',
linewidth=1, antialiased=True)
plt.plot(error_persist_mean, label='Persistence', color='#CC1B2A', linewidth=2, antialiased = True)
plt.fill_between(range(0,T-1), error_persist_mean-error_persist_std, error_persist_mean+error_persist_std, alpha=0.2, edgecolor='#CC1B2A', facecolor='#FF0800',
linewidth=1, antialiased=True)
cAR1 = (42.0/255, 204.0 / 255.0, 1.0/255)
bAR1 = (1.0, 204.0 / 255.0, 0.0) # , alphaValue
plt.ylim(0, 20)
plt.plot(error_AR_mean, label='AR(3)', color=cAR1, linewidth=2, antialiased = True)
plt.fill_between(range(0,T), error_AR_mean-error_AR_std, error_AR_mean+error_AR_std, alpha=0.2, edgecolor=cAR1, facecolor=bAR1,
linewidth=1, antialiased=True)
plt.legend()
plt.xlabel('Time')
plt.ylabel('Error')
p3.savefig(pp, format='pdf')
pp.close()
print("See the output in " + filename)
def testIdentification2(T = 100, noRuns = 10, sChoices = [15,3,1], haveKalman = False, haveSpectral = True, G = np.matrix([[0.999,0],[0,0.5]]), F_dash = np.matrix([[1,1]]), sequenceLabel = ""):
if haveKalman: sChoices = sChoices + [T]
if len(sequenceLabel) > 0: sequenceLabel = " (" + sequenceLabel + ")"
if noRuns < 2:
print("Number of runs has to be larger than 1.")
exit()
filename = './outputs/AR.pdf'
pp = PdfPages(filename)
################# SYSTEM ###################
proc_noise_std = 0.5
obs_noise_std = 0.5
error_spec_data = None
error_persist_data = None
error_AR1_data = None
error_Kalman_data = None
for runNo in range(noRuns):
sys = dynamical_system(G,np.zeros((2,1)),F_dash,np.zeros((1,1)),
process_noise='gaussian',
observation_noise='gaussian',
process_noise_std=proc_noise_std,
observation_noise_std=obs_noise_std,
timevarying_multiplier_b = None)
inputs = np.zeros(T)
sys.solve([[1],[1]],inputs,T)
Y = [i[0,0] for i in sys.outputs]
#pdb.set_trace()
############################################
########## PRE-COMPUTE FILTER PARAMS ###################
n = G.shape[0]
m = F_dash.shape[0]
W = proc_noise_std**2 * np.matrix(np.eye(n))
V = obs_noise_std**2 * np.matrix(np.eye(m))
#m_t = [np.matrix([[0],[0]])]
C = [np.matrix(np.eye(2))]
R = []
Q = []
A = []
Z = []
for t in range(T):
R.append(G * C[-1] * G.transpose() + W)
Q.append(F_dash * R[-1] * F_dash.transpose() + V)
A.append(R[-1]*F_dash.transpose()*np.linalg.inv(Q[-1]))
C.append(R[-1] - A[-1]*Q[-1]*A[-1].transpose() )
Z.append(G*( np.eye(2) - A[-1] * F_dash ))
#PREDICTION
plt.plot(Y, label='Output', color='#000000', linewidth=2, antialiased = True)
for s in sChoices:
Y_pred=[]
for t in range(T):
Y_pred_term1 = F_dash * G * A[t] * sys.outputs[t]
if t==0:
Y_pred.append(Y_pred_term1)
continue
acc = 0
for j in range(min(t,s)+1):
for i in range(j+1):
if i==0:
ZZ=Z[t-i]
continue
ZZ = ZZ*Z[t-i]
acc += ZZ * G * A[t-j-1] * Y[t-j-1]
Y_pred.append(Y_pred_term1 + F_dash*acc)
#print(np.linalg.norm([Y_pred[i][0,0] - Y[i] for i in range(len(Y))]))
#print(lab)
if s == 1:
if error_AR1_data is None: error_AR1_data = np.array([pow(np.linalg.norm(Y_pred[i][0,0] - Y[i]), 2) for i in range(len(Y))])
else:
#print(error_AR1_data.shape)
error_AR1_data = np.vstack((error_AR1_data, [pow(np.linalg.norm(Y_pred[i][0,0] - Y[i]), 2) for i in range(len(Y))]))
if s == T:
# For the spectral filtering etc, we use: loss = pow(np.linalg.norm(sys.outputs[t] - y_pred), 2)
if error_Kalman_data is None: error_Kalman_data = np.array([pow(np.linalg.norm(Y_pred[i][0,0] - Y[i]), 2) for i in range(len(Y))])
else: error_Kalman_data = np.vstack((error_Kalman_data, [pow(np.linalg.norm(Y_pred[i][0,0] - Y[i]), 2) for i in range(len(Y))]))
plt.plot([i[0,0] for i in Y_pred], label="Kalman" + sequenceLabel, color=(42.0/255.0, 204.0 / 255.0, 200.0/255.0), linewidth=2, antialiased = True)
else:
plt.plot([i[0,0] for i in Y_pred], label='AR(%i)' % (s+1) + sequenceLabel, color=(42.0/255.0, 204.0 / 255.0, float(min(255.0,s))/255.0), linewidth=2, antialiased = True)
plt.xlabel('Time')
plt.ylabel('Prediction')
if haveSpectral:
predicted_output, M, error_spec, error_persist = wave_filtering_SISO_ftl(sys, T, 5)
plt.plot(predicted_output, label='Spectral' + sequenceLabel, color='#1B2ACC', linewidth=2, antialiased = True)
if error_spec_data is None: error_spec_data = error_spec
else: error_spec_data = np.vstack((error_spec_data, error_spec))
if error_persist_data is None: error_persist_data = error_persist
else: error_persist_data = np.vstack((error_persist_data, error_persist))
plt.legend()
plt.savefig(pp, format='pdf')
plt.close('all')
#plt.show()
if haveSpectral:
error_spec_mean = np.mean(error_spec_data, 0)
error_spec_std = np.std(error_spec_data, 0)
error_persist_mean = np.mean(error_persist_data, 0)
error_persist_std = np.std(error_persist_data, 0)
error_AR1_mean = np.mean(error_AR1_data, 0)
error_AR1_std = np.std(error_AR1_data, 0)
if haveKalman:
error_Kalman_mean = np.mean(error_Kalman_data, 0)
error_Kalman_std = np.std(error_Kalman_data, 0)
for (ylim, alphaValue) in [((0, 100.0), 0.2), ((0.0, 1.0), 0.05)]:
for Tlim in [T-1, min(T-1, 20)]:
#p3 = plt.figure()
p3, ax = plt.subplots()
plt.ylim(ylim)
if haveSpectral:
plt.plot(range(0,Tlim), error_spec[:Tlim], label='Spectral' + sequenceLabel, color='#1B2ACC', linewidth=2, antialiased = True)
plt.fill_between(range(0,Tlim), (error_spec_mean-error_spec_std)[:Tlim], (error_spec_mean+error_spec_std)[:Tlim], alpha=alphaValue, edgecolor='#1B2ACC', facecolor='#089FFF', linewidth=1, antialiased=True)
plt.plot(range(0,Tlim), error_persist[:Tlim], label='Persistence' + sequenceLabel, color='#CC1B2A', linewidth=2, antialiased = True)
plt.fill_between(range(0,Tlim), (error_persist_mean-error_persist_std)[:Tlim], (error_persist_mean+error_persist_std)[:Tlim], alpha=alphaValue, edgecolor='#CC1B2A', facecolor='#FF0800', linewidth=1, antialiased=True)
#import matplotlib.transforms as mtransforms
#trans = mtransforms.blended_transform_factory(ax.transData, ax.transData)
#trans = mtransforms.blended_transform_factory(ax.transData, ax.transAxes)
cAR1 = (42.0/255, 204.0 / 255.0, 1.0/255)
bAR1 = (1.0, 204.0 / 255.0, 0.0) # , alphaValue
print(cAR1)
print(bAR1)
#print(error_AR1_data)
#print(error_AR1_mean)
#print(Tlim)
plt.plot(error_AR1_mean[:Tlim], label='AR(2)' + sequenceLabel, color=cAR1, linewidth=2, antialiased = True)
plt.fill_between(range(0,Tlim), (error_AR1_mean-error_AR1_std)[:Tlim], (error_AR1_mean+error_AR1_std)[:Tlim], alpha=alphaValue, edgecolor=cAR1, facecolor=bAR1, linewidth=1, antialiased=True) #transform=trans) #offset_position="data") alpha=alphaValue,
if haveKalman:
cK = (42.0/255.0, 204.0 / 255.0, 200.0/255.0)
bK = (1.0, 204.0 / 255.0, 200.0/255.0) # alphaValue
print(cK)
print(bK)
plt.plot(error_Kalman_mean[:Tlim], label='Kalman' + sequenceLabel, color=cK, linewidth=2, antialiased = True)
plt.fill_between(range(0,Tlim), (error_Kalman_mean-error_Kalman_std)[:Tlim], (error_Kalman_mean+error_Kalman_std)[:Tlim], alpha=alphaValue, facecolor=bK, edgecolor=cK, linewidth=1, antialiased=True) # transform = trans) #offset_position="data")
plt.legend()
plt.xlabel('Time')
plt.ylabel('Error')
#p3.show()
p3.savefig(pp, format='pdf')
pp.close()
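# Hedged usage sketch: one way to drive the experiment above. The horizon,
# number of runs and the example system matrix are arbitrary demo choices,
# not the settings used in the accompanying paper.
def _demo_testIdentification2():
    G_example = np.matrix([[0.95, 0.0], [0.0, 0.7]])
    testIdentification2(T=120, noRuns=5, sChoices=[3, 1],
                        haveKalman=True, haveSpectral=False,
                        G=G_example, sequenceLabel="demo")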
# This is taken from pyplot documentation
def heatmap(data, row_labels, col_labels, ax=None,
cbar_kw={}, cbarlabel="", **kwargs):
"""
Create a heatmap from a numpy array and two lists of labels.
Arguments:
data : A 2D numpy array of shape (N,M)
row_labels : A list or array of length N with the labels
for the rows
col_labels : A list or array of length M with the labels
for the columns
Optional arguments:
ax : A matplotlib.axes.Axes instance to which the heatmap
is plotted. If not provided, use current axes or
create a new one.
cbar_kw : A dictionary with arguments to
:meth:`matplotlib.Figure.colorbar`.
cbarlabel : The label for the colorbar
All other arguments are directly passed on to the imshow call.
"""
if not ax:
ax = plt.gca()
# Plot the heatmap
im = ax.imshow(data, **kwargs)
# Create colorbar
cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)
cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom")
# We want to show all ticks...
ax.set_xticks(np.arange(data.shape[1]))
ax.set_yticks(np.arange(data.shape[0]))
# ... and label them with the respective list entries.
ax.set_xticklabels(col_labels)
ax.set_yticklabels(row_labels)
# Let the horizontal axes labeling appear on top.
ax.tick_params(top=True, bottom=False,
labeltop=True, labelbottom=False)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=-30, ha="right",
rotation_mode="anchor")
# Turn spines off and create white grid.
for edge, spine in ax.spines.items():
spine.set_visible(False)
ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)
ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)
ax.grid(which="minor", color="w", linestyle='-', linewidth=3)
ax.tick_params(which="minor", bottom=False, left=False)
return im, cbar
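# Hedged usage sketch for the heatmap helper above; the 4x3 array and its
# row/column labels are made-up demo values.
def _demo_heatmap():
    data = np.arange(12).reshape(4, 3).astype(float)
    fig, ax = plt.subplots()
    im, cbar = heatmap(data,
                       row_labels=['r1', 'r2', 'r3', 'r4'],
                       col_labels=['c1', 'c2', 'c3'],
                       ax=ax, cmap='viridis', cbarlabel='demo value')
    return im, cbar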
def testNoiseImpact(T = 50, noRuns = 10, discretisation = 10):
filename = './outputs/noise.pdf'
pp = PdfPages(filename)
for s in [1, 2, 3, 7]:
data = np.zeros((discretisation, discretisation))
diff = np.zeros((discretisation, discretisation))
ratio = np.zeros((discretisation, discretisation))
errKalman = np.zeros((discretisation, discretisation))
errAR = np.zeros((discretisation, discretisation))
################# SYSTEM ###################
G = np.matrix([[0.999,0],[0,0.5]])
F_dash = np.matrix([[1,1]])
for proc_noise_i in range(discretisation):
proc_noise_std = float(proc_noise_i + 1) / (discretisation - 1)
for obs_noise_i in range(discretisation):
obs_noise_std = float(obs_noise_i + 1) / (discretisation - 1)
for runNo in range(noRuns):
sys = dynamical_system(G,np.zeros((2,1)),F_dash,np.zeros((1,1)),
process_noise='gaussian',
observation_noise='gaussian',
process_noise_std=proc_noise_std,
observation_noise_std=obs_noise_std,
timevarying_multiplier_b = None)
                    inputs = np.zeros(T)
import pymzml
import numpy as np
import pandas as pd
from tqdm import tqdm
class ResultTable:
def __init__(self, files, features):
n_features = len(features)
n_files = len(files)
self.files = {k: v for v, k in enumerate(files)}
self.intensities = np.zeros((n_files, n_features))
        self.mz = np.zeros(n_features)
# coding=utf-8
import random
import string
import zipfile
import numpy as np
import tensorflow as tf
from not_mnist.img_pickle import save_obj, load_pickle
from not_mnist.load_data import maybe_download
def read_data(filename):
f = zipfile.ZipFile(filename)
for name in f.namelist():
return tf.compat.as_str(f.read(name))
f.close()
data_set = load_pickle('text8_text.pickle')
if data_set is None:
# load data
url = 'http://mattmahoney.net/dc/'
filename = maybe_download('text8.zip', 31344016, url=url)
# read data
text = read_data(filename)
print('Data size %d' % len(text))
save_obj('text8_text.pickle', text)
else:
text = data_set
# Create a small validation set.
valid_size = 1000
valid_text = text[:valid_size]
train_text = text[valid_size:]
train_size = len(train_text)
print(train_size, train_text[:64])
print(valid_size, valid_text[:64])
# Utility functions to map characters to vocabulary IDs and back.
vocabulary_size = len(string.ascii_lowercase) + 1 # [a-z] + ' '
# ascii code for character
first_letter = ord(string.ascii_lowercase[0])
def char2id(char):
if char in string.ascii_lowercase:
return ord(char) - first_letter + 1
elif char == ' ':
return 0
else:
print('Unexpected character: %s' % char)
return 0
def id2char(dictid):
if dictid > 0:
return chr(dictid + first_letter - 1)
else:
return ' '
print(char2id('a'), char2id('z'), char2id(' '), char2id('ï'))
print(id2char(1), id2char(26), id2char(0))
bi_voc_size = vocabulary_size * vocabulary_size
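# Hedged sketch: the bigram batch generator below works on character pairs, so
# a bigram id can be built from two character ids; these helpers are
# illustrative and not part of the original script.
def bigram2id(char1, char2):
    return char2id(char1) * vocabulary_size + char2id(char2)
def id2bigram(bigram_id):
    return id2char(bigram_id // vocabulary_size) + id2char(bigram_id % vocabulary_size)
# e.g. bigram2id('a', 'b') == 1 * 27 + 2 == 29 and id2bigram(29) == 'ab'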
class BiBatchGenerator(object):
def __init__(self, text, batch_size, num_unrollings):
self._text = text
self._text_size_in_chars = len(text)
self._text_size = self._text_size_in_chars // 2 # in bigrams
self._batch_size = batch_size
self._num_unrollings = num_unrollings
segment = self._text_size // batch_size
self._cursor = [offset * segment for offset in range(batch_size)]
self._last_batch = self._next_batch()
def _next_batch(self):
        batch = np.zeros(shape=self._batch_size, dtype=int)
"""
This module is an example of a barebones function plugin for napari
It implements the ``napari_experimental_provide_function`` hook specification.
see: https://napari.org/docs/dev/plugins/hook_specifications.html
Replace code below according to your needs.
"""
from __future__ import print_function, division
from typing import TYPE_CHECKING, DefaultDict
from unicodedata import name
import six
# import modules
import sys # input, output, errors, and files
import os # interacting with file systems
import time # getting time
import datetime
import inspect # get passed parameters
import yaml # parameter importing
import json # for importing tiff metadata
try:
import cPickle as pickle # loading and saving python objects
except:
import pickle
import numpy as np # numbers package
import struct # for interpreting strings as binary data
import re # regular expressions
from pprint import pprint # for human readable file output
import traceback # for error messaging
import warnings # error messaging
import copy # not sure this is needed
import h5py # working with HDF5 files
import pandas as pd
import networkx as nx
import collections
# scipy and image analysis
from scipy.signal import find_peaks_cwt # used in channel finding
from scipy.optimize import curve_fit # fitting ring profile
from scipy.optimize import leastsq # fitting 2d gaussian
from scipy import ndimage as ndi # labeling and distance transform
from skimage import io
from skimage import segmentation # used in make_masks and segmentation
from skimage.transform import rotate
from skimage.feature import match_template # used to align images
from skimage.feature import blob_log # used for foci finding
from skimage.filters import threshold_otsu, median # segmentation
from skimage import filters
from skimage import morphology # many segmentation functions are used from this
from skimage.measure import regionprops # used for creating lineages
from skimage.measure import profile_line # used for ring and nucleoid analysis
from skimage import util, measure, transform, feature
import tifffile as tiff
from sklearn import metrics
# deep learning
import tensorflow as tf # ignore message about how tf was compiled
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import models
from tensorflow.keras import losses
from tensorflow.keras import utils
from tensorflow.keras import backend as K
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # suppress warnings
# Parallelization modules
import multiprocessing
from multiprocessing import Pool
# Plotting for debug
import matplotlib as mpl
font = {'family' : 'sans-serif',
'weight' : 'normal',
'size' : 12}
mpl.rc('font', **font)
mpl.rcParams['pdf.fonttype'] = 42
from matplotlib.patches import Ellipse
from pathlib import Path
import time
import matplotlib.pyplot as plt
# additional modules
import glob
import pims_nd2
from scipy import stats
from tensorflow.python.keras import models
from enum import Enum
from napari_plugin_engine import napari_hook_implementation
import matplotlib.gridspec as gridspec
from skimage.exposure import rescale_intensity # for displaying in GUI
# import mm3_helpers as mm3
import napari
# This is the actual plugin function, where we export our function
# (The functions themselves are defined below)
@napari_hook_implementation
def napari_experimental_provide_function():
# we can return a single function
# or a tuple of (function, magicgui_options)
# or a list of multiple functions with or without options, as shown here:
#return [Segment, threshold, image_arithmetic]
return [Compile, ChannelPicker, Segment]
# 1. First example, a simple function that thresholds an image and creates a labels layer
def threshold(data: "napari.types.ImageData", threshold: int) -> "napari.types.LabelsData":
"""Threshold an image and return a mask."""
return (data > threshold).astype(int)
# print a warning
def warning(*objs):
print(time.strftime("%H:%M:%S WARNING:", time.localtime()), *objs, file=sys.stderr)
# print information
def information(*objs):
print(time.strftime("%H:%M:%S", time.localtime()), *objs, file=sys.stdout)
def julian_day_number():
"""
Need this to solve a bug in pims_nd2.nd2reader.ND2_Reader instance initialization.
The bug is in /usr/local/lib/python2.7/site-packages/pims_nd2/ND2SDK.py in function `jdn_to_datetime_local`, when the year number in the metadata (self._lim_metadata_desc) is not in the correct range. This causes a problem when calling self.metadata.
https://en.wikipedia.org/wiki/Julian_day
"""
dt=datetime.datetime.now()
tt=dt.timetuple()
jdn=(1461.*(tt.tm_year + 4800. + (tt.tm_mon - 14.)/12))/4. + (367.*(tt.tm_mon - 2. - 12.*((tt.tm_mon -14.)/12)))/12. - (3.*((tt.tm_year + 4900. + (tt.tm_mon - 14.)/12.)/100.))/4. + tt.tm_mday - 32075
return jdn
def get_plane(filepath):
pattern = r'(c\d+).tif'
res = re.search(pattern,filepath)
if (res != None):
return res.group(1)
else:
return None
def get_fov(filepath):
pattern = r'xy(\d+)\w*.tif'
res = re.search(pattern,filepath)
if (res != None):
return int(res.group(1))
else:
return None
def get_time(filepath):
pattern = r't(\d+)xy\w+.tif'
res = re.search(pattern,filepath)
if (res != None):
return np.int_(res.group(1))
else:
return None
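# Quick illustration of the three filename parsers above; the filename is a hypothetical
# example following the t<frame>xy<fov>c<plane>.tif convention used by this pipeline:
#   get_time('t0001xy003c1.tif')  -> 1
#   get_fov('t0001xy003c1.tif')   -> 3
#   get_plane('t0001xy003c1.tif') -> 'c1'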
# loads and image stack from TIFF or HDF5 using mm3 conventions
def load_stack(fov_id, peak_id, color='c1', image_return_number=None):
'''
Loads an image stack.
Supports reading TIFF stacks or HDF5 files.
Parameters
----------
fov_id : int
The FOV id
peak_id : int
        The peak (channel) id. Dummy None value in case color='empty'
color : str
The image stack type to return. Can be:
c1 : phase stack
cN : where n is an integer for arbitrary color channel
sub : subtracted images
seg : segmented images
empty : get the empty channel for this fov, slightly different
Returns
-------
image_stack : np.ndarray
The image stack through time. Shape is (t, y, x)
'''
# things are slightly different for empty channels
if 'empty' in color:
if params['output'] == 'TIFF':
img_filename = params['experiment_name'] + '_xy%03d_%s.tif' % (fov_id, color)
with tiff.TiffFile(os.path.join(params['empty_dir'],img_filename)) as tif:
img_stack = tif.asarray()
if params['output'] == 'HDF5':
with h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'r') as h5f:
img_stack = h5f[color][:]
return img_stack
# load normal images for either TIFF or HDF5
if params['output'] == 'TIFF':
if color[0] == 'c':
img_dir = params['chnl_dir']
elif 'sub' in color:
img_dir = params['sub_dir']
elif 'foci' in color:
img_dir = params['foci_seg_dir']
elif 'seg' in color:
img_dir = params['seg_dir']
img_filename = params['experiment_name'] + '_xy%03d_p%04d_%s.tif' % (fov_id, peak_id, color)
with tiff.TiffFile(os.path.join(img_dir, img_filename)) as tif:
img_stack = tif.asarray()
if params['output'] == 'HDF5':
with h5py.File(os.path.join(params['hdf5_dir'], 'xy%03d.hdf5' % fov_id), 'r') as h5f:
# normal naming
# need to use [:] to get a copy, else it references the closed hdf5 dataset
img_stack = h5f['channel_%04d/p%04d_%s' % (peak_id, peak_id, color)][:]
return img_stack
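# Illustrative use of load_stack (a sketch; the fov/peak ids and color names are
# hypothetical and assume the global params dictionary has already been populated):
#   phase_stack = load_stack(fov_id=1, peak_id=11, color='c1')      # (t, y, x) phase images
#   sub_stack   = load_stack(fov_id=1, peak_id=11, color='sub_c1')  # subtracted images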
# load the time table and add it to the global params
def load_time_table():
'''Add the time table dictionary to the params global dictionary.
This is so it can be used during Cell creation.
'''
# try first for yaml, then for pkl
try:
with open(os.path.join(params['ana_dir'], 'time_table.yaml'), 'rb') as time_table_file:
params['time_table'] = yaml.safe_load(time_table_file)
except:
with open(os.path.join(params['ana_dir'], 'time_table.pkl'), 'rb') as time_table_file:
params['time_table'] = pickle.load(time_table_file)
return
# function for loading the channel masks
def load_channel_masks():
'''Load channel masks dictionary. Should be .yaml but try pickle too.
'''
information("Loading channel masks dictionary.")
# try loading from .yaml before .pkl
try:
information('Path:', os.path.join(params['ana_dir'], 'channel_masks.yaml'))
with open(os.path.join(params['ana_dir'], 'channel_masks.yaml'), 'r') as cmask_file:
channel_masks = yaml.safe_load(cmask_file)
except:
warning('Could not load channel masks dictionary from .yaml.')
try:
information('Path:', os.path.join(params['ana_dir'], 'channel_masks.pkl'))
with open(os.path.join(params['ana_dir'], 'channel_masks.pkl'), 'rb') as cmask_file:
channel_masks = pickle.load(cmask_file)
except ValueError:
warning('Could not load channel masks dictionary from .pkl.')
return channel_masks
# function for loading the specs file
def load_specs():
'''Load specs file which indicates which channels should be analyzed, used as empties, or ignored.'''
try:
with open(os.path.join(params['ana_dir'], 'specs.yaml'), 'r') as specs_file:
specs = yaml.safe_load(specs_file)
except:
try:
with open(os.path.join(params['ana_dir'], 'specs.pkl'), 'rb') as specs_file:
specs = pickle.load(specs_file)
except ValueError:
warning('Could not load specs file.')
return specs
### functions for dealing with raw TIFF images
# get params is the major function which processes raw TIFF images
def get_initial_tif_params(image_filename):
'''This is a function for getting the information
out of an image for later trap identification, cropping, and aligning with Unet. It loads a tiff file and pulls out the image metadata.
it returns a dictionary like this for each image:
'filename': image_filename,
'fov' : image_metadata['fov'], # fov id
't' : image_metadata['t'], # time point
'jdn' : image_metadata['jdn'], # absolute julian time
'x' : image_metadata['x'], # x position on stage [um]
'y' : image_metadata['y'], # y position on stage [um]
'plane_names' : image_metadata['plane_names'] # list of plane names
Called by
mm3_Compile.py __main__
Calls
mm3.extract_metadata
mm3.find_channels
'''
try:
# open up file and get metadata
with tiff.TiffFile(os.path.join(params['TIFF_dir'],image_filename)) as tif:
image_data = tif.asarray()
#print(image_data.shape) # uncomment for debug
#if len(image_data.shape) == 2:
# img_shape = [image_data.shape[0],image_data.shape[1]]
#else:
img_shape = [image_data.shape[1],image_data.shape[2]]
plane_list = [str(i+1) for i in range(image_data.shape[0])]
#print(plane_list) # uncomment for debug
if params['TIFF_source'] == 'elements':
image_metadata = get_tif_metadata_elements(tif)
elif params['TIFF_source'] == 'nd2ToTIFF':
image_metadata = get_tif_metadata_nd2ToTIFF(tif)
else:
image_metadata = get_tif_metadata_filename(tif)
information('Analyzed %s' % image_filename)
# return the file name, the data for the channels in that image, and the metadata
return {'filepath': os.path.join(params['TIFF_dir'], image_filename),
'fov' : image_metadata['fov'], # fov id
't' : image_metadata['t'], # time point
'jd' : image_metadata['jd'], # absolute julian time
'x' : image_metadata['x'], # x position on stage [um]
'y' : image_metadata['y'], # y position on stage [um]
'planes' : plane_list, # list of plane names
'shape' : img_shape} # image shape x y in pixels
except:
warning('Failed get_params for ' + image_filename.split("/")[-1])
print(sys.exc_info()[0])
print(sys.exc_info()[1])
print(traceback.print_tb(sys.exc_info()[2]))
return {'filepath': os.path.join(params['TIFF_dir'],image_filename), 'analyze_success': False}
# get params is the major function which processes raw TIFF images
def get_tif_params(image_filename, find_channels=True):
'''This is a damn important function for getting the information
out of an image. It loads a tiff file, pulls out the image data, and the metadata,
including the location of the channels if flagged.
it returns a dictionary like this for each image:
'filename': image_filename,
'fov' : image_metadata['fov'], # fov id
't' : image_metadata['t'], # time point
'jdn' : image_metadata['jdn'], # absolute julian time
'x' : image_metadata['x'], # x position on stage [um]
'y' : image_metadata['y'], # y position on stage [um]
'plane_names' : image_metadata['plane_names'] # list of plane names
'channels': cp_dict, # dictionary of channel locations, in the case of Unet-based channel segmentation, it's a dictionary of channel labels
Called by
mm3_Compile.py __main__
Calls
mm3.extract_metadata
mm3.find_channels
'''
try:
# open up file and get metadata
with tiff.TiffFile(os.path.join(params['TIFF_dir'],image_filename)) as tif:
image_data = tif.asarray()
if params['TIFF_source'] == 'elements':
image_metadata = get_tif_metadata_elements(tif)
elif params['TIFF_source'] == 'nd2ToTIFF':
image_metadata = get_tif_metadata_nd2ToTIFF(tif)
else:
image_metadata = get_tif_metadata_filename(tif)
# look for channels if flagged
if find_channels:
# fix the image orientation and get the number of planes
image_data = fix_orientation(image_data)
# if the image data has more than 1 plane restrict image_data to phase,
# which should have highest mean pixel data
if len(image_data.shape) > 2:
#ph_index = np.argmax([np.mean(image_data[ci]) for ci in range(image_data.shape[0])])
ph_index = int(params['phase_plane'][1:]) - 1
image_data = image_data[ph_index]
# get shape of single plane
img_shape = [image_data.shape[0], image_data.shape[1]]
# find channels on the processed image
chnl_loc_dict = find_channel_locs(image_data)
information('Analyzed %s' % image_filename)
# return the file name, the data for the channels in that image, and the metadata
return {'filepath': os.path.join(params['TIFF_dir'], image_filename),
'fov' : image_metadata['fov'], # fov id
't' : image_metadata['t'], # time point
'jd' : image_metadata['jd'], # absolute julian time
'x' : image_metadata['x'], # x position on stage [um]
'y' : image_metadata['y'], # y position on stage [um]
'planes' : image_metadata['planes'], # list of plane names
'shape' : img_shape, # image shape x y in pixels
# 'channels' : {1 : {'A' : 1, 'B' : 2}, 2 : {'C' : 3, 'D' : 4}}}
'channels' : chnl_loc_dict} # dictionary of channel locations
except:
warning('Failed get_params for ' + image_filename.split("/")[-1])
print(sys.exc_info()[0])
print(sys.exc_info()[1])
print(traceback.print_tb(sys.exc_info()[2]))
return {'filepath': os.path.join(params['TIFF_dir'],image_filename), 'analyze_success': False}
# finds metadata in a tiff image which has been exported with Nikon Elements.
def get_tif_metadata_elements(tif):
'''This function pulls out the metadata from a tif file and returns it as a dictionary.
    This is for tiff files exported by Nikon Elements as a stacked tiff, one file per time point.
tif is an opened tif file (using the package tifffile)
arguments:
fname (tifffile.TiffFile): TIFF file object from which data will be extracted
returns:
dictionary of values:
'jdn' (float)
'x' (float)
'y' (float)
'plane_names' (list of strings)
Called by
mm3.Compile
'''
# image Metadata
idata = { 'fov': -1,
't' : -1,
'jd': -1 * 0.0,
'x': -1 * 0.0,
'y': -1 * 0.0,
'planes': []}
# get the fov and t simply from the file name
idata['fov'] = int(tif.fname.split('xy')[1].split('.tif')[0])
idata['t'] = int(tif.fname.split('xy')[0].split('t')[-1])
    # a page is a plane, or stack, in the tiff. The other metadata is hidden down in there.
for page in tif:
for tag in page.tags.values():
#print("Checking tag",tag.name,tag.value)
t = tag.name, tag.value
t_string = u""
time_string = u""
# Interesting tag names: 65330, 65331 (binary data; good stuff), 65332
            # we want to work with the tag named 65331
            # if the tag name is not in the set of tags we find interesting then skip this cycle of the loop
if tag.name not in ('65331', '65332', 'strip_byte_counts', 'image_width', 'orientation', 'compression', 'new_subfile_type', 'fill_order', 'max_sample_value', 'bits_per_sample', '65328', '65333'):
#print("*** " + tag.name)
#print(tag.value)
pass
#if tag.name == '65330':
# return tag.value
            if tag.name == '65331':
                # make infolist a list of the tag values 0 to 65535 by zipping up pairs of bytes at two-byte intervals (low byte + 256*high byte)
# note that 0X100 is hex for 256
infolist = [a+b*0x100 for a,b in zip(tag.value[0::2], tag.value[1::2])]
# get char values for each element in infolist
for c_entry in range(0, len(infolist)):
# the element corresponds to an ascii char for a letter or bracket (and a few other things)
if infolist[c_entry] < 127 and infolist[c_entry] > 64:
# add the letter to the unicode string t_string
t_string += chr(infolist[c_entry])
#elif infolist[c_entry] == 0:
# continue
else:
t_string += " "
# this block will find the dTimeAbsolute and print the subsequent integers
# index 170 is counting seconds, and rollover of index 170 leads to increment of index 171
# rollover of index 171 leads to increment of index 172
                # get the position of the array by finding the index of the t_string at which dTimeAbsolute is listed; note that 2*len("dTimeAbsolute") = 26
#print(t_string)
arraypos = t_string.index("dXPos") * 2 + 16
xarr = tag.value[arraypos:arraypos+4]
b = ''.join(chr(i) for i in xarr)
idata['x'] = float(struct.unpack('<f', b)[0])
arraypos = t_string.index("dYPos") * 2 + 16
yarr = tag.value[arraypos:arraypos+4]
b = ''.join(chr(i) for i in yarr)
idata['y'] = float(struct.unpack('<f', b)[0])
arraypos = t_string.index("dTimeAbsolute") * 2 + 26
shortarray = tag.value[arraypos+2:arraypos+10]
b = ''.join(chr(i) for i in shortarray)
idata['jd'] = float(struct.unpack('<d', b)[0])
# extract plane names
il = [a+b*0x100 for a,b in zip(tag.value[0::2], tag.value[1::2])]
li = [a+b*0x100 for a,b in zip(tag.value[1::2], tag.value[2::2])]
strings = list(zip(il, li))
allchars = ""
for c_entry in range(0, len(strings)):
if 31 < strings[c_entry][0] < 127:
allchars += chr(strings[c_entry][0])
elif 31 < strings[c_entry][1] < 127:
allchars += chr(strings[c_entry][1])
else:
allchars += " "
allchars = re.sub(' +',' ', allchars)
words = allchars.split(" ")
planes = []
for idx in [i for i, x in enumerate(words) if x == "sOpticalConfigName"]:
planes.append(words[idx+1])
idata['planes'] = planes
return idata
# finds metadata in a tiff image which has been exported with nd2ToTIFF.py.
def get_tif_metadata_nd2ToTIFF(tif):
'''This function pulls out the metadata from a tif file and returns it as a dictionary.
    This is for tiff files exported by the mm3 function mm3_nd2ToTIFF.py. All the metadata
    is found in that script and saved in json format to the tiff, so it is simply extracted here
    Parameters:
tif: TIFF file object from which data will be extracted
Returns:
dictionary of values:
'fov': int,
't' : int,
'jdn' (float)
'x' (float)
'y' (float)
'planes' (list of strings)
Called by
mm3_Compile.get_tif_params
'''
# get the first page of the tiff and pull out image description
# this dictionary should be in the above form
for tag in tif.pages[0].tags:
if tag.name=="ImageDescription":
idata=tag.value
break
#print(idata)
idata = json.loads(idata)
return idata
# Finds metadata from the filename
def get_tif_metadata_filename(tif):
'''This function pulls out the metadata from a tif file and returns it as a dictionary.
    This just gets the tiff metadata from the filename and is a backup option for when the format of the embedded metadata is not known.
    Parameters:
tif: TIFF file object from which data will be extracted
Returns:
dictionary of values:
'fov': int,
't' : int,
'jdn' (float)
'x' (float)
'y' (float)
Called by
mm3_Compile.get_tif_params
'''
idata = {'fov' : get_fov(tif.filename), # fov id
't' : get_time(tif.filename), # time point
'jd' : -1 * 0.0, # absolute julian time
'x' : -1 * 0.0, # x position on stage [um]
'y' : -1 * 0.0} # y position on stage [um]
return idata
# make a lookup time table for converting nominal time to elapsed time in seconds
def make_time_table(analyzed_imgs):
'''
Loops through the analyzed images and uses the jd time in the metadata to find the elapsed
time in seconds that each picture was taken. This is later used for more accurate elongation
rate calculation.
    Parameters
---------
analyzed_imgs : dict
The output of get_tif_params.
params['use_jd'] : boolean
If set to True, 'jd' time will be used from the image metadata to use to create time table. Otherwise the 't' index will be used, and the parameter 'seconds_per_time_index' will be used from the parameters.yaml file to convert to seconds.
Returns
-------
time_table : dict
Look up dictionary with keys for the FOV and then the time point.
'''
information('Making time table...')
# initialize
time_table = {}
first_time = float('inf')
# need to go through the data once to find the first time
for iname, idata in six.iteritems(analyzed_imgs):
if params['use_jd']:
if idata['jd'] < first_time:
first_time = idata['jd']
else:
if idata['t'] < first_time:
first_time = idata['t']
# init dictionary for specific times per FOV
if idata['fov'] not in time_table:
time_table[idata['fov']] = {}
for iname, idata in six.iteritems(analyzed_imgs):
if params['use_jd']:
# convert jd time to elapsed time in seconds
t_in_seconds = np.around((idata['jd'] - first_time) * 24*60*60, decimals=0).astype('uint32')
else:
t_in_seconds = np.around((idata['t'] - first_time) * params['moviemaker']['seconds_per_time_index'], decimals=0).astype('uint32')
time_table[int(idata['fov'])][int(idata['t'])] = int(t_in_seconds)
# save to .pkl. This pkl will be loaded into the params
# with open(os.path.join(params['ana_dir'], 'time_table.pkl'), 'wb') as time_table_file:
# pickle.dump(time_table, time_table_file, protocol=pickle.HIGHEST_PROTOCOL)
# with open(os.path.join(params['ana_dir'], 'time_table.txt'), 'w') as time_table_file:
# pprint(time_table, stream=time_table_file)
with open(os.path.join(params['ana_dir'], 'time_table.yaml'), 'w') as time_table_file:
yaml.dump(data=time_table, stream=time_table_file, default_flow_style=False, tags=None)
information('Time table saved.')
return time_table
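# Worked example of the jd conversion above (hypothetical numbers): if the first image of
# the experiment has jd = 2458500.000 and a later image has jd = 2458500.125, then
# (2458500.125 - 2458500.000) * 24*60*60 = 10800, i.e. that image was taken 10800 s
# (3 hours) after the start of the experiment.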
# saves traps sliced via Unet
def save_tiffs(imgDict, analyzed_imgs, fov_id):
savePath = os.path.join(params['experiment_directory'],
params['analysis_directory'],
params['chnl_dir'])
img_names = [key for key in analyzed_imgs.keys()]
image_params = analyzed_imgs[img_names[0]]
for peak,img in six.iteritems(imgDict):
img = img.astype('uint16')
if not os.path.isdir(savePath):
os.mkdir(savePath)
for planeNumber in image_params['planes']:
channel_filename = os.path.join(savePath, params['experiment_name'] + '_xy{0:0=3}_p{1:0=4}_c{2}.tif'.format(fov_id, peak, planeNumber))
io.imsave(channel_filename, img[:,:,:,int(planeNumber)-1])
# slice_and_write cuts up the image files one at a time and writes them out to tiff stacks
def tiff_stack_slice_and_write(images_to_write, channel_masks, analyzed_imgs):
'''Writes out 4D stacks of TIFF images per channel.
Loads all tiffs from and FOV into memory and then slices all time points at once.
Called by
__main__
'''
# make an array of images and then concatenate them into one big stack
image_fov_stack = []
# go through list of images and get the file path
for n, image in enumerate(images_to_write):
# analyzed_imgs dictionary will be found in main scope. [0] is the key, [1] is jd
image_params = analyzed_imgs[image[0]]
information("Loading %s." % image_params['filepath'].split('/')[-1])
        if n == 0:
# declare identification variables for saving using first image
fov_id = image_params['fov']
# load the tif and store it in array
with tiff.TiffFile(image_params['filepath']) as tif:
image_data = tif.asarray()
# channel finding was also done on images after orientation was fixed
image_data = fix_orientation(image_data)
# add additional axis if the image is flat
if len(image_data.shape) == 2:
image_data = np.expand_dims(image_data, 0)
# change axis so it goes Y, X, Plane
image_data = np.rollaxis(image_data, 0, 3)
# add it to list. The images should be in time order
image_fov_stack.append(image_data)
# concatenate the list into one big ass stack
image_fov_stack = np.stack(image_fov_stack, axis=0)
# cut out the channels as per channel masks for this fov
for peak, channel_loc in six.iteritems(channel_masks[fov_id]):
#information('Slicing and saving channel peak %s.' % channel_filename.split('/')[-1])
information('Slicing and saving channel peak %d.' % peak)
# channel masks should only contain ints, but you can use this for hard fix
# for i in range(len(channel_loc)):
# for j in range(len(channel_loc[i])):
# channel_loc[i][j] = int(channel_loc[i][j])
# slice out channel.
# The function should recognize the shape length as 4 and cut all time points
channel_stack = cut_slice(image_fov_stack, channel_loc)
# save a different time stack for all colors
for color_index in range(channel_stack.shape[3]):
# this is the filename for the channel
# # chnl_dir and p will be looked for in the scope above (__main__)
channel_filename = os.path.join(params['chnl_dir'], params['experiment_name'] + '_xy%03d_p%04d_c%1d.tif' % (fov_id, peak, color_index+1))
# save stack
tiff.imsave(channel_filename, channel_stack[:,:,:,color_index], compress=4)
return
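# Illustrative output naming for the stacks written above (the experiment name is
# hypothetical): with experiment_name 'exp01', fov 3, peak 401 and two colors, this writes
#   exp01_xy003_p0401_c1.tif and exp01_xy003_p0401_c2.tif
# into params['chnl_dir'], each with shape (t, y, x).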
# saves traps sliced via Unet to an hdf5 file
def save_hdf5(imgDict, img_names, analyzed_imgs, fov_id, channel_masks):
'''Writes out 4D stacks of images to an HDF5 file.
Called by
mm3_Compile.py
'''
savePath = params['hdf5_dir']
if not os.path.isdir(savePath):
os.mkdir(savePath)
img_times = [analyzed_imgs[key]['t'] for key in img_names]
img_jds = [analyzed_imgs[key]['jd'] for key in img_names]
fov_ids = [analyzed_imgs[key]['fov'] for key in img_names]
# get image_params from first image from current fov
image_params = analyzed_imgs[img_names[0]]
# establish some variables for hdf5 attributes
fov_id = image_params['fov']
x_loc = image_params['x']
y_loc = image_params['y']
image_shape = image_params['shape']
image_planes = image_params['planes']
fov_channel_masks = channel_masks[fov_id]
with h5py.File(os.path.join(savePath,'{}_xy{:0=2}.hdf5'.format(params['experiment_name'],fov_id)), 'w', libver='earliest') as h5f:
# add in metadata for this FOV
# these attributes should be common for all channel
h5f.attrs.create('fov_id', fov_id)
h5f.attrs.create('stage_x_loc', x_loc)
h5f.attrs.create('stage_y_loc', y_loc)
h5f.attrs.create('image_shape', image_shape)
# encoding is because HDF5 has problems with numpy unicode
h5f.attrs.create('planes', [plane.encode('utf8') for plane in image_planes])
h5f.attrs.create('peaks', sorted([key for key in imgDict.keys()]))
# this is for things that change across time, for these create a dataset
img_names = np.asarray(img_names)
img_names = np.expand_dims(img_names, 1)
img_names = img_names.astype('S100')
h5ds = h5f.create_dataset(u'filenames', data=img_names,
chunks=True, maxshape=(None, 1), dtype='S100',
compression="gzip", shuffle=True, fletcher32=True)
h5ds = h5f.create_dataset(u'times', data=np.expand_dims(img_times, 1),
chunks=True, maxshape=(None, 1),
compression="gzip", shuffle=True, fletcher32=True)
h5ds = h5f.create_dataset(u'times_jd', data=np.expand_dims(img_jds, 1),
chunks=True, maxshape=(None, 1),
compression="gzip", shuffle=True, fletcher32=True)
# cut out the channels as per channel masks for this fov
for peak,channel_stack in six.iteritems(imgDict):
channel_stack = channel_stack.astype('uint16')
# create group for this trap
h5g = h5f.create_group('channel_%04d' % peak)
            # add attribute for peak_id, channel location
h5g.attrs.create('peak_id', peak)
channel_loc = fov_channel_masks[peak]
h5g.attrs.create('channel_loc', channel_loc)
# save a different dataset for all colors
for color_index in range(channel_stack.shape[3]):
# create the dataset for the image. Review docs for these options.
h5ds = h5g.create_dataset(u'p%04d_c%1d' % (peak, color_index+1),
data=channel_stack[:,:,:,color_index],
chunks=(1, channel_stack.shape[1], channel_stack.shape[2]),
maxshape=(None, channel_stack.shape[1], channel_stack.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
# h5ds.attrs.create('plane', image_planes[color_index].encode('utf8'))
# write the data even though we have more to write (free up memory)
h5f.flush()
return
# same thing as tiff_stack_slice_and_write but do it for hdf5
def hdf5_stack_slice_and_write(images_to_write, channel_masks, analyzed_imgs):
'''Writes out 4D stacks of TIFF images to an HDF5 file.
Called by
__main__
'''
# make an array of images and then concatenate them into one big stack
image_fov_stack = []
# make arrays for filenames and times
image_filenames = []
image_times = [] # times is still an integer but may be indexed arbitrarily
image_jds = [] # jds = julian dates (times)
# go through list of images, load and fix them, and create arrays of metadata
for n, image in enumerate(images_to_write):
image_name = image[0] # [0] is the key, [1] is jd
# analyzed_imgs dictionary will be found in main scope.
image_params = analyzed_imgs[image_name]
information("Loading %s." % image_params['filepath'].split('/')[-1])
# add information to metadata arrays
image_filenames.append(image_name)
image_times.append(image_params['t'])
image_jds.append(image_params['jd'])
# declare identification variables for saving using first image
        if n == 0:
# same across fov
fov_id = image_params['fov']
x_loc = image_params['x']
y_loc = image_params['y']
image_shape = image_params['shape']
image_planes = image_params['planes']
# load the tif and store it in array
with tiff.TiffFile(image_params['filepath']) as tif:
image_data = tif.asarray()
# channel finding was also done on images after orientation was fixed
image_data = fix_orientation(image_data)
# add additional axis if the image is flat
if len(image_data.shape) == 2:
image_data = np.expand_dims(image_data, 0)
        # change axis so it goes Y, X, Plane
image_data = np.rollaxis(image_data, 0, 3)
# add it to list. The images should be in time order
image_fov_stack.append(image_data)
# concatenate the list into one big ass stack
image_fov_stack = np.stack(image_fov_stack, axis=0)
# create the HDF5 file for the FOV, first time this is being done.
with h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'w', libver='earliest') as h5f:
# add in metadata for this FOV
# these attributes should be common for all channel
h5f.attrs.create('fov_id', fov_id)
h5f.attrs.create('stage_x_loc', x_loc)
h5f.attrs.create('stage_y_loc', y_loc)
h5f.attrs.create('image_shape', image_shape)
# encoding is because HDF5 has problems with numpy unicode
h5f.attrs.create('planes', [plane.encode('utf8') for plane in image_planes])
h5f.attrs.create('peaks', sorted(channel_masks[fov_id].keys()))
# this is for things that change across time, for these create a dataset
h5ds = h5f.create_dataset(u'filenames', data=np.expand_dims(image_filenames, 1),
chunks=True, maxshape=(None, 1), dtype='S100',
compression="gzip", shuffle=True, fletcher32=True)
h5ds = h5f.create_dataset(u'times', data=np.expand_dims(image_times, 1),
chunks=True, maxshape=(None, 1),
compression="gzip", shuffle=True, fletcher32=True)
h5ds = h5f.create_dataset(u'times_jd', data=np.expand_dims(image_jds, 1),
chunks=True, maxshape=(None, 1),
compression="gzip", shuffle=True, fletcher32=True)
# cut out the channels as per channel masks for this fov
for peak, channel_loc in six.iteritems(channel_masks[fov_id]):
#information('Slicing and saving channel peak %s.' % channel_filename.split('/')[-1])
information('Slicing and saving channel peak %d.' % peak)
# create group for this channel
h5g = h5f.create_group('channel_%04d' % peak)
# add attribute for peak_id, channel location
h5g.attrs.create('peak_id', peak)
h5g.attrs.create('channel_loc', channel_loc)
# channel masks should only contain ints, but you can use this for a hard fix
# for i in range(len(channel_loc)):
# for j in range(len(channel_loc[i])):
# channel_loc[i][j] = int(channel_loc[i][j])
# slice out channel.
# The function should recognize the shape length as 4 and cut all time points
channel_stack = cut_slice(image_fov_stack, channel_loc)
# save a different dataset for all colors
for color_index in range(channel_stack.shape[3]):
# create the dataset for the image. Review docs for these options.
h5ds = h5g.create_dataset(u'p%04d_c%1d' % (peak, color_index+1),
data=channel_stack[:,:,:,color_index],
chunks=(1, channel_stack.shape[1], channel_stack.shape[2]),
maxshape=(None, channel_stack.shape[1], channel_stack.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
# h5ds.attrs.create('plane', image_planes[color_index].encode('utf8'))
# write the data even though we have more to write (free up memory)
h5f.flush()
return
def tileImage(img, subImageNumber):
divisor = int(np.sqrt(subImageNumber))
M = img.shape[0]//divisor
N = img.shape[0]//divisor
print(img.shape, M, N, divisor, subImageNumber)
ans = ([img[x:x+M,y:y+N] for x in range(0,img.shape[0],M) for y in range(0,img.shape[1],N)])
tiles=[]
for m in ans:
if m.shape[0]==512 and m.shape[1]==512:
tiles.append(m)
tiles=np.asarray(tiles)
#print(tiles)
return(tiles)
def get_weights(img, subImageNumber):
divisor = int(np.sqrt(subImageNumber))
M = img.shape[0]//divisor
N = img.shape[0]//divisor
weights = np.ones((img.shape[0],img.shape[1]),dtype='uint8')
for i in range(divisor-1):
weights[(M*(i+1))-25:(M*(i+1)+25),:] = 0
weights[:,(N*(i+1))-25:(N*(i+1)+25)] = 0
return(weights)
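# Worked example for tileImage/get_weights above (hypothetical sizes): for a 2048x2048
# padded image and subImageNumber=16, divisor = sqrt(16) = 4 and M = N = 512, so tileImage
# returns the 16 non-overlapping 512x512 crops. get_weights zeroes a 50-pixel band
# (+/- 25 px) around each internal crop boundary, so those low-confidence seam regions get
# zero weight when the shifted predictions are averaged in get_frame_predictions.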
def permute_image(img, trap_align_metadata):
# are there three dimensions?
if len(img.shape) == 3:
        if img.shape[0] < 3: # for tifs with fewer than three imaging channels, the first dimension separates channels
# img = np.transpose(img, (1,2,0))
img = img[trap_align_metadata['phase_plane_index'],:,:] # grab just the phase channel
else:
img = img[:,:,trap_align_metadata['phase_plane_index']] # grab just the phase channel
return(img)
def imageConcatenatorFeatures(imgStack, subImageNumber = 64):
rowNumPerImage = int(np.sqrt(subImageNumber)) # here I'm assuming our large images are square, with equal number of crops in each dimension
#print(rowNumPerImage)
imageNum = int(imgStack.shape[0]/subImageNumber) # total number of sub-images divided by the number of sub-images in each original large image
iterNum = int(imageNum*rowNumPerImage)
imageDims = int(np.sqrt(imgStack.shape[1]*imgStack.shape[2]*subImageNumber))
featureNum = int(imgStack.shape[3])
bigImg = np.zeros(shape=(imageNum, imageDims, imageDims, featureNum), dtype='float32') # create array to store reconstructed images
featureRowDicts = []
for j in range(featureNum):
rowDict = {}
for i in range(iterNum):
baseNum = int(i*iterNum/imageNum)
# concatenate columns of 256x256 images to build each 256x2048 row
rowDict[i] = np.column_stack((imgStack[baseNum,:,:,j],imgStack[baseNum+1,:,:,j],
imgStack[baseNum+2,:,:,j], imgStack[baseNum+3,:,:,j]))#,
#imgStack[baseNum+4,:,:,j],imgStack[baseNum+5,:,:,j],
#imgStack[baseNum+6,:,:,j],imgStack[baseNum+7,:,:,j]))
featureRowDicts.append(rowDict)
for j in range(featureNum):
for i in range(imageNum):
baseNum = int(i*rowNumPerImage)
# concatenate appropriate 256x2048 rows to build a 2048x2048 image and place it into bigImg
bigImg[i,:,:,j] = np.row_stack((featureRowDicts[j][baseNum],featureRowDicts[j][baseNum+1],
featureRowDicts[j][baseNum+2],featureRowDicts[j][baseNum+3]))#,
#featureRowDicts[j][baseNum+4],featureRowDicts[j][baseNum+5],
#featureRowDicts[j][baseNum+6],featureRowDicts[j][baseNum+7]))
return(bigImg)
def imageConcatenatorFeatures2(imgStack, subImageNumber = 81):
rowNumPerImage = int(np.sqrt(subImageNumber)) # here I'm assuming our large images are square, with equal number of crops in each dimension
imageNum = int(imgStack.shape[0]/subImageNumber) # total number of sub-images divided by the number of sub-images in each original large image
iterNum = int(imageNum*rowNumPerImage)
imageDims = int(np.sqrt(imgStack.shape[1]*imgStack.shape[2]*subImageNumber))
featureNum = int(imgStack.shape[3])
bigImg = np.zeros(shape=(imageNum, imageDims, imageDims, featureNum), dtype='float32') # create array to store reconstructed images
featureRowDicts = []
for j in range(featureNum):
rowDict = {}
for i in range(iterNum):
baseNum = int(i*iterNum/imageNum)
# concatenate columns of 256x256 images to build each 256x2048 row
rowDict[i] = np.column_stack((imgStack[baseNum,:,:,j],imgStack[baseNum+1,:,:,j],
imgStack[baseNum+2,:,:,j], imgStack[baseNum+3,:,:,j],
imgStack[baseNum+4,:,:,j]))#,imgStack[baseNum+5,:,:,j],
#imgStack[baseNum+6,:,:,j],imgStack[baseNum+7,:,:,j],
#imgStack[baseNum+8,:,:,j]))
featureRowDicts.append(rowDict)
for j in range(featureNum):
for i in range(imageNum):
baseNum = int(i*rowNumPerImage)
# concatenate appropriate 256x2048 rows to build a 2048x2048 image and place it into bigImg
bigImg[i,:,:,j] = np.row_stack((featureRowDicts[j][baseNum],featureRowDicts[j][baseNum+1],
featureRowDicts[j][baseNum+2],featureRowDicts[j][baseNum+3],
featureRowDicts[j][baseNum+4]))#,featureRowDicts[j][baseNum+5],
#featureRowDicts[j][baseNum+6],featureRowDicts[j][baseNum+7],
#featureRowDicts[j][baseNum+8]))
return(bigImg)
def get_weights_array(arr=np.zeros((2048,2048)), shiftDistance=128, subImageNumber=64, padSubImageNumber=81):
originalImageWeights = get_weights(arr, subImageNumber=subImageNumber)
shiftLeftWeights = np.pad(originalImageWeights, pad_width=((0,0),(0,shiftDistance)),
mode='constant', constant_values=((0,0),(0,0)))[:,shiftDistance:]
shiftRightWeights = np.pad(originalImageWeights, pad_width=((0,0),(shiftDistance,0)),
mode='constant', constant_values=((0,0),(0,0)))[:,:(-1*shiftDistance)]
shiftUpWeights = np.pad(originalImageWeights, pad_width=((0,shiftDistance),(0,0)),
mode='constant', constant_values=((0,0),(0,0)))[shiftDistance:,:]
shiftDownWeights = np.pad(originalImageWeights, pad_width=((shiftDistance,0),(0,0)),
mode='constant', constant_values=((0,0),(0,0)))[:(-1*shiftDistance),:]
expandedImageWeights = get_weights(np.zeros((arr.shape[0]+2*shiftDistance,arr.shape[1]+2*shiftDistance)), subImageNumber=padSubImageNumber)[shiftDistance:-shiftDistance,shiftDistance:-shiftDistance]
allWeights = np.stack((originalImageWeights, expandedImageWeights, shiftUpWeights, shiftDownWeights, shiftLeftWeights,shiftRightWeights), axis=-1)
stackWeights = np.stack((allWeights,allWeights),axis=0)
stackWeights = np.stack((stackWeights,stackWeights,stackWeights),axis=3)
return(stackWeights)
# predicts locations of channels in an image using deep learning model
def get_frame_predictions(img,model,stackWeights, shiftDistance=256, subImageNumber=16, padSubImageNumber=25, debug=False):
pred = predict_first_image_channels(img, model, shiftDistance=shiftDistance,
subImageNumber=subImageNumber, padSubImageNumber=padSubImageNumber, debug=debug)[0,...]
# print(pred.shape)
if debug:
print(pred.shape)
compositePrediction = np.average(pred, axis=3, weights=stackWeights)
# print(compositePrediction.shape)
padSize = (compositePrediction.shape[0]-img.shape[0])//2
compositePrediction = util.crop(compositePrediction,((padSize,padSize),
(padSize,padSize),
(0,0)))
# print(compositePrediction.shape)
return(compositePrediction)
def apply_median_filter_normalize(imgs):
selem = morphology.disk(3)
for i in range(imgs.shape[0]):
# Store sample
tmpImg = imgs[i,:,:,0]
medImg = median(tmpImg, selem)
tmpImg = medImg/np.max(medImg)
tmpImg = np.expand_dims(tmpImg, axis=-1)
imgs[i,:,:,:] = tmpImg
return(imgs)
def predict_first_image_channels(img, model,
subImageNumber=16, padSubImageNumber=25,
shiftDistance=128, batchSize=1,
debug=False):
imgSize = img.shape[0]
padSize = (2048-imgSize)//2 # how much to pad on each side to get up to 2048x2048?
imgStack = np.pad(img, pad_width=((padSize,padSize),(padSize,padSize)),
mode='constant', constant_values=((0,0),(0,0))) # pad the images to make them 2048x2048
    # pad the stack by 128 pixels on each side to get complementary crops that I can run the network on. This
# should help me fill in low-confidence regions where the crop boundaries were for the original image
imgStackExpand = np.pad(imgStack, pad_width=((shiftDistance,shiftDistance),(shiftDistance,shiftDistance)),
mode='constant', constant_values=((0,0),(0,0)))
imgStackShiftRight = np.pad(imgStack, pad_width=((0,0),(0,shiftDistance)),
mode='constant', constant_values=((0,0),(0,0)))[:,shiftDistance:]
imgStackShiftLeft = np.pad(imgStack, pad_width=((0,0),(shiftDistance,0)),
mode='constant', constant_values=((0,0),(0,0)))[:,:-shiftDistance]
imgStackShiftDown = np.pad(imgStack, pad_width=((0,shiftDistance),(0,0)),
mode='constant', constant_values=((0,0),(0,0)))[shiftDistance:,:]
imgStackShiftUp = np.pad(imgStack, pad_width=((shiftDistance,0),(0,0)),
mode='constant', constant_values=((0,0),(0,0)))[:-shiftDistance,:]
#print(imgStackShiftUp.shape)
crops = tileImage(imgStack, subImageNumber=subImageNumber)
print("Crops: ", crops.shape)
crops = np.expand_dims(crops, -1)
data_gen_args = {'batch_size':params['compile']['channel_prediction_batch_size'],
'n_channels':1,
'normalize_to_one':True,
'shuffle':False}
predict_gen_args = {'verbose':1,
'use_multiprocessing':True,
'workers':params['num_analyzers']}
img_generator = TrapSegmentationDataGenerator(crops, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
prediction = imageConcatenatorFeatures(predictions, subImageNumber=subImageNumber)
#print(prediction.shape)
cropsExpand = tileImage(imgStackExpand, subImageNumber=padSubImageNumber)
cropsExpand = np.expand_dims(cropsExpand, -1)
img_generator = TrapSegmentationDataGenerator(cropsExpand, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
predictionExpand = imageConcatenatorFeatures2(predictions, subImageNumber=padSubImageNumber)
predictionExpand = util.crop(predictionExpand, ((0,0),(shiftDistance,shiftDistance),(shiftDistance,shiftDistance),(0,0)))
#print(predictionExpand.shape)
cropsShiftLeft = tileImage(imgStackShiftLeft, subImageNumber=subImageNumber)
cropsShiftLeft = np.expand_dims(cropsShiftLeft, -1)
img_generator = TrapSegmentationDataGenerator(cropsShiftLeft, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
predictionLeft = imageConcatenatorFeatures(predictions, subImageNumber=subImageNumber)
predictionLeft = np.pad(predictionLeft, pad_width=((0,0),(0,0),(0,shiftDistance),(0,0)),
mode='constant', constant_values=((0,0),(0,0),(0,0),(0,0)))[:,:,shiftDistance:,:]
#print(predictionLeft.shape)
cropsShiftRight = tileImage(imgStackShiftRight, subImageNumber=subImageNumber)
cropsShiftRight = np.expand_dims(cropsShiftRight, -1)
img_generator = TrapSegmentationDataGenerator(cropsShiftRight, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
predictionRight = imageConcatenatorFeatures(predictions, subImageNumber=subImageNumber)
predictionRight = np.pad(predictionRight, pad_width=((0,0),(0,0),(shiftDistance,0),(0,0)),
mode='constant', constant_values=((0,0),(0,0),(0,0),(0,0)))[:,:,:(-1*shiftDistance),:]
#print(predictionRight.shape)
cropsShiftUp = tileImage(imgStackShiftUp, subImageNumber=subImageNumber)
#print(cropsShiftUp.shape)
cropsShiftUp = np.expand_dims(cropsShiftUp, -1)
img_generator = TrapSegmentationDataGenerator(cropsShiftUp, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
predictionUp = imageConcatenatorFeatures(predictions, subImageNumber=subImageNumber)
predictionUp = np.pad(predictionUp, pad_width=((0,0),(0,shiftDistance),(0,0),(0,0)),
mode='constant', constant_values=((0,0),(0,0),(0,0),(0,0)))[:,shiftDistance:,:,:]
#print(predictionUp.shape)
cropsShiftDown = tileImage(imgStackShiftDown, subImageNumber=subImageNumber)
cropsShiftDown = np.expand_dims(cropsShiftDown, -1)
img_generator = TrapSegmentationDataGenerator(cropsShiftDown, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
predictionDown = imageConcatenatorFeatures(predictions, subImageNumber=subImageNumber)
predictionDown = np.pad(predictionDown, pad_width=((0,0),(shiftDistance,0),(0,0),(0,0)),
mode='constant', constant_values=((0,0),(0,0),(0,0),(0,0)))[:,:(-1*shiftDistance),:,:]
#print(predictionDown.shape)
allPredictions = np.stack((prediction, predictionExpand,
predictionUp, predictionDown,
predictionLeft, predictionRight), axis=-1)
return(allPredictions)
# takes initial U-net centroids for trap locations, and creates bounding boxes for each trap at the defined height and width
def get_frame_trap_bounding_boxes(trapLabels, trapProps, trapAreaThreshold=2000, trapWidth=27, trapHeight=256):
badTrapLabels = [reg.label for reg in trapProps if reg.area < trapAreaThreshold] # filter out small "trap" regions
goodTraps = trapLabels.copy()
for label in badTrapLabels:
goodTraps[goodTraps == label] = 0 # re-label bad traps as background (0)
goodTrapProps = measure.regionprops(goodTraps)
trapCentroids = [(int(np.round(reg.centroid[0])),int(np.round(reg.centroid[1]))) for reg in goodTrapProps] # get centroids as integers
trapBboxes = []
for centroid in trapCentroids:
rowIndex = centroid[0]
colIndex = centroid[1]
minRow = rowIndex-trapHeight//2
maxRow = rowIndex+trapHeight//2
minCol = colIndex-trapWidth//2
maxCol = colIndex+trapWidth//2
if trapWidth % 2 != 0:
maxCol += 1
coordArray = np.array([minRow,maxRow,minCol,maxCol])
# remove any traps at edges of image
if np.any(coordArray > goodTraps.shape[0]):
continue
if np.any(coordArray < 0):
continue
trapBboxes.append((minRow,minCol,maxRow,maxCol))
return(trapBboxes)
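# Worked example of the bounding-box construction above (hypothetical centroid): with the
# default trapHeight=256 and trapWidth=27, a trap centroid at (row=500, col=100) gives
# minRow=372, maxRow=628, minCol=87, maxCol=113; because trapWidth is odd, maxCol is then
# bumped to 114, so the stored bbox is (372, 87, 628, 114) with the expected 256x27 extent.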
# this function performs image alignment as defined by the shifts passed as an argument
def crop_traps(fileNames, trapProps, labelledTraps, bboxesDict, trap_align_metadata):
frameNum = trap_align_metadata['frame_count']
channelNum = trap_align_metadata['plane_number']
trapImagesDict = {key:np.zeros((frameNum,
trap_align_metadata['trap_height'],
trap_align_metadata['trap_width'],
channelNum)) for key in bboxesDict}
trapClosedEndPxDict = {}
flipImageDict = {}
trapMask = labelledTraps
for frame in range(frameNum):
if (frame+1) % 20 == 0:
print("Cropping trap regions for frame number {} of {}.".format(frame+1, frameNum))
imgPath = os.path.join(params['experiment_directory'],params['image_directory'],fileNames[frame])
fullFrameImg = io.imread(imgPath)
if len(fullFrameImg.shape) == 3:
if fullFrameImg.shape[0] < 3: # for tifs with less than three imaging channels, the first dimension separates channels
fullFrameImg = np.transpose(fullFrameImg, (1,2,0))
trapClosedEndPxDict[fileNames[frame]] = {key:{} for key in bboxesDict.keys()}
for key in trapImagesDict.keys():
bbox = bboxesDict[key][frame]
trapImagesDict[key][frame,:,:,:] = fullFrameImg[bbox[0]:bbox[2],bbox[1]:bbox[3],:]
#tmpImg = np.reshape(fullFrameImg[trapMask==key], (trapHeight,trapWidth,channelNum))
if frame == 0:
medianProfile = np.median(trapImagesDict[key][frame,:,:,0],axis=1) # get intensity of middle column of trap
maxIntensityRow = np.argmax(medianProfile)
if maxIntensityRow > trap_align_metadata['trap_height']//2:
flipImageDict[key] = 0
else:
flipImageDict[key] = 1
if flipImageDict[key] == 1:
trapImagesDict[key][frame,:,:,:] = trapImagesDict[key][frame,::-1,:,:]
trapClosedEndPxDict[fileNames[frame]][key]['closed_end_px'] = bbox[0]
trapClosedEndPxDict[fileNames[frame]][key]['open_end_px'] = bbox[2]
else:
trapClosedEndPxDict[fileNames[frame]][key]['closed_end_px'] = bbox[2]
trapClosedEndPxDict[fileNames[frame]][key]['open_end_px'] = bbox[0]
continue
return(trapImagesDict, trapClosedEndPxDict)
# gets shifted bounding boxes to crop traps through time
def shift_bounding_boxes(bboxesDict, shifts, imgSize):
bboxesShiftDict = {}
for key in bboxesDict.keys():
bboxesShiftDict[key] = []
bboxes = bboxesDict[key]
for i in range(shifts.shape[0]):
if i == 0:
bboxesShiftDict[key].append(bboxes)
else:
minRow = bboxes[0]+shifts[i,0]
minCol = bboxes[1]+shifts[i,1]
maxRow = bboxes[2]+shifts[i,0]
maxCol = bboxes[3]+shifts[i,1]
bboxesShiftDict[key].append((minRow,
minCol,
maxRow,
maxCol))
if np.any(np.asarray([minRow,minCol,maxRow,maxCol]) < 0):
print("channel {} removed: out of frame".format(key))
del bboxesShiftDict[key]
break
if np.any(np.asarray([minRow,minCol,maxRow,maxCol]) > imgSize):
print("channel {} removed: out of frame".format(key))
del bboxesShiftDict[key]
break
return(bboxesShiftDict)
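# Worked example of the shift bookkeeping above (hypothetical shift): a bbox of
# (372, 87, 628, 114) combined with a frame-to-frame shift of (+2, -1) in (rows, cols)
# becomes (374, 86, 630, 113); if any shifted coordinate falls below 0 or beyond imgSize,
# the whole channel is dropped from the dictionary for the rest of the experiment.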
# finds the location of channels in a tif
def find_channel_locs(image_data):
    '''Finds the location of channels from a phase contrast image. The channels are returned in
    a dictionary where the key is the x position of the channel in pixels and the value is a
    dictionary with the open and closed end in pixels in y.
Called by
mm3_Compile.get_tif_params
'''
# declare temp variables from yaml parameter dict.
chan_w = params['compile']['channel_width']
chan_sep = params['compile']['channel_separation']
crop_wp = int(params['compile']['channel_width_pad'] + chan_w/2)
chan_snr = params['compile']['channel_detection_snr']
# Detect peaks in the x projection (i.e. find the channels)
projection_x = image_data.sum(axis=0).astype(np.int32)
# find_peaks_cwt is a function which attempts to find the peaks in a 1-D array by
# convolving it with a wave. here the wave is the default Mexican hat wave
# but the minimum signal to noise ratio is specified
# *** The range here should be a parameter or changed to a fraction.
peaks = find_peaks_cwt(projection_x, np.arange(chan_w-5,chan_w+5), min_snr=chan_snr)
# If the left-most peak position is within half of a channel separation,
# discard the channel from the list.
if peaks[0] < (chan_sep / 2):
peaks = peaks[1:]
    # If the difference between the right-most peak position and the right edge
# of the image is less than half of a channel separation, discard the channel.
if image_data.shape[1] - peaks[-1] < (chan_sep / 2):
peaks = peaks[:-1]
# Find the average channel ends for the y-projected image
projection_y = image_data.sum(axis=1)
# find derivative, must use int32 because it was unsigned 16b before.
proj_y_d = np.diff(projection_y.astype(np.int32))
# use the top third to look for closed end, is pixel location of highest deriv
onethirdpoint_y = int(projection_y.shape[0]/3.0)
default_closed_end_px = proj_y_d[:onethirdpoint_y].argmax()
# use bottom third to look for open end, pixel location of lowest deriv
twothirdpoint_y = int(projection_y.shape[0]*2.0/3.0)
default_open_end_px = twothirdpoint_y + proj_y_d[twothirdpoint_y:].argmin()
default_length = default_open_end_px - default_closed_end_px # used for checks
# go through peaks and assign information
# dict for channel dimensions
chnl_loc_dict = {}
# key is peak location, value is dict with {'closed_end_px': px, 'open_end_px': px}
for peak in peaks:
# set defaults
chnl_loc_dict[peak] = {'closed_end_px': default_closed_end_px,
'open_end_px': default_open_end_px}
# redo the previous y projection finding with just this channel
channel_slice = image_data[:, peak-crop_wp:peak+crop_wp]
slice_projection_y = channel_slice.sum(axis = 1)
slice_proj_y_d = np.diff(slice_projection_y.astype(np.int32))
slice_closed_end_px = slice_proj_y_d[:onethirdpoint_y].argmax()
slice_open_end_px = twothirdpoint_y + slice_proj_y_d[twothirdpoint_y:].argmin()
slice_length = slice_open_end_px - slice_closed_end_px
# check if these values make sense. If so, use them. If not, use default
        # make sure the length is within 15 pixels of the default length
# *** This 15 should probably be a parameter or at least changed to a fraction.
if slice_length + 15 < default_length or slice_length - 15 > default_length:
continue
# make sure ends are greater than 15 pixels from image edge
if slice_closed_end_px < 15 or slice_open_end_px > image_data.shape[0] - 15:
continue
# if you made it to this point then update the entry
chnl_loc_dict[peak] = {'closed_end_px' : slice_closed_end_px,
'open_end_px' : slice_open_end_px}
return chnl_loc_dict
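# Illustrative shape of the dictionary returned above (pixel values are hypothetical):
#   {334: {'closed_end_px': 10, 'open_end_px': 252},
#    389: {'closed_end_px': 11, 'open_end_px': 254}, ...}
# where each key is the x position (peak) of a channel in pixels.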
# make masks from initial set of images (same images as clusters)
def make_masks(analyzed_imgs):
'''
    Make masks goes through the channel locations in the image metadata and builds a consensus
    mask for each fov, which it returns as a dictionary named channel_masks.
    The keys in this dictionary are fov ids, and each value is another dictionary. That dict's keys are channel locations (peaks) and its values are [2][2] arrays:
    [[minrow, maxrow],[mincol, maxcol]] of pixel locations designating the corners of each mask
    for each channel on the whole image
    One important consequence of this function is that the channel ids and the size of the
    channel slices are decided now. Updates to the masks must coordinate with these values.
Parameters
analyzed_imgs : dict
image information created by get_params
Returns
channel_masks : dict
dictionary of consensus channel masks.
Called By
mm3_Compile.py
Calls
'''
information("Determining initial channel masks...")
# declare temp variables from yaml parameter dict.
crop_wp = int(params['compile']['channel_width_pad'] + params['compile']['channel_width']/2)
chan_lp = int(params['compile']['channel_length_pad'])
    # initialize dictionary
channel_masks = {}
# get the size of the images (hope they are the same)
for img_k in analyzed_imgs.keys():
img_v = analyzed_imgs[img_k]
image_rows = img_v['shape'][0] # x pixels
image_cols = img_v['shape'][1] # y pixels
break # just need one. using iteritems mean the whole dict doesn't load
# get the fov ids
fovs = []
for img_k in analyzed_imgs.keys():
img_v = analyzed_imgs[img_k]
if img_v['fov'] not in fovs:
fovs.append(img_v['fov'])
# max width and length across all fovs. channels will get expanded by these values
# this important for later updates to the masks, which should be the same
max_chnl_mask_len = 0
max_chnl_mask_wid = 0
# for each fov make a channel_mask dictionary from consensus mask
for fov in fovs:
        # initialize the dict and consensus mask
channel_masks_1fov = {} # dict which holds channel masks {peak : [[y1, y2],[x1,x2]],...}
consensus_mask = np.zeros([image_rows, image_cols]) # mask for labeling
# bring up information for each image
for img_k in analyzed_imgs.keys():
img_v = analyzed_imgs[img_k]
# skip this one if it is not of the current fov
if img_v['fov'] != fov:
continue
# for each channel in each image make a single mask
img_chnl_mask = np.zeros([image_rows, image_cols])
# and add the channel mask to it
for chnl_peak, peak_ends in six.iteritems(img_v['channels']):
# pull out the peak location and top and bottom location
# and expand by padding (more padding done later for width)
x1 = max(chnl_peak - crop_wp, 0)
x2 = min(chnl_peak + crop_wp, image_cols)
y1 = max(peak_ends['closed_end_px'] - chan_lp, 0)
y2 = min(peak_ends['open_end_px'] + chan_lp, image_rows)
# add it to the mask for this image
img_chnl_mask[y1:y2, x1:x2] = 1
# add it to the consensus mask
consensus_mask += img_chnl_mask
        # Normalize consensus mask between 0 and 1.
consensus_mask = consensus_mask.astype('float32') / float(np.amax(consensus_mask))
        # threshold and homogenize each channel mask within the mask, label them
# label when value is above 0.1 (so 90% occupancy), transpose.
# the [0] is for the array ([1] is the number of regions)
# It transposes and then transposes again so regions are labeled left to right
# clear border it to make sure the channels are off the edge
consensus_mask = ndi.label(consensus_mask)[0]
# go through each label
for label in np.unique(consensus_mask):
if label == 0: # label zero is the background
continue
binary_core = consensus_mask == label
# clean up the rough edges
poscols = np.any(binary_core, axis = 0) # column positions where true (any)
posrows = np.any(binary_core, axis = 1) # row positions where true (any)
            # channel_id given by horizontal position
# this is important. later updates to the positions will have to check
# if their channels contain this median value to match up
channel_id = int(np.median(np.where(poscols)[0]))
# store the edge locations of the channel mask in the dictionary. Will be ints
min_row = np.min(np.where(posrows)[0])
max_row = np.max(np.where(posrows)[0])
min_col = np.min(np.where(poscols)[0])
max_col = np.max(np.where(poscols)[0])
# if the min/max cols are within the image bounds,
# add the mask, as 4 points, to the dictionary
if min_col > 0 and max_col < image_cols:
channel_masks_1fov[channel_id] = [[min_row, max_row], [min_col, max_col]]
# find the largest channel width and height while you go round
max_chnl_mask_len = int(max(max_chnl_mask_len, max_row - min_row))
max_chnl_mask_wid = int(max(max_chnl_mask_wid, max_col - min_col))
# add channel_mask dictionary to the fov dictionary, use copy to play it safe
channel_masks[fov] = channel_masks_1fov.copy()
# update all channel masks to be the max size
cm_copy = channel_masks.copy()
for fov, peaks in six.iteritems(channel_masks):
# f_id = int(fov)
for peak, chnl_mask in six.iteritems(peaks):
# p_id = int(peak)
# just add length to the open end (bottom of the image, higher row index)
if chnl_mask[0][1] - chnl_mask[0][0] != max_chnl_mask_len:
cm_copy[fov][peak][0][1] = chnl_mask[0][0] + max_chnl_mask_len
# enlarge widths around the middle, but make sure you don't get floats
if chnl_mask[1][1] - chnl_mask[1][0] != max_chnl_mask_wid:
wid_diff = max_chnl_mask_wid - (chnl_mask[1][1] - chnl_mask[1][0])
if wid_diff % 2 == 0:
cm_copy[fov][peak][1][0] = max(chnl_mask[1][0] - wid_diff//2, 0)
cm_copy[fov][peak][1][1] = min(chnl_mask[1][1] + wid_diff//2, image_cols - 1)
else:
cm_copy[fov][peak][1][0] = max(chnl_mask[1][0] - (wid_diff-1)//2, 0)
cm_copy[fov][peak][1][1] = min(chnl_mask[1][1] + (wid_diff+1)//2, image_cols - 1)
# convert all values to ints
chnl_mask[0][0] = int(chnl_mask[0][0])
chnl_mask[0][1] = int(chnl_mask[0][1])
chnl_mask[1][0] = int(chnl_mask[1][0])
chnl_mask[1][1] = int(chnl_mask[1][1])
# cm_copy[fov][peak] = {'y_top': chnl_mask[0][0],
# 'y_bot': chnl_mask[0][1],
# 'x_left': chnl_mask[1][0],
# 'x_right': chnl_mask[1][1]}
# print(type(cm_copy[fov][peak][1][0]), cm_copy[fov][peak][1][0])
#save the channel mask dictionary to a pickle and a text file
# with open(os.path.join(params['ana_dir'], 'channel_masks.pkl'), 'wb') as cmask_file:
# pickle.dump(cm_copy, cmask_file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(params['ana_dir'], 'channel_masks.txt'), 'w') as cmask_file:
pprint(cm_copy, stream=cmask_file)
with open(os.path.join(params['ana_dir'], 'channel_masks.yaml'), 'w') as cmask_file:
yaml.dump(data=cm_copy, stream=cmask_file, default_flow_style=False, tags=None)
information("Channel masks saved.")
return cm_copy
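# Example (illustrative sketch; the fov/peak ids below are hypothetical): the returned
# dictionary is nested as cm_copy[fov_id][peak_id] = [[y1, y2], [x1, x2]], so a single
# channel can be cropped out of a whole-FOV image with ordinary slicing:
# y1, y2 = cm_copy[1][200][0]
# x1, x2 = cm_copy[1][200][1]
# channel_crop = whole_fov_image[y1:y2, x1:x2]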
# get each peak_id and frame's mask bounding box from bounding boxes produced by a convolutional neural network
def make_channel_masks_CNN(bboxes_dict):
'''
The returned dictionary is keyed by peak_id and each value is an array of shape (frameNumber,2,2):
Each frameNumber's 2x2 slice of the array represents the given peak_id's [[minrow, maxrow],[mincol, maxcol]].
One important consequence of this function is that the channel ids and the size of the
channel slices are decided now. Updates to masks must coordinate with these values.
Parameters
bboxes_dict : dict
bounding boxes for each peak, keyed by peak_id. Each frame's box is ordered (minrow, mincol, maxrow, maxcol).
Returns
channel_masks : dict
dictionary of channel masks built from the CNN bounding boxes.
Called By
mm3_Compile.py
Calls
'''
# initialize the new channel_masks dict
channel_masks = {}
# reorder elements of tuples in bboxes_dict to match [[minrow, maxrow], [mincol, maxcol]] convention above
peak_ids = [peak_id for peak_id in bboxes_dict.keys()]
peak_ids.sort()
for peak_id in peak_ids:
# get each frame's bounding boxes for the given peak_id
frame_bboxes = bboxes_dict[peak_id]
# allocate a fresh array per peak so all peaks do not end up sharing one array object
bbox_array = np.zeros((len(frame_bboxes),2,2), dtype='uint16')
for frame_index in range(len(frame_bboxes)):
# replace the values in bbox_array with the proper ones from frame_bboxes
minrow = frame_bboxes[frame_index][0]
maxrow = frame_bboxes[frame_index][2]
mincol = frame_bboxes[frame_index][1]
maxcol = frame_bboxes[frame_index][3]
bbox_array[frame_index,0,0] = minrow
bbox_array[frame_index,0,1] = maxrow
bbox_array[frame_index,1,0] = mincol
bbox_array[frame_index,1,1] = maxcol
channel_masks[peak_id] = bbox_array
return(channel_masks)
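# Example (illustrative sketch; the peak id and pixel values are hypothetical): bboxes_dict
# maps each peak_id to a per-frame sequence of boxes ordered (minrow, mincol, maxrow, maxcol),
# which this function reorders into [[minrow, maxrow], [mincol, maxcol]] per frame:
# bboxes_dict = {200: [(10, 120, 260, 150), (11, 121, 261, 151)]}
# cnn_masks = make_channel_masks_CNN(bboxes_dict)
# cnn_masks[200][0] # -> array([[ 10, 260], [120, 150]], dtype=uint16)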
### functions about trimming, padding, and manipulating images
# define function for flipping the images on an FOV by FOV basis
def fix_orientation(image_data):
'''
Fix the orientation. The standard direction for channels to open to is down.
called by
process_tif
get_params
'''
# user parameter indicates how things should be flipped
image_orientation = params['compile']['image_orientation']
# if this is just a phase image, give it an extra dimension so the rest of the code works
flat = False # flag for if the image is flat or multiple levels
if len(image_data.shape) == 2:
image_data = np.expand_dims(image_data, 0)
flat = True
# setting image_orientation to 'auto' will use autodetection
if image_orientation == "auto":
# use 'phase_plane' to find the phase plane in image_data, assuming c1, c2, c3... naming scheme here.
try:
ph_channel = int(re.search('[0-9]', params['phase_plane']).group(0)) - 1
except:
# Pick the plane to analyze with the highest mean px value (should be phase)
ph_channel = np.argmax([np.mean(image_data[ci]) for ci in range(image_data.shape[0])])
# flip based on the index of the highest average row value
# this should be closer to the opening
if np.argmax(image_data[ph_channel].mean(axis = 1)) < image_data[ph_channel].shape[0] / 2:
image_data = image_data[:,::-1,:]
else:
pass # no need to do anything
# flip if up is chosen
elif image_orientation == "up":
return image_data[:,::-1,:]
# do not flip the images if "down" is the specified image orientation
elif image_orientation == "down":
pass
if flat:
image_data = image_data[0] # just return that first layer
return image_data
# cuts out channels from the image
def cut_slice(image_data, channel_loc):
'''Takes an image and cuts out the channel based on the slice location
slice location is the list with the peak information, in the form
[[y1, y2],[x1, x2]]. Returns the channel slice as a numpy array.
The numpy array will be a stack if there are multiple planes.
if you want to slice all the channels from a picture with the channel_masks
dictionary use a loop like this:
for channel_loc in channel_masks[fov_id].values(): # fov_id is the fov of the image
channel_slice = cut_slice(image_pixel_data, channel_loc)
# ... do something with the slice
NOTE: this function will try to determine what the shape of your
image is and slice accordingly. It expects the images are in the order
[t, y, x, c]. It assumes images with three dimensions are [y, x, c] not
[t, y, x].
'''
# case where image is in form [x, y]
if len(image_data.shape) == 2:
# make slice object
channel_slicer = np.s_[channel_loc[0][0]:channel_loc[0][1],
channel_loc[1][0]:channel_loc[1][1]]
# case where image is in form [x, y, c]
elif len(image_data.shape) == 3:
channel_slicer = np.s_[channel_loc[0][0]:channel_loc[0][1],
channel_loc[1][0]:channel_loc[1][1],:]
# case where image in form [t, x , y, c]
elif len(image_data.shape) == 4:
channel_slicer = np.s_[:,channel_loc[0][0]:channel_loc[0][1],
channel_loc[1][0]:channel_loc[1][1],:]
# slice based on appropriate slicer object.
channel_slice = image_data[channel_slicer]
# pad y of channel if slice happened to be outside of image
y_difference = (channel_loc[0][1] - channel_loc[0][0]) - channel_slice.shape[1]
if y_difference > 0:
paddings = [[0, 0], # t
[0, y_difference], # y
[0, 0], # x
[0, 0]] # c
channel_slice = np.pad(channel_slice, paddings, mode='edge')
return channel_slice
# calculate cross correlation between pixels in channel stack
def channel_xcorr(fov_id, peak_id):
'''
Function calculates the cross correlation of images in a
stack to the first image in the stack. The output is an
array that is the length of the stack with the best cross
correlation between that image and the first image.
The very first value should be 1.
'''
pad_size = params['subtract']['alignment_pad']
# Use this number of images to calculate cross correlations
number_of_images = 20
# load the phase contrast images
image_data = load_stack(fov_id, peak_id, color=params['phase_plane'])
# if there are more images than number_of_images, use number_of_images images evenly
# spaced across the range
if image_data.shape[0] > number_of_images:
spacing = int(image_data.shape[0] / number_of_images)
image_data = image_data[::spacing,:,:]
if image_data.shape[0] > number_of_images:
image_data = image_data[:number_of_images,:,:]
# we will compare all images to this one, needs to be padded to account for image drift
first_img = np.pad(image_data[0,:,:], pad_size, mode='reflect')
xcorr_array = [] # array holds cross correlation values
for img in image_data:
# use match_template to find all cross correlations for the
# current image against the first image.
xcorr_array.append(np.max(match_template(first_img, img)))
return xcorr_array
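# Example (illustrative sketch; the cutoff is a hypothetical value, not a parameter used
# here): the returned list starts at 1.0, and channels whose contents change over time
# de-correlate from the first frame, so a high mean correlation suggests a static channel:
# xcorrs = channel_xcorr(fov_id=1, peak_id=200)
# looks_static = np.mean(xcorrs) > 0.97 # hypothetical cutoff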
### functions about subtraction
# average empty channels from stacks, making another TIFF stack
def average_empties_stack(fov_id, specs, color='c1', align=True):
'''Takes the fov file name and the peak names of the designated empties,
averages them and saves the image
Parameters
fov_id : int
FOV number
specs : dict
specifies whether a channel should be analyzed (1), used for making
an average empty (0), or ignored (-1).
color : string
Which plane to use.
align : boolean
Flag that is passed to the worker function average_empties, indicates
whether images should be aligned before averaging (use False for fluorescent images)
Returns
True if successful.
Saves empty stack to analysis folder
'''
information("Creating average empty channel for FOV %d." % fov_id)
# get peak ids of empty channels for this fov
empty_peak_ids = []
for peak_id, spec in six.iteritems(specs[fov_id]):
if spec == 0: # 0 means it should be used for empty
empty_peak_ids.append(peak_id)
empty_peak_ids = sorted(empty_peak_ids) # sort for repeatability
# depending on how many empties there are choose what to do
# if there is no empty the user is going to have to copy another empty stack
if len(empty_peak_ids) == 0:
information("No empty channel designated for FOV %d." % fov_id)
return False
# if there is just one then you can just copy that channel
elif len(empty_peak_ids) == 1:
peak_id = empty_peak_ids[0]
information("One empty channel (%d) designated for FOV %d." % (peak_id, fov_id))
# load the one phase contrast as the empties
avg_empty_stack = load_stack(fov_id, peak_id, color=color)
# but if there is more than one empty you need to align and average them per timepoint
elif len(empty_peak_ids) > 1:
# load the image stacks into memory
empty_stacks = [] # list which holds phase image stacks of designated empties
for peak_id in empty_peak_ids:
# load data and append to list
image_data = load_stack(fov_id, peak_id, color=color)
empty_stacks.append(image_data)
information("%d empty channels designated for FOV %d." % (len(empty_stacks), fov_id))
# go through time points and create list of averaged empties
avg_empty_stack = [] # list will be later concatenated into numpy array
time_points = range(image_data.shape[0]) # index is time
for t in time_points:
# get images from one timepoint at a time and send to alignment and averaging
imgs = [stack[t] for stack in empty_stacks]
avg_empty = average_empties(imgs, align=align) # function is in mm3
avg_empty_stack.append(avg_empty)
# concatenate list and then save out to tiff stack
avg_empty_stack = np.stack(avg_empty_stack, axis=0)
# save out data
if params['output'] == 'TIFF':
# make new name and save it
empty_filename = params['experiment_name'] + '_xy%03d_empty_%s.tif' % (fov_id, color)
tiff.imsave(os.path.join(params['empty_dir'],empty_filename), avg_empty_stack, compress=4)
if params['output'] == 'HDF5':
h5f = h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'r+')
# delete the dataset if it exists (important for debug)
if 'empty_%s' % color in h5f:
del h5f[u'empty_%s' % color]
# the empty channel should be its own dataset
h5ds = h5f.create_dataset(u'empty_%s' % color,
data=avg_empty_stack,
chunks=(1, avg_empty_stack.shape[1], avg_empty_stack.shape[2]),
maxshape=(None, avg_empty_stack.shape[1], avg_empty_stack.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
# give attribute which says which channels contribute
h5ds.attrs.create('empty_channels', empty_peak_ids)
h5f.close()
information("Saved empty channel for FOV %d." % fov_id)
return True
# averages a list of empty channels
def average_empties(imgs, align=True):
'''
This function averages a set of images (empty channels) and returns a single image
of the same size. It first aligns the images to the first image before averaging.
Alignment is done by enlarging the first image using edge padding.
Subsequent images are then aligned to this image and the offset recorded.
These images are padded such that they are the same size as the first (padded) image but
with the image in the correct (aligned) place. Edge padding is again used.
The images are then placed in a stack and averaged. This image is trimmed so it is the size
of the original images
Called by
average_empties_stack
'''
aligned_imgs = [] # list contains the aligned, padded images
if align:
# pixel size to use for padding (amount that alignment could be off)
pad_size = params['subtract']['alignment_pad']
for n, img in enumerate(imgs):
# if this is the first image, pad it and add it to the stack
if n == 0:
ref_img = np.pad(img, pad_size, mode='reflect') # padded reference image
aligned_imgs.append(ref_img)
# otherwise align this image to the first padded image
else:
# find correlation between a convolution of img against the padded reference
match_result = match_template(ref_img, img)
# find index of highest correlation (relative to top left corner of img)
y, x = np.unravel_index(np.argmax(match_result), match_result.shape)
# pad img so it aligns and is the same size as reference image
pad_img = np.pad(img, ((y, ref_img.shape[0] - (y + img.shape[0])),
(x, ref_img.shape[1] - (x + img.shape[1]))), mode='reflect')
aligned_imgs.append(pad_img)
else:
# don't align, just link the names to go forward easily
aligned_imgs = imgs
# stack the aligned data along 3rd axis
aligned_imgs = np.dstack(aligned_imgs)
# get a mean image along 3rd axis
avg_empty = np.nanmean(aligned_imgs, axis=2)
# trim off the padded edges (only if images were aligned, otherwise there was no padding)
if align:
avg_empty = avg_empty[pad_size:-1*pad_size, pad_size:-1*pad_size]
# change type back to unsigned 16 bit not floats
avg_empty = avg_empty.astype(dtype='uint16')
return avg_empty
# this function is used when one FOV doesn't have an empty
def copy_empty_stack(from_fov, to_fov, color='c1'):
'''Copy an empty stack from one FOV to another'''
# load empty stack from one FOV
information('Loading empty stack from FOV {} to save for FOV {}.'.format(from_fov, to_fov))
avg_empty_stack = load_stack(from_fov, 0, color='empty_{}'.format(color))
# save out data
if params['output'] == 'TIFF':
# make new name and save it
empty_filename = params['experiment_name'] + '_xy%03d_empty_%s.tif' % (to_fov, color)
tiff.imsave(os.path.join(params['empty_dir'],empty_filename), avg_empty_stack, compress=4)
if params['output'] == 'HDF5':
h5f = h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % to_fov), 'r+')
# delete the dataset if it exists (important for debug)
if 'empty_%s' % color in h5f:
del h5f[u'empty_%s' % color]
# the empty channel should be its own dataset
h5ds = h5f.create_dataset(u'empty_%s' % color,
data=avg_empty_stack,
chunks=(1, avg_empty_stack.shape[1], avg_empty_stack.shape[2]),
maxshape=(None, avg_empty_stack.shape[1], avg_empty_stack.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
# give attribute which says which channels contribute. Just put 0
h5ds.attrs.create('empty_channels', [0])
h5f.close()
information("Saved empty channel for FOV %d." % to_fov)
# Do subtraction for an fov over many timepoints
def subtract_fov_stack(fov_id, specs, color='c1', method='phase'):
'''
For a given FOV, loads the precomputed empty stack and does subtraction on
all peaks in the FOV designated to be analyzed
Parameters
----------
color : string, 'c1', 'c2', etc.
This is the channel on which to do subtraction. Will be appended to the word empty.
Called by
mm3_Subtract.py
Calls
mm3.subtract_phase
'''
information('Subtracting peaks for FOV %d.' % fov_id)
# load empty stack; feed a dummy peak number to get the empty
avg_empty_stack = load_stack(fov_id, 0, color='empty_{}'.format(color))
# determine which peaks are to be analyzed
ana_peak_ids = []
for peak_id, spec in six.iteritems(specs[fov_id]):
if spec == 1: # 0 means it should be used for empty, -1 is ignore
ana_peak_ids.append(peak_id)
ana_peak_ids = sorted(ana_peak_ids) # sort for repeatability
information("Subtracting %d channels for FOV %d." % (len(ana_peak_ids), fov_id))
# just break if there are no peaks to analyze
if not ana_peak_ids:
return False
# load images for the peak and get phase images
for peak_id in ana_peak_ids:
information('Subtracting peak %d.' % peak_id)
image_data = load_stack(fov_id, peak_id, color=color)
# make a list for all time points to send to a multiprocessing pool
# list will be the length of image_data with tuples (image, empty)
subtract_pairs = zip(image_data, avg_empty_stack)
# # set up multiprocessing pool to do subtraction. Should wait until finished
# pool = Pool(processes=params['num_analyzers'])
# if method == 'phase':
# subtracted_imgs = pool.map(subtract_phase, subtract_pairs, chunksize=10)
# elif method == 'fluor':
# subtracted_imgs = pool.map(subtract_fluor, subtract_pairs, chunksize=10)
# pool.close() # tells the process nothing more will be added.
# pool.join() # blocks script until everything has been processed and workers exit
# linear loop for debug; choose the subtraction function based on the method argument
if method == 'phase':
subtracted_imgs = [subtract_phase(subtract_pair) for subtract_pair in subtract_pairs]
elif method == 'fluor':
subtracted_imgs = [subtract_fluor(subtract_pair) for subtract_pair in subtract_pairs]
# stack them up along a time axis
subtracted_stack = np.stack(subtracted_imgs, axis=0)
# save out the subtracted stack
if params['output'] == 'TIFF':
sub_filename = params['experiment_name'] + '_xy%03d_p%04d_sub_%s.tif' % (fov_id, peak_id, color)
tiff.imsave(os.path.join(params['sub_dir'],sub_filename), subtracted_stack, compress=4) # save it
if fov_id==1 and peak_id<50:
napari.current_viewer().add_image(subtracted_stack, name='Subtracted' + '_xy1_p'+str(peak_id)+'_sub_'+str(color)+'.tif', visible=True)
if params['output'] == 'HDF5':
h5f = h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'r+')
# put subtracted channel in correct group
h5g = h5f['channel_%04d' % peak_id]
# delete the dataset if it exists (important for debug)
if 'p%04d_sub_%s' % (peak_id, color) in h5g:
del h5g['p%04d_sub_%s' % (peak_id, color)]
h5ds = h5g.create_dataset(u'p%04d_sub_%s' % (peak_id, color),
data=subtracted_stack,
chunks=(1, subtracted_stack.shape[1], subtracted_stack.shape[2]),
maxshape=(None, subtracted_stack.shape[1], subtracted_stack.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
information("Saved subtracted channel %d." % peak_id)
if params['output'] == 'HDF5':
h5f.close()
return True
# subtracts one phase contrast image from another.
def subtract_phase(image_pair):
'''subtract_phase aligns and subtracts an empty channel image from a channel image.
Modified from subtract_phase_only by jt on 20160511
The subtracted image returned is the same size as the image given. It may however include
data points around the edge that are meaningless but not marked.
We align the empty channel to the phase channel, then subtract.
Parameters
image_pair : tuple of length two with; (image, empty_mean)
Returns
channel_subtracted : np.array
The subtracted image
Called by
subtract_fov_stack
'''
# get out data and pad
cropped_channel, empty_channel = image_pair # [channel slice, empty slice]
# this is for aligning the empty channel to the cell channel.
### Pad cropped channel.
pad_size = params['subtract']['alignment_pad'] # pixel size to use for padding (amount that alignment could be off)
padded_chnl = np.pad(cropped_channel, pad_size, mode='reflect')
# ### Align channel to empty using match template.
# use match template to get a correlation array and find the position of maximum overlap
match_result = match_template(padded_chnl, empty_channel)
# get row and column of max correlation value in correlation array
y, x = np.unravel_index(np.argmax(match_result), match_result.shape)
# pad the empty channel according to the alignment so it can be overlaid on the padded channel.
empty_paddings = [[y, padded_chnl.shape[0] - (y + empty_channel.shape[0])],
[x, padded_chnl.shape[1] - (x + empty_channel.shape[1])]]
aligned_empty = np.pad(empty_channel, empty_paddings, mode='reflect')
# now trim it off so it is the same size as the original channel
aligned_empty = aligned_empty[pad_size:-1*pad_size, pad_size:-1*pad_size]
### Compute the difference between the empty and channel phase contrast images
# subtract cropped cell image from empty channel.
channel_subtracted = aligned_empty.astype('int32') - cropped_channel.astype('int32')
# channel_subtracted = cropped_channel.astype('int32') - aligned_empty.astype('int32')
# just zero out anything less than 0. This is what Sattar does
channel_subtracted[channel_subtracted < 0] = 0
channel_subtracted = channel_subtracted.astype('uint16') # change back to 16bit
return channel_subtracted
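# Example (illustrative sketch; fov, peak and color names are hypothetical): subtract a
# single timepoint by pairing one channel frame with the matching averaged-empty frame:
# chnl_stack = load_stack(1, 200, color='c1')
# empty_stack = load_stack(1, 0, color='empty_c1')
# sub_img = subtract_phase((chnl_stack[0], empty_stack[0])) # uint16, background near zero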
# subtract one fluorescence image from another.
def subtract_fluor(image_pair):
''' subtract_fluor does a simple subtraction of one image from another. Unlike subtract_phase,
there is no alignment. Also, the empty channel is subtracted from the full channel.
Parameters
image_pair : tuple of length two with; (image, empty_mean)
Returns
channel_subtracted : np.array
The subtracted image.
Called by
subtract_fov_stack
'''
# get out data and pad
cropped_channel, empty_channel = image_pair # [channel slice, empty slice]
# check frame size of cropped channel and background, always keep crop channel size the same
crop_size = np.shape(cropped_channel)[:2]
empty_size = np.shape(empty_channel)[:2]
if crop_size != empty_size:
if crop_size[0] > empty_size[0] or crop_size[1] > empty_size[1]:
pad_row_length = max(crop_size[0] - empty_size[0], 0) # prevent negatives
pad_column_length = max(crop_size[1] - empty_size[1], 0)
empty_channel = np.pad(empty_channel,
[[int(.5*pad_row_length), pad_row_length-int(.5*pad_row_length)],
[int(.5*pad_column_length), pad_column_length-int(.5*pad_column_length)],
[0,0]], 'edge')
# mm3.information('size adjusted 1')
empty_size = np.shape(empty_channel)[:2]
if crop_size[0] < empty_size[0] or crop_size[1] < empty_size[1]:
empty_channel = empty_channel[:crop_size[0], :crop_size[1],]
### Compute the difference between the channel and empty fluorescence images
# subtract the empty channel from the cropped cell image.
channel_subtracted = cropped_channel.astype('int32') - empty_channel.astype('int32')
# channel_subtracted = cropped_channel.astype('int32') - aligned_empty.astype('int32')
# just zero out anything less than 0.
channel_subtracted[channel_subtracted < 0] = 0
channel_subtracted = channel_subtracted.astype('uint16') # change back to 16bit
return channel_subtracted
### functions that deal with segmentation and lineages
# Do segmentation for a channel time stack
def segment_chnl_stack(fov_id, peak_id):
'''
For a given fov and peak (channel), do segmentation for all images in the
subtracted .tif stack.
Called by
mm3_Segment.py
Calls
mm3.segment_image
'''
information('Segmenting FOV %d, channel %d.' % (fov_id, peak_id))
# load subtracted images
sub_stack = load_stack(fov_id, peak_id, color='sub_{}'.format(params['phase_plane']))
# set up multiprocessing pool to do segmentation. Will do everything before going on.
#pool = Pool(processes=params['num_analyzers'])
# send the 3d array to multiprocessing
#segmented_imgs = pool.map(segment_image, sub_stack, chunksize=8)
#pool.close() # tells the process nothing more will be added.
#pool.join() # blocks script until everything has been processed and workers exit
# image by image for debug
segmented_imgs = []
for sub_image in sub_stack:
segmented_imgs.append(segment_image(sub_image))
# stack them up along a time axis
segmented_imgs = np.stack(segmented_imgs, axis=0)
segmented_imgs = segmented_imgs.astype('uint8')
# save out the segmented stack
if params['output'] == 'TIFF':
seg_filename = params['experiment_name'] + '_xy%03d_p%04d_%s.tif' % (fov_id, peak_id, params['seg_img'])
tiff.imsave(os.path.join(params['seg_dir'],seg_filename),
segmented_imgs, compress=5)
if fov_id==1 and peak_id<50:
napari.current_viewer().add_image(segmented_imgs, name='Segmented' + '_xy1_p'+str(peak_id)+'_sub_'+str(params['seg_img'])+'.tif', visible=True)
if params['output'] == 'HDF5':
h5f = h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'r+')
# put segmented channel in correct group
h5g = h5f['channel_%04d' % peak_id]
# delete the dataset if it exists (important for debug)
if 'p%04d_%s' % (peak_id, params['seg_img']) in h5g:
del h5g['p%04d_%s' % (peak_id, params['seg_img'])]
h5ds = h5g.create_dataset(u'p%04d_%s' % (peak_id, params['seg_img']),
data=segmented_imgs,
chunks=(1, segmented_imgs.shape[1], segmented_imgs.shape[2]),
maxshape=(None, segmented_imgs.shape[1], segmented_imgs.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
h5f.close()
information("Saved segmented channel %d." % peak_id)
return True
# segmentation algorithm
def segment_image(image):
'''Segments a subtracted image and returns a labeled image
Parameters
image : a ndarray which is an image. This should be the subtracted image
Returns
labeled_image : a ndarray which is also an image. Pixels belonging to the same labeled
region, which should correspond to one cell, share the same integer value, with labels
starting at 1. Non-labeled area has value zero.
'''
# load in segmentation parameters
OTSU_threshold = params['segment']['otsu']['OTSU_threshold']
first_opening_size = params['segment']['otsu']['first_opening_size']
distance_threshold = params['segment']['otsu']['distance_threshold']
second_opening_size = params['segment']['otsu']['second_opening_size']
min_object_size = params['segment']['otsu']['min_object_size']
# threshold image
try:
thresh = threshold_otsu(image) # finds optimal OTSU threshold value
except:
return np.zeros_like(image)
threshholded = image > OTSU_threshold*thresh # will create binary image
# if there are no cells, it is good to clear the border
# because otherwise the Otsu threshold just picks up noise, most
# likely on the side of the image
threshholded = segmentation.clear_border(threshholded)
# Opening = erosion then dilation.
# opening smooths images, breaks isthmuses, and eliminates protrusions.
# "opens" dark gaps between bright features.
morph = morphology.binary_opening(threshholded, morphology.disk(first_opening_size))
# if this image is empty at this point (likely if there were no cells), just return
# zero array
if np.amax(morph) == 0:
return np.zeros_like(image)
### Calculate distance matrix, use as markers for random walker (diffusion watershed)
# Generate the markers based on distance to the background
distance = ndi.distance_transform_edt(morph)
# threshold distance image
distance_thresh = np.zeros_like(distance)
distance_thresh[distance < distance_threshold] = 0
distance_thresh[distance >= distance_threshold] = 1
# do an extra opening on the distance
distance_opened = morphology.binary_opening(distance_thresh,
morphology.disk(second_opening_size))
# remove artifacts connected to image border
cleared = segmentation.clear_border(distance_opened)
# remove small objects. Remove small objects wants a
# labeled image and will fail if there is only one label. Return zero image in that case
# could have used try/except but remove_small_objects loves to issue warnings.
cleared, label_num = morphology.label(cleared, connectivity=1, return_num=True)
if label_num > 1:
cleared = morphology.remove_small_objects(cleared, min_size=min_object_size)
else:
# if there are one or zero labels, just return a zero image
return np.zeros_like(image)
# relabel now that small objects and labels on edges have been cleared
markers = morphology.label(cleared, connectivity=1)
# just break if there is no label
if np.amax(markers) == 0:
return np.zeros_like(image)
# the binary image for the watershed, which uses the unmodified OTSU threshold
threshholded_watershed = threshholded
threshholded_watershed = segmentation.clear_border(threshholded_watershed)
# label using the random walker (diffusion watershed) algorithm
try:
# set anything outside of OTSU threshold to -1 so it will not be labeled
markers[threshholded_watershed == 0] = -1
# here is the main algorithm
labeled_image = segmentation.random_walker(-1*image, markers)
# put negative values back to zero for proper image
labeled_image[labeled_image == -1] = 0
except:
return np.zeros_like(image)
return labeled_image
# loss functions for model
def dice_coeff(y_true, y_pred):
smooth = 1.
# Flatten
y_true_f = tf.reshape(y_true, [-1])
y_pred_f = tf.reshape(y_pred, [-1])
intersection = tf.reduce_sum(y_true_f * y_pred_f)
score = (2. * intersection + smooth) / (tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)
return score
def dice_loss(y_true, y_pred):
loss = 1 - dice_coeff(y_true, y_pred)
return loss
def bce_dice_loss(y_true, y_pred):
loss = losses.binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred)
return loss
def tversky_loss(y_true, y_pred):
alpha = 0.5
beta = 0.5
ones = K.ones((512,512,3)) #K.ones(K.shape(y_true))
p0 = y_pred # proba that voxels are class i
p1 = ones-y_pred # proba that voxels are not class i
g0 = y_true
g1 = ones-y_true
num = K.sum(p0*g0, (0,1,2))
den = num + alpha*K.sum(p0*g1,(0,1,2)) + beta*K.sum(p1*g0,(0,1,2))
T = K.sum(num/den) # when summing over classes, T has dynamic range [0 Ncl]
Ncl = K.cast(K.shape(y_true)[-1], 'float32')
return Ncl-T
def cce_tversky_loss(y_true, y_pred):
loss = losses.categorical_crossentropy(y_true, y_pred) + tversky_loss(y_true, y_pred)
return loss
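# Example (illustrative sketch; `unet_model` is a placeholder for a Keras model defined
# elsewhere): these loss and metric functions are passed to Keras by reference at compile
# time, e.g.:
# unet_model.compile(optimizer='adam', loss=bce_dice_loss, metrics=[dice_coeff])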
def get_pad_distances(unet_shape, img_height, img_width):
'''Finds padding and trimming sizes to make the input image the same as the size expected by the U-net model.
Padding is split evenly between the two sides of each dimension (top/bottom and left/right). Trimming is only done from the right or bottom.
'''
half_width_pad = (unet_shape[1]-img_width)/2
if half_width_pad > 0:
left_pad = int(np.floor(half_width_pad))
right_pad = int(np.ceil(half_width_pad))
right_trim = 0
else:
left_pad = 0
right_pad = 0
right_trim = img_width - unet_shape[1]
half_height_pad = (unet_shape[0]-img_height)/2
if half_height_pad > 0:
top_pad = int(np.floor(half_height_pad))
bottom_pad = int(np.ceil(half_height_pad))
bottom_trim = 0
else:
top_pad = 0
bottom_pad = 0
bottom_trim = img_height - unet_shape[0]
pad_dict = {'top_pad' : top_pad,
'bottom_pad' : bottom_pad,
'right_pad' : right_pad,
'left_pad' : left_pad,
'bottom_trim' : bottom_trim,
'right_trim' : right_trim}
return pad_dict
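# Worked example (hypothetical sizes): with unet_shape=(256, 32) and an image of height 230
# and width 40, the image is shorter than the model input, so it is padded vertically, and
# wider, so it is trimmed from the right:
# get_pad_distances((256, 32), img_height=230, img_width=40)
# # -> {'top_pad': 13, 'bottom_pad': 13, 'right_pad': 0, 'left_pad': 0,
# # 'bottom_trim': 0, 'right_trim': 8}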
#@profile
def segment_cells_unet(ana_peak_ids, fov_id, pad_dict, unet_shape, model):
batch_size = params['segment']['batch_size']
cellClassThreshold = params['segment']['cell_class_threshold']
if cellClassThreshold == 'None': # yaml imports None as a string
cellClassThreshold = False
min_object_size = params['segment']['min_object_size']
# arguments to data generator
# data_gen_args = {'batch_size':batch_size,
# 'n_channels':1,
# 'normalize_to_one':False,
# 'shuffle':False}
# arguments to predict_generator
predict_args = dict(use_multiprocessing=True,
workers=params['num_analyzers'],
verbose=1)
for peak_id in ana_peak_ids:
information('Segmenting peak {}.'.format(peak_id))
img_stack = load_stack(fov_id, peak_id, color=params['phase_plane'])
if params['segment']['normalize_to_one']:
med_stack = np.zeros(img_stack.shape)
selem = morphology.disk(1)
for frame_idx in range(img_stack.shape[0]):
tmpImg = img_stack[frame_idx,...]
med_stack[frame_idx,...] = median(tmpImg, selem)
# robust normalization of peak's image stack to 1
max_val = np.max(med_stack)
img_stack = img_stack/max_val
img_stack[img_stack > 1] = 1
# trim and pad image to correct size
img_stack = img_stack[:, :unet_shape[0], :unet_shape[1]]
img_stack = np.pad(img_stack,
((0,0),
(pad_dict['top_pad'],pad_dict['bottom_pad']),
(pad_dict['left_pad'],pad_dict['right_pad'])),
mode='constant')
img_stack = np.expand_dims(img_stack, -1) # TF expects images to be 4D
# set up image generator
# image_generator = CellSegmentationDataGenerator(img_stack, **data_gen_args)
image_datagen = ImageDataGenerator()
image_generator = image_datagen.flow(x=img_stack,
batch_size=batch_size,
shuffle=False) # keep same order
# predict cell locations. This has multiprocessing built in but I need to mess with the parameters to see how to best utilize it. ***
predictions = model.predict_generator(image_generator, **predict_args)
# post processing
# remove padding including the added last dimension
predictions = predictions[:, pad_dict['top_pad']:unet_shape[0]-pad_dict['bottom_pad'],
pad_dict['left_pad']:unet_shape[1]-pad_dict['right_pad'], 0]
# pad back in case the image had been trimmed
predictions = np.pad(predictions,
((0,0),
(0,pad_dict['bottom_trim']),
(0,pad_dict['right_trim'])),
mode='constant')
if params['segment']['save_predictions']:
pred_filename = params['experiment_name'] + '_xy%03d_p%04d_%s.tif' % (fov_id, peak_id, params['pred_img'])
if not os.path.isdir(params['pred_dir']):
os.makedirs(params['pred_dir'])
int_preds = (predictions * 255).astype('uint8')
tiff.imsave(os.path.join(params['pred_dir'], pred_filename),
int_preds, compress=4)
# binarize and label (if there is a threshold value; otherwise, save a grayscale for debug)
if cellClassThreshold:
predictions[predictions >= cellClassThreshold] = 1
predictions[predictions < cellClassThreshold] = 0
predictions = predictions.astype('uint8')
segmented_imgs = np.zeros(predictions.shape, dtype='uint8')
# process and label each frame of the channel
for frame in range(segmented_imgs.shape[0]):
# get rid of small holes
predictions[frame,:,:] = morphology.remove_small_holes(predictions[frame,:,:], min_object_size)
# get rid of small objects.
predictions[frame,:,:] = morphology.remove_small_objects(morphology.label(predictions[frame,:,:], connectivity=1), min_size=min_object_size)
# remove labels which touch the border
predictions[frame,:,:] = segmentation.clear_border(predictions[frame,:,:])
# relabel now
segmented_imgs[frame,:,:] = morphology.label(predictions[frame,:,:], connectivity=1)
else: # in this case just scale the 0 to 1 float predictions to a grayscale image for debugging
information('Converting predictions to grayscale.')
segmented_imgs = np.around(predictions * 100)
# both binary and grayscale should be 8bit. This may be ensured above and is unnecessary
segmented_imgs = segmented_imgs.astype('uint8')
# save out the segmented stacks
if params['output'] == 'TIFF':
seg_filename = params['experiment_name'] + '_xy%03d_p%04d_%s.tif' % (fov_id, peak_id, params['seg_img'])
tiff.imsave(os.path.join(params['seg_dir'], seg_filename),
segmented_imgs, compress=4)
if params['output'] == 'HDF5':
h5f = h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'r+')
# put segmented channel in correct group
h5g = h5f['channel_%04d' % peak_id]
# delete the dataset if it exists (important for debug)
if 'p%04d_%s' % (peak_id, params['seg_img']) in h5g:
del h5g['p%04d_%s' % (peak_id, params['seg_img'])]
h5ds = h5g.create_dataset(u'p%04d_%s' % (peak_id, params['seg_img']),
data=segmented_imgs,
chunks=(1, segmented_imgs.shape[1], segmented_imgs.shape[2]),
maxshape=(None, segmented_imgs.shape[1], segmented_imgs.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
h5f.close()
#@profile
def segment_fov_unet(fov_id, specs, model, color=None):
'''
Segments the channels from one fov using the U-net CNN model.
Parameters
----------
fov_id : int
specs : dict
model : TensorFlow model
'''
information('Segmenting FOV {} with U-net.'.format(fov_id))
if color is None:
color = params['phase_plane']
# load segmentation parameters
unet_shape = (params['segment']['trained_model_image_height'],
params['segment']['trained_model_image_width'])
### determine stitching of images.
# need channel shape, specifically the width. load first for example
# this assumes that all channels are the same size for this FOV, which they should be
for peak_id, spec in six.iteritems(specs[fov_id]):
if spec == 1:
break # just break out with the current peak_id
img_stack = load_stack(fov_id, peak_id, color=color)
img_height = img_stack.shape[1]
img_width = img_stack.shape[2]
pad_dict = get_pad_distances(unet_shape, img_height, img_width)
# determine how many channels we have to analyze for this FOV
ana_peak_ids = []
for peak_id, spec in six.iteritems(specs[fov_id]):
if spec == 1:
ana_peak_ids.append(peak_id)
ana_peak_ids.sort() # sort for repeatability
#ana_peak_ids = ana_peak_ids[:2]
segment_cells_unet(ana_peak_ids, fov_id, pad_dict, unet_shape, model)
information("Finished segmentation for FOV {}.".format(fov_id))
return
def segment_foci_unet(ana_peak_ids, fov_id, pad_dict, unet_shape, model):
# batch_size = params['foci']['batch_size']
focusClassThreshold = params['foci']['focus_threshold']
if focusClassThreshold == 'None': # yaml imports None as a string
focusClassThreshold = False
# arguments to data generator
data_gen_args = {'batch_size':params['foci']['batch_size'],
'n_channels':1,
'normalize_to_one':False,
'shuffle':False}
# arguments to predict_generator
predict_args = dict(use_multiprocessing=False,
# workers=params['num_analyzers'],
verbose=1)
for peak_id in ana_peak_ids:
information('Segmenting foci in peak {}.'.format(peak_id))
# print(peak_id) # debugging a shape error at some traps
img_stack = load_stack(fov_id, peak_id, color=params['foci']['foci_plane'])
# pad image to correct size
img_stack = np.pad(img_stack,
((0,0),
(pad_dict['top_pad'],pad_dict['bottom_pad']),
(pad_dict['left_pad'],pad_dict['right_pad'])),
mode='constant')
img_stack = np.expand_dims(img_stack, -1)
# set up image generator
image_generator = FocusSegmentationDataGenerator(img_stack, **data_gen_args)
# predict foci locations.
predictions = model.predict_generator(image_generator, **predict_args)
# post processing
# remove padding including the added last dimension
predictions = predictions[:, pad_dict['top_pad']:unet_shape[0]-pad_dict['bottom_pad'],
pad_dict['left_pad']:unet_shape[1]-pad_dict['right_pad'], 0]
if params['foci']['save_predictions']:
pred_filename = params['experiment_name'] + '_xy%03d_p%04d_%s.tif' % (fov_id, peak_id, params['pred_img'])
if not os.path.isdir(params['foci_pred_dir']):
os.makedirs(params['foci_pred_dir'])
int_preds = (predictions * 255).astype('uint8')
tiff.imsave(os.path.join(params['foci_pred_dir'], pred_filename),
int_preds, compress=4)
# binarize and label (if there is a threshold value; otherwise, save a grayscale for debug)
if focusClassThreshold:
predictions[predictions >= focusClassThreshold] = 1
predictions[predictions < focusClassThreshold] = 0
predictions = predictions.astype('uint8')
segmented_imgs = np.zeros(predictions.shape, dtype='uint8')
# process and label each frame of the channel
for frame in range(segmented_imgs.shape[0]):
# get rid of small holes
# predictions[frame,:,:] = morphology.remove_small_holes(predictions[frame,:,:], min_object_size)
# get rid of small objects.
# predictions[frame,:,:] = morphology.remove_small_objects(morphology.label(predictions[frame,:,:], connectivity=1), min_size=min_object_size)
# remove labels which touch the border
predictions[frame,:,:] = segmentation.clear_border(predictions[frame,:,:])
# relabel now
segmented_imgs[frame,:,:] = morphology.label(predictions[frame,:,:], connectivity=2)
else: # in this case just scale the 0 to 1 float predictions to a grayscale image for debugging
information('Converting predictions to grayscale.')
segmented_imgs = np.around(predictions * 100)
# both binary and grayscale should be 8bit. This may be ensured above and is unnecessary
segmented_imgs = segmented_imgs.astype('uint8')
# save out the segmented stacks
if params['output'] == 'TIFF':
seg_filename = params['experiment_name'] + '_xy%03d_p%04d_%s.tif' % (fov_id, peak_id, params['seg_img'])
tiff.imsave(os.path.join(params['foci_seg_dir'], seg_filename),
segmented_imgs, compress=4)
if params['output'] == 'HDF5':
h5f = h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'r+')
# put segmented channel in correct group
h5g = h5f['channel_%04d' % peak_id]
# delete the dataset if it exists (important for debug)
if 'p%04d_%s' % (peak_id, params['seg_img']) in h5g:
del h5g['p%04d_%s' % (peak_id, params['seg_img'])]
h5ds = h5g.create_dataset(u'p%04d_%s' % (peak_id, params['seg_img']),
data=segmented_imgs,
chunks=(1, segmented_imgs.shape[1], segmented_imgs.shape[2]),
maxshape=(None, segmented_imgs.shape[1], segmented_imgs.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
h5f.close()
def segment_fov_foci_unet(fov_id, specs, model, color=None):
'''
Segments foci in the channels from one fov using the U-net CNN model.
Parameters
----------
fov_id : int
specs : dict
model : TensorFlow model
'''
information('Segmenting FOV {} with U-net.'.format(fov_id))
if color is None:
color = params['phase_plane']
# load segmentation parameters
unet_shape = (params['segment']['trained_model_image_height'],
params['segment']['trained_model_image_width'])
### determine stitching of images.
# need channel shape, specifically the width. load first for example
# this assumes that all channels are the same size for this FOV, which they should be
for peak_id, spec in six.iteritems(specs[fov_id]):
if spec == 1:
break # just break out with the current peak_id
img_stack = load_stack(fov_id, peak_id, color=color)
img_height = img_stack.shape[1]
img_width = img_stack.shape[2]
# find padding and trimming distances
pad_dict = get_pad_distances(unet_shape, img_height, img_width)
# timepoints = img_stack.shape[0]
# determine how many channels we have to analyze for this FOV
ana_peak_ids = []
for peak_id, spec in six.iteritems(specs[fov_id]):
if spec == 1:
ana_peak_ids.append(peak_id)
ana_peak_ids.sort() # sort for repeatability
k = segment_foci_unet(ana_peak_ids, fov_id, pad_dict, unet_shape, model)
information("Finished segmentation for FOV {}.".format(fov_id))
return(k)
# class for image generation for predicting cell locations in phase-contrast images
class CellSegmentationDataGenerator(utils.Sequence):
'Generates data for Keras'
def __init__(self,
img_array,
batch_size=32,
n_channels=1,
shuffle=False,
normalize_to_one=False):
'Initialization'
self.dim = (img_array.shape[1], img_array.shape[2])
self.batch_size = batch_size
self.img_array = img_array
self.img_number = img_array.shape[0]
self.n_channels = n_channels
self.shuffle = shuffle
self.on_epoch_end()
self.normalize_to_one = normalize_to_one
if normalize_to_one:
self.selem = morphology.disk(1)
def __len__(self):
'Denotes the number of batches per epoch'
return(int(np.ceil(self.img_number / self.batch_size)))
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
# Find list of IDs
array_list_temp = [self.img_array[k,:,:,0] for k in indexes]
# Generate data
X = self.__data_generation(array_list_temp)
return X
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(self.img_number)
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __data_generation(self, array_list_temp):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
X = np.zeros((self.batch_size, self.dim[0], self.dim[1], self.n_channels))
# Generate data
for i in range(self.batch_size):
# Store sample
try:
tmpImg = array_list_temp[i]
except IndexError:
X = X[:i,...]
break
# ensure image is uint8
if tmpImg.dtype=="uint16":
tmpImg = tmpImg / 2**16 * 2**8
tmpImg = tmpImg.astype('uint8')
if self.normalize_to_one:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
medImg = median(tmpImg, self.selem)
tmpImg = tmpImg/np.max(medImg)
tmpImg[tmpImg > 1] = 1
X[i,:,:,0] = tmpImg
return (X)
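# Example (illustrative sketch mirroring the commented-out usage in segment_cells_unet;
# `model` is assumed to be a loaded Keras U-net): wrap a (frames, rows, cols, 1) stack in
# the generator and predict in order, without shuffling:
# gen = CellSegmentationDataGenerator(img_stack, batch_size=32, n_channels=1,
# normalize_to_one=False, shuffle=False)
# predictions = model.predict_generator(gen, verbose=1)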
class TemporalCellDataGenerator(utils.Sequence):
'Generates data for Keras'
def __init__(self,
fileName,
batch_size=32,
dim=(32,32,32),
n_channels=1,
n_classes=10,
shuffle=False,
normalize_to_one=False):
'Initialization'
self.dim = dim
self.batch_size = batch_size
self.fileName = fileName
self.n_channels = n_channels
self.n_classes = n_classes
self.shuffle = shuffle
self.on_epoch_end()
self.normalize_to_one = normalize_to_one
if normalize_to_one:
self.selem = morphology.disk(1)
def __len__(self):
'Denotes the number of batches per epoch'
return int(np.ceil(self.batch_size / self.batch_size))
def __getitem__(self, index):
'Generate one batch of data'
# Generate data
X = self.__data_generation()
return X
def on_epoch_end(self):
'Updates indexes after each epoch'
pass
def __data_generation(self):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
X = np.zeros((self.batch_size, self.dim[0], self.dim[1], self.dim[2], self.n_channels))
full_stack = io.imread(self.fileName)
if full_stack.dtype=="uint16":
full_stack = full_stack / 2**16 * 2**8
full_stack = full_stack.astype('uint8')
img_height = full_stack.shape[1]
img_width = full_stack.shape[2]
pad_dict = get_pad_distances(self.dim, img_height, img_width)
full_stack = np.pad(full_stack,
((0,0),
(pad_dict['top_pad'],pad_dict['bottom_pad']),
(pad_dict['left_pad'],pad_dict['right_pad'])
),
mode='constant')
full_stack = full_stack.transpose(1,2,0)
# Generate data
for i in range(self.batch_size):
if i == 0:
tmpImg = np.zeros((self.dim[0], self.dim[1], self.dim[2], 1))
tmpImg[:,:,0,0] = full_stack[:,:,0]
for j in range(1,self.dim[2]):
tmpImg[:,:,j,0] = full_stack[:,:,j]
elif i == (self.batch_size - 1):
tmpImg = np.zeros((self.dim[0], self.dim[1], self.dim[2], 1))
tmpImg[:,:,-1,0] = full_stack[:,:,-1]
for j in range(self.dim[2]-1):
tmpImg[:,:,j,0] = full_stack[:,:,j]
else:
tmpImg = np.zeros((self.dim[0], self.dim[1], self.dim[2], 1))
tmpImg[:,:,:,0] = full_stack[:,:,(i-1):(i+2)]
X[i,:,:,:,:] = tmpImg
return X
# class for image generation for predicting focus locations within cells
class FocusSegmentationDataGenerator(utils.Sequence):
'Generates data for Keras'
def __init__(self,
img_array,
batch_size=32,
n_channels=1,
shuffle=False,
normalize_to_one=False):
'Initialization'
self.dim = (img_array.shape[1], img_array.shape[2])
self.batch_size = batch_size
self.img_array = img_array
self.img_number = img_array.shape[0]
self.n_channels = n_channels
self.shuffle = shuffle
self.on_epoch_end()
self.normalize_to_one = normalize_to_one
if normalize_to_one:
self.selem = morphology.disk(1)
def __len__(self):
'Denotes the number of batches per epoch'
return(int(np.ceil(self.img_number / self.batch_size)))
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
# Find list of IDs
array_list_temp = [self.img_array[k,:,:,0] for k in indexes]
# Generate data
X = self.__data_generation(array_list_temp)
return X
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(self.img_number)
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __data_generation(self, array_list_temp):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
X = np.zeros((self.batch_size, self.dim[0], self.dim[1], self.n_channels), 'uint16')
if self.normalize_to_one:
max_pixels = []
# Generate data
for i in range(self.batch_size):
# Store sample
try:
tmpImg = array_list_temp[i]
if self.normalize_to_one:
# tmpMedian = filters.median(tmpImg, self.selem)
tmpMax = np.max(tmpImg)
max_pixels.append(tmpMax)
except IndexError:
X = X[:i,...]
break
# ensure image is uint8
# if tmpImg.dtype=="uint16":
# tmpImg = tmpImg / 2**16 * 2**8
# tmpImg = tmpImg.astype('uint8')
# if self.normalize_to_one:
# with warnings.catch_warnings():
# warnings.simplefilter('ignore')
# medImg = median(tmpImg, self.selem)
# tmpImg = tmpImg/np.max(medImg)
# tmpImg[tmpImg > 1] = 1
X[i,:,:,0] = tmpImg
if self.normalize_to_one:
channel_max = np.max(max_pixels) / (2**8 - 1)
# print("Channel max: {}".format(channel_max))
# print("Array max: {}".format(np.max(X)))
X = X/channel_max
# print("Normalized array max: {}".format(np.max(X)))
X[X > 1] = 1
return (X)
# class for image generation for predicting trap locations in phase-contrast images
class TrapSegmentationDataGenerator(utils.Sequence):
'Generates data for Keras'
def __init__(self, img_array, batch_size=32,
n_channels=1, normalize_to_one=False, shuffle=False):
'Initialization'
self.dim = (img_array.shape[1], img_array.shape[2])
self.img_number = img_array.shape[0]
self.img_array = img_array
self.batch_size = batch_size
self.n_channels = n_channels
self.shuffle = shuffle
self.on_epoch_end()
self.normalize_to_one = normalize_to_one
if normalize_to_one:
self.selem = morphology.disk(3)
def __len__(self):
'Denotes the number of batches per epoch'
return int(np.ceil(self.img_number / self.batch_size))
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
# Find list of IDs
array_list_temp = [self.img_array[k,:,:,0] for k in indexes]
# Generate data
X = self.__data_generation(array_list_temp)
return X
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(self.img_number)
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __data_generation(self, array_list_temp):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
X = np.zeros((self.batch_size, self.dim[0], self.dim[1], self.n_channels))
# Generate data
for i in range(self.batch_size):
# Store sample
try:
tmpImg = array_list_temp[i]
except IndexError:
X = X[:i,...]
break
if self.normalize_to_one:
medImg = median(tmpImg, self.selem)
tmpImg = medImg/np.max(medImg)
X[i,:,:,0] = tmpImg
return (X)
# class for image generation for classifying traps as good, empty, out-of-focus, or defective
class TrapKymographPredictionDataGenerator(utils.Sequence):
'Generates data for Keras'
def __init__(self, list_fileNames, batch_size=32, dim=(32,32,32), n_channels=1,
n_classes=10, shuffle=False):
'Initialization'
self.dim = dim
self.batch_size = batch_size
self.list_fileNames = list_fileNames
self.n_channels = n_channels
self.n_classes = n_classes
self.shuffle = shuffle
self.on_epoch_end()
def __len__(self):
'Denotes the number of batches per epoch'
return int(np.ceil(len(self.list_fileNames) / self.batch_size))
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
# Find list of IDs
list_fileNames_temp = [self.list_fileNames[k] for k in indexes]
# Generate data
X = self.__data_generation(list_fileNames_temp)
return X
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(len(self.list_fileNames))
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __data_generation(self, list_fileNames_temp):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
X = np.zeros((self.batch_size, self.dim[0], self.dim[1], self.n_channels))
# Generate data
for i, fName in enumerate(list_fileNames_temp):
# Store sample
tmpImg = io.imread(fName)
tmpImgShape = tmpImg.shape
if tmpImgShape[0] < self.dim[0]:
t_end = tmpImgShape[0]
else:
t_end = self.dim[0]
X[i,:t_end,:,:] = np.expand_dims(tmpImg[:t_end,:,tmpImg.shape[-1]//2], axis=-1)
return X
def absolute_diff(y_true, y_pred):
y_true_sum = K.sum(y_true)
y_pred_sum = K.sum(y_pred)
diff = K.abs(y_pred_sum - y_true_sum)/tf.to_float(tf.size(y_true))
return diff
def all_loss(y_true, y_pred):
loss = losses.binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred) + absolute_diff(y_true, y_pred)
return loss
def absolute_dice_loss(y_true, y_pred):
loss = dice_loss(y_true, y_pred) + absolute_diff(y_true, y_pred)
return loss
def recall_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def f1_m(y_true, y_pred):
precision = precision_m(y_true, y_pred)
recall = recall_m(y_true, y_pred)
return 2*((precision*recall)/(precision+recall+K.epsilon()))
def f2_m(y_true, y_pred, beta=2):
precision = precision_m(y_true, y_pred)
recall = recall_m(y_true, y_pred)
numer = (1+beta**2)*recall*precision
denom = recall + (beta**2)*precision + K.epsilon()
return numer/denom
def f_precision_m(y_true, y_pred, beta=0.5):
precision = precision_m(y_true, y_pred)
recall = recall_m(y_true, y_pred)
numer = (1+beta**2)*recall*precision
denom = recall + (beta**2)*precision + K.epsilon()
return numer/denom
# finds lineages for all peaks in a fov
def make_lineages_fov(fov_id, specs):
'''
For a given fov, create the lineages from the segmented images.
Called by
mm3_Segment.py
Calls
mm3.make_lineage_chnl_stack
'''
ana_peak_ids = [] # channels to be analyzed
for peak_id, spec in six.iteritems(specs[fov_id]):
if spec == 1: # 1 means analyze
ana_peak_ids.append(peak_id)
ana_peak_ids = sorted(ana_peak_ids) # sort for repeatability
information('Creating lineage for FOV %d with %d channels.' % (fov_id, len(ana_peak_ids)))
# just break if there are no peaks to analyze
if not ana_peak_ids:
# returning empty dictionary will add nothing to current cells dictionary
return {}
# This is a list of tuples (fov_id, peak_id) to send to the Pool command
fov_and_peak_ids_list = [(fov_id, peak_id) for peak_id in ana_peak_ids]
# set up multiprocessing pool. will complete pool before going on
#pool = Pool(processes=params['num_analyzers'])
# create the lineages for each peak individually
# the output is a list of dictionaries
#lineages = pool.map(make_lineage_chnl_stack, fov_and_peak_ids_list, chunksize=8)
#pool.close() # tells the process nothing more will be added.
#pool.join() # blocks script until everything has been processed and workers exit
# This is the non-parallelized version (useful for debug)
lineages = []
for fov_and_peak_ids in fov_and_peak_ids_list:
lineages.append(make_lineage_chnl_stack(fov_and_peak_ids))
# combine all dictionaries into one dictionary
Cells = {} # create dictionary to hold all information
for cell_dict in lineages: # for all the other dictionaries in the list
Cells.update(cell_dict) # updates Cells with the entries in cell_dict
return Cells
# get number of cells in each frame and total number of pairwise interactions
def get_cell_counts(regionprops_list):
cell_count_list = [len(time_regions) for time_regions in regionprops_list]
interaction_count_list = []
for i,cell_count in enumerate(cell_count_list):
if i+1 == len(cell_count_list):
break
interaction_count_list.append(cell_count*cell_count_list[i+1])
total_cells = np.sum(cell_count_list)
total_interactions = np.sum(interaction_count_list)
return(total_cells, total_interactions, cell_count_list, interaction_count_list)
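# Worked example (hypothetical counts): for three frames containing 2, 3 and 3 regions,
# consecutive-frame pairings give 2*3 + 3*3 = 15 interactions:
# get_cell_counts(regionprops_list) # with per-frame region counts [2, 3, 3]
# # -> (8, 15, [2, 3, 3], [6, 9])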
# get cells' information for track prediction
def gather_interactions_and_events(regionprops_list):
total_cells, total_interactions, cell_count_list, interaction_count_list = get_cell_counts(regionprops_list)
# instantiate an array with a 2x5 array for each pair of cells'
# min_y, max_y, centroid_y, area, and orientation
# in reality it would be much, much more efficient to
# look this information up in the data generator at run time
# for now, this will work
pairwise_cell_data = np.zeros((total_interactions,2,5,1))
# make a dictionary, the keys of which will be row indices so that we
# can quickly look up which timepoints/cells correspond to which
# rows of our model's output
pairwise_cell_lookup = {}
# populate arrays
interaction_count = 0
cell_count = 0
for frame, frame_regions in enumerate(regionprops_list):
for region in frame_regions:
cell_label = region.label
y,x = region.centroid
bbox = region.bbox
orientation = region.orientation
min_y = bbox[0]
max_y = bbox[2]
area = region.area
cell_label = region.label
cell_info = (min_y, max_y, y, area, orientation)
cell_count += 1
try:
frame_plus_one_regions = regionprops_list[frame+1]
except IndexError as e:
# print(e)
break
for region_plus_one in frame_plus_one_regions:
paired_cell_label = region_plus_one.label
y,x = region_plus_one.centroid
bbox = region_plus_one.bbox
min_y = bbox[0]
max_y = bbox[2]
area = region_plus_one.area
orientation = region_plus_one.orientation
paired_cell_label = region_plus_one.label
pairwise_cell_data[interaction_count,0,:,0] = cell_info
pairwise_cell_data[interaction_count,1,:,0] = (min_y, max_y, y, area, orientation)
pairwise_cell_lookup[interaction_count] = {'frame':frame, 'cell_label':cell_label, 'paired_cell_label':paired_cell_label}
interaction_count += 1
return(pairwise_cell_data, pairwise_cell_lookup)
# look up which cells are interacting according to the track model
def cell_interaction_lookup(predictions, lookup_table):
'''
Accepts the prediction matrix and the pairwise-cell lookup table and returns a DataFrame
of predicted interactions (frame, cell_label, paired_cell_label, interaction_type).
'''
frame = []
cell_label = []
paired_cell_label = []
interaction_type = []
# loop over rows of predictions
for row_index in range(predictions.shape[0]):
row_predictions = predictions[row_index]
row_relationship = np.where(row_predictions > 0.95)[0]
if row_relationship.size == 0:
continue
elif row_relationship[0] == 3:
continue
elif row_relationship[0] == 0:
interaction_type.append('migration')
elif row_relationship[0] == 1:
interaction_type.append('child')
elif row_relationship[0] == 2:
interaction_type.append('false_join')
frame.append(lookup_table[row_index]['frame'])
cell_label.append(lookup_table[row_index]['cell_label'])
paired_cell_label.append(lookup_table[row_index]['paired_cell_label'])
track_df = pd.DataFrame(data={'frame':frame,
'cell_label':cell_label,
'paired_cell_label':paired_cell_label,
'interaction_type':interaction_type})
return(track_df)
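# Minimal illustrative sketch (hypothetical helper) with synthetic predictions.
# The column meanings follow the branches above: 0 migration, 1 child,
# 2 false_join, 3 skipped (no interaction).
def _example_interaction_lookup():
    predictions = np.array([[0.99, 0.00, 0.00, 0.01],   # row 0: migration
                            [0.00, 0.97, 0.02, 0.01],   # row 1: child
                            [0.10, 0.20, 0.30, 0.40]])  # row 2: below threshold, skipped
    lookup_table = {0: {'frame': 0, 'cell_label': 1, 'paired_cell_label': 1},
                    1: {'frame': 0, 'cell_label': 1, 'paired_cell_label': 2},
                    2: {'frame': 1, 'cell_label': 2, 'paired_cell_label': 3}}
    return cell_interaction_lookup(predictions, lookup_table)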
def get_tracking_model_dict():
model_dict = {}
if 'migrate_model' not in model_dict:
model_dict['migrate_model'] = models.load_model(params['tracking']['migrate_model'],
custom_objects={'all_loss':all_loss,
'f2_m':f2_m})
if 'child_model' not in model_dict:
model_dict['child_model'] = models.load_model(params['tracking']['child_model'],
custom_objects={'bce_dice_loss':bce_dice_loss,
'f2_m':f2_m})
if 'appear_model' not in model_dict:
model_dict['appear_model'] = models.load_model(params['tracking']['appear_model'],
custom_objects={'all_loss':all_loss,
'f2_m':f2_m})
if 'die_model' not in model_dict:
model_dict['die_model'] = models.load_model(params['tracking']['die_model'],
custom_objects={'all_loss':all_loss,
'f2_m':f2_m})
if 'disappear_model' not in model_dict:
model_dict['disappear_model'] = models.load_model(params['tracking']['disappear_model'],
custom_objects={'all_loss':all_loss,
'f2_m':f2_m})
if 'born_model' not in model_dict:
model_dict['born_model'] = models.load_model(params['tracking']['born_model'],
custom_objects={'all_loss':all_loss,
'f2_m':f2_m})
# if not 'zero_cell_model' in model_dict:
# model_dict['zero_cell_model'] = models.load_model(params['tracking']['zero_cell_model'],
# custom_objects={'absolute_dice_loss':absolute_dice_loss,
# 'f2_m':f2_m})
# if not 'one_cell_model' in model_dict:
# model_dict['one_cell_model'] = models.load_model(params['tracking']['one_cell_model'],
# custom_objects={'bce_dice_loss':bce_dice_loss,
# 'f2_m':f2_m})
# if not 'two_cell_model' in model_dict:
# model_dict['two_cell_model'] = models.load_model(params['tracking']['two_cell_model'],
# custom_objects={'all_loss':all_loss,
# 'f2_m':f2_m})
# if not 'geq_three_cell_model' in model_dict:
# model_dict['geq_three_cell_model'] = models.load_model(params['tracking']['geq_three_cell_model'],
# custom_objects={'bce_dice_loss':bce_dice_loss,
# 'f2_m':f2_m})
return(model_dict)
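# Hedged usage sketch (hypothetical helper): the tracking CNNs are loaded once
# per run and keyed by event type; reuse the returned dict for every FOV/peak
# rather than reloading. Assumes params['tracking'] holds valid model paths and
# that the custom loss/metric objects referenced above are defined in this module.
def _example_load_tracking_models():
    model_dict = get_tracking_model_dict()
    for model_name in model_dict:
        information('Loaded tracking model: {}'.format(model_name))
    return model_dict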
# Creates lineage for a single channel
def make_lineage_chnl_stack(fov_and_peak_id):
'''
Create the lineage for a set of segmented images for one channel.
Start by making the regions in the first time point potential cells.
Go forward in time and map regions in each timepoint to the potential cells
in previous time points, building the life of a cell. Basic checks are used,
such as that a region should overlap its predecessor, grow a little, and not
shrink too much. If regions do not link back in time, discard them. If two
regions map to one previous region, check whether it is a sensible division event.
Parameters
----------
fov_and_peak_id : tuple
(fov_id, peak_id)
Returns
-------
Cells : dict
A dictionary of all the cells from this lineage, divided and undivided
'''
# load in parameters
# if leaf regions see no action for longer than this, drop them
lost_cell_time = params['track']['lost_cell_time']
# only cells with y positions below this value will receive the honor of becoming new
# cells, unless they are daughters of current cells
new_cell_y_cutoff = params['track']['new_cell_y_cutoff']
# only regions with labels less than or equal to this value will be considered to start cells
new_cell_region_cutoff = params['track']['new_cell_region_cutoff']
# get the specific ids from the tuple
fov_id, peak_id = fov_and_peak_id
# start time is the first time point for this series of TIFFs.
start_time_index = min(params['time_table'][fov_id].keys())
information('Creating lineage for FOV %d, channel %d.' % (fov_id, peak_id))
# load segmented data
image_data_seg = load_stack(fov_id, peak_id, color=params['track']['seg_img'])
# image_data_seg = load_stack(fov_id, peak_id, color='seg')
# Calculate all data for all time points.
# this list will be length of the number of time points
regions_by_time = [regionprops(label_image=timepoint) for timepoint in image_data_seg] # removed coordinates='xy'
# Set up data structures.
Cells = {} # Dict that holds all the cell objects, divided and undivided
cell_leaves = [] # cell ids of the current leaves of the growing lineage tree
# go through regions by timepoint and build lineages
# timepoints start with the index of the first image
for t, regions in enumerate(regions_by_time, start=start_time_index):
# if there are cell leaves who are still waiting to be linked, but
# too much time has passed, remove them.
for leaf_id in cell_leaves[:]:  # iterate over a copy so that removal below is safe
if t - Cells[leaf_id].times[-1] > lost_cell_time:
cell_leaves.remove(leaf_id)
# make all the regions leaves if there are no current leaves
if not cell_leaves:
for region in regions:
if region.centroid[0] < new_cell_y_cutoff and region.label <= new_cell_region_cutoff:
# Create cell and put in cell dictionary
cell_id = create_cell_id(region, t, peak_id, fov_id)
Cells[cell_id] = Cell(cell_id, region, t, parent_id=None)
# add the id to the list of current leaves
cell_leaves.append(cell_id)
# Determine if the regions are children of current leaves
else:
### create mapping between regions and leaves
leaf_region_map = {}
leaf_region_map = {leaf_id : [] for leaf_id in cell_leaves}
# get the last y position of current leaves and create tuple with the id
current_leaf_positions = [(leaf_id, Cells[leaf_id].centroids[-1][0]) for leaf_id in cell_leaves]
# go through regions, they will come off in Y position order
for r, region in enumerate(regions):
# create tuple which is cell_id of closest leaf, distance
current_closest = (None, float('inf'))
# check this region against all positions of all current leaf regions,
# find the closest one in y.
for leaf in current_leaf_positions:
# calculate distance between region and leaf
y_dist_region_to_leaf = abs(region.centroid[0] - leaf[1])
# if the distance is closer than before, update
if y_dist_region_to_leaf < current_closest[1]:
current_closest = (leaf[0], y_dist_region_to_leaf)
# update map with the closest region
leaf_region_map[current_closest[0]].append((r, y_dist_region_to_leaf))
# go through the current leaf regions.
# if more than two regions are linked to a leaf, keep only the closest two
for leaf_id, region_links in six.iteritems(leaf_region_map):
if len(region_links) > 2:
closest_two_regions = sorted(region_links, key=lambda x: x[1])[:2]
# but sort by region order so top region is first
closest_two_regions = sorted(closest_two_regions, key=lambda x: x[0])
# replace value in dictionary
leaf_region_map[leaf_id] = closest_two_regions
# for the discarded regions, put them as new leaves
# if they are near the closed end of the channel
discarded_regions = sorted(region_links, key=lambda x: x[1])[2:]
for discarded_region in discarded_regions:
region = regions[discarded_region[0]]
if region.centroid[0] < new_cell_y_cutoff and region.label <= new_cell_region_cutoff:
cell_id = create_cell_id(region, t, peak_id, fov_id)
Cells[cell_id] = Cell(cell_id, region, t, parent_id=None)
cell_leaves.append(cell_id) # add to leaves
else:
# since the regions are ordered, none of the remaining will pass
break
### iterate over the leaves, looking to see what regions connect to them.
for leaf_id, region_links in six.iteritems(leaf_region_map):
# if there is just one suggested descendant,
# see if it checks out and append the data
if len(region_links) == 1:
region = regions[region_links[0][0]] # grab the region from the list
# check if the pairing makes sense based on size and position
# this function returns true if things are okay
if check_growth_by_region(Cells[leaf_id], region):
# grow the cell by the region in this case
Cells[leaf_id].grow(region, t)
# there may be two daughters, or maybe there is just one child and a new cell
elif len(region_links) == 2:
# grab these two daughters
region1 = regions[region_links[0][0]]
region2 = regions[region_links[1][0]]
# check_division returns 3 if cell divided,
# 1 if first region is just the cell growing and the second is trash
# 2 if the second region is the cell, and the first is trash
# or 0 if it cannot be determined.
check_division_result = check_division(Cells[leaf_id], region1, region2)
if check_division_result == 3:
# create two new cells and divide the mother
daughter1_id = create_cell_id(region1, t, peak_id, fov_id)
daughter2_id = create_cell_id(region2, t, peak_id, fov_id)
Cells[daughter1_id] = Cell(daughter1_id, region1, t,
parent_id=leaf_id)
Cells[daughter2_id] = Cell(daughter2_id, region2, t,
parent_id=leaf_id)
Cells[leaf_id].divide(Cells[daughter1_id], Cells[daughter2_id], t)
# remove mother from current leaves
cell_leaves.remove(leaf_id)
# add the daughter ids to list of current leaves if they pass cutoffs
if region1.centroid[0] < new_cell_y_cutoff and region1.label <= new_cell_region_cutoff:
cell_leaves.append(daughter1_id)
if region2.centroid[0] < new_cell_y_cutoff and region2.label <= new_cell_region_cutoff:
cell_leaves.append(daughter2_id)
# 1 means that daughter 1 is just a continuation of the mother
# The other region should become a new leaf if it passes the requirements
elif check_division_result == 1:
Cells[leaf_id].grow(region1, t)
if region2.centroid[0] < new_cell_y_cutoff and region2.label <= new_cell_region_cutoff:
cell_id = create_cell_id(region2, t, peak_id, fov_id)
Cells[cell_id] = Cell(cell_id, region2, t, parent_id=None)
cell_leaves.append(cell_id) # add to leaves
# ditto for 2
elif check_division_result == 2:
Cells[leaf_id].grow(region2, t)
if region1.centroid[0] < new_cell_y_cutoff and region1.label <= new_cell_region_cutoff:
cell_id = create_cell_id(region1, t, peak_id, fov_id)
Cells[cell_id] = Cell(cell_id, region1, t, parent_id=None)
cell_leaves.append(cell_id) # add to leaves
# return the dictionary with all the cells
return Cells
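# Hedged usage sketch (hypothetical helper): build lineages for several
# channels of one FOV serially, mirroring the non-parallelized loop further up.
# Assumes params (including params['time_table']) is initialized and segmented
# stacks exist for the requested channels.
def _example_make_lineages(fov_id, peak_ids):
    Cells = {}
    for peak_id in peak_ids:
        Cells.update(make_lineage_chnl_stack((fov_id, peak_id)))
    return Cells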
### Cell class and related functions
# this is the object that holds all information for a detection
class Detection():
'''
The Detection is a single detection in a single frame.
'''
# initialize (birth) the cell
def __init__(self, detection_id, region, t):
'''The detection must be given a unique detection_id and passed the region
information from the segmentation
Parameters
----------
detection_id : str
detection_id is a string in the form fXpXtXrX
f is 3 digit FOV number
p is 4 digit peak number
t is 4 digit time point
r is region label for that segmentation
Use the function create_detection_id to return a proper string.
region : region properties object
Information about the labeled region from
skimage.measure.regionprops()
'''
# create all the attributes
# id
self.id = detection_id
# identification convenience
self.fov = int(detection_id.split('f')[1].split('p')[0])
self.peak = int(detection_id.split('p')[1].split('t')[0])
self.t = t
self.cell_count = 1
# self.abs_times = [params['time_table'][self.fov][t]] # elapsed time in seconds
if region is not None:
self.label = region.label
self.bbox = region.bbox
self.area = region.area
# calculating cell length and width by using Feret diameter. These values are in pixels
length_tmp, width_tmp = feretdiameter(region)
if length_tmp is None:
warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.length = length_tmp
self.width = width_tmp
# calculate cell volume as cylinder plus hemispherical ends (sphere). Unit is px^3
self.volume = (length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 + (4/3) * np.pi * (width_tmp/2)**3
# angle of the fitted ellipsoid and centroid location
self.orientation = region.orientation
self.centroid = region.centroid
else:
self.label = None
self.bbox = None
self.area = None
# calculating cell length and width by using Feret diameter. These values are in pixels
length_tmp, width_tmp = (None, None)
self.length = None
self.width = None
# calculate cell volume as cylinder plus hemispherical ends (sphere). Unit is px^3
self.volume = None
# angle of the fitted ellipsoid and centroid location
self.orientation = None
self.centroid = None
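# The volume model used by the classes in this module treats a cell as a
# cylinder of length (L - W) and diameter W capped by two hemispheres of
# diameter W (together one sphere): V = (L - W) * pi * (W/2)**2 + (4/3) * pi * (W/2)**3.
# Small illustrative helper (hypothetical, not called by the classes, which
# inline the same expression):
def _example_rod_volume(length_px, width_px):
    radius = width_px / 2
    return (length_px - width_px) * np.pi * radius**2 + (4 / 3) * np.pi * radius**3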
# this is the object that holds all information for a cell
class Cell():
'''
The Cell class is one cell that has been born. It is not necessarily a cell that
has divided.
'''
# initialize (birth) the cell
def __init__(self, cell_id, region, t, parent_id=None):
'''The cell must be given a unique cell_id and passed the region
information from the segmentation
Parameters
----------
cell_id : str
cell_id is a string in the form fXpXtXrX
f is 3 digit FOV number
p is 4 digit peak number
t is 4 digit time point at time of birth
r is region label for that segmentation
Use the function create_cell_id to return a proper string.
region : region properties object
Information about the labeled region from
skimage.measure.regionprops()
parent_id : str
id of the parent if there is one.
'''
# create all the attributes
# id
self.id = cell_id
# identification convenience
self.fov = int(cell_id.split('f')[1].split('p')[0])
self.peak = int(cell_id.split('p')[1].split('t')[0])
self.birth_label = int(cell_id.split('r')[1])
# parent id may be none
self.parent = parent_id
# daughters is updated when cell divides
# if this is none then the cell did not divide
self.daughters = None
# birth and division time
self.birth_time = t
self.division_time = None # filled out if cell divides
# the following information is on a per timepoint basis
self.times = [t]
self.abs_times = [params['time_table'][self.fov][t]] # elapsed time in seconds
self.labels = [region.label]
self.bboxes = [region.bbox]
self.areas = [region.area]
# calculating cell length and width by using Feret diameter. These values are in pixels
length_tmp, width_tmp = feretdiameter(region)
if length_tmp is None:
warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.lengths = [length_tmp]
self.widths = [width_tmp]
# calculate cell volume as cylinder plus hemispherical ends (sphere). Unit is px^3
self.volumes = [(length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 +
(4/3) * np.pi * (width_tmp/2)**3]
# angle of the fitted ellipsoid and centroid location
self.orientations = [region.orientation]
self.centroids = [region.centroid]
# these are special data types, as they include information from the daughters upon division
# computed upon division
self.times_w_div = None
self.lengths_w_div = None
self.widths_w_div = None
# this information is the "production" information that
# we want to extract at the end. Some of this is for convenience.
# This is only filled out if a cell divides.
self.sb = None # in um
self.sd = None # this should be combined lengths of daughters, in um
self.delta = None
self.tau = None
self.elong_rate = None
self.septum_position = None
self.width = None
self.death = None
def grow(self, region, t):
'''Append data from a region to this cell.
use cell.times[-1] to get most current value'''
self.times.append(t)
self.abs_times.append(params['time_table'][self.fov][t])
self.labels.append(region.label)
self.bboxes.append(region.bbox)
self.areas.append(region.area)
# calculating cell length and width by using Feret diameter
length_tmp, width_tmp = feretdiameter(region)
if length_tmp is None:
warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.lengths.append(length_tmp)
self.widths.append(width_tmp)
self.volumes.append((length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 +
(4/3) * np.pi * (width_tmp/2)**3)
self.orientations.append(region.orientation)
self.centroids.append(region.centroid)
def die(self, region, t):
'''
Annotate cell as dying from current t to next t.
'''
self.death = t
def divide(self, daughter1, daughter2, t):
'''Divide the cell and update stats.
daughter1 and daughter2 are instances of the Cell class.
daughter1 is the daughter closer to the closed end.'''
# put the daughter ids into the cell
self.daughters = [daughter1.id, daughter2.id]
# give this guy a division time
self.division_time = daughter1.birth_time
# update times
self.times_w_div = self.times + [self.division_time]
self.abs_times.append(params['time_table'][self.fov][self.division_time])
# flesh out the stats for this cell
# size at birth
self.sb = self.lengths[0] * params['pxl2um']
# force the division length to be the combined lengths of the daughters
self.sd = (daughter1.lengths[0] + daughter2.lengths[0]) * params['pxl2um']
# delta is here for convenience
self.delta = self.sd - self.sb
# generation time. Use more accurate times and convert to minutes
self.tau = np.float64((self.abs_times[-1] - self.abs_times[0]) / 60.0)
# include the data points from the daughters
self.lengths_w_div = [l * params['pxl2um'] for l in self.lengths] + [self.sd]
self.widths_w_div = [w * params['pxl2um'] for w in self.widths] + [((daughter1.widths[0] + daughter2.widths[0])/2) * params['pxl2um']]
# volumes for all timepoints, in um^3
self.volumes_w_div = []
for i in range(len(self.lengths_w_div)):
self.volumes_w_div.append((self.lengths_w_div[i] - self.widths_w_div[i]) *
np.pi * (self.widths_w_div[i]/2)**2 +
(4/3) * np.pi * (self.widths_w_div[i]/2)**3)
# calculate elongation rate.
try:
times = np.float64((np.array(self.abs_times) - self.abs_times[0]) / 60.0)
log_lengths = np.float64(np.log(self.lengths_w_div))
p = np.polyfit(times, log_lengths, 1) # this wants float64
self.elong_rate = p[0] * 60.0 # convert to hours
except Exception:
self.elong_rate = np.float64('NaN')
warning('Elongation rate calculation failed for {}.'.format(self.id))
# calculate the septum position as a number between 0 and 1
# which indicates the size of daughter closer to the closed end
# compared to the total size
self.septum_position = daughter1.lengths[0] / (daughter1.lengths[0] + daughter2.lengths[0])
# calculate single width over cell's life
self.width = np.mean(self.widths_w_div)
# convert data to smaller floats. No need for float64
# see https://docs.scipy.org/doc/numpy-1.13.0/user/basics.types.html
convert_to = 'float16' # numpy datatype to convert to
self.sb = self.sb.astype(convert_to)
self.sd = self.sd.astype(convert_to)
self.delta = self.delta.astype(convert_to)
self.elong_rate = self.elong_rate.astype(convert_to)
self.tau = self.tau.astype(convert_to)
self.septum_position = self.septum_position.astype(convert_to)
self.width = self.width.astype(convert_to)
self.lengths = [length.astype(convert_to) for length in self.lengths]
self.lengths_w_div = [length.astype(convert_to) for length in self.lengths_w_div]
self.widths = [width.astype(convert_to) for width in self.widths]
self.widths_w_div = [width.astype(convert_to) for width in self.widths_w_div]
self.volumes = [vol.astype(convert_to) for vol in self.volumes]
self.volumes_w_div = [vol.astype(convert_to) for vol in self.volumes_w_div]
# note the float16 is hardcoded here
self.orientations = [np.float16(orientation) for orientation in self.orientations]
self.centroids = [(y.astype(convert_to), x.astype(convert_to)) for y, x in self.centroids]
def print_info(self):
'''prints information about the cell'''
print('id = %s' % self.id)
print('times = {}'.format(', '.join('{}'.format(t) for t in self.times)))
print('lengths = {}'.format(', '.join('{:.2f}'.format(l) for l in self.lengths)))
class CellTree():
def __init__(self):
self.cells = {}
self.scores = [] # probably needs to be different
self.score = 0
self.cell_id_list = []
def add_cell(self, cell):
self.cells[cell.id] = cell
self.cell_id_list.append(cell.id)
self.cell_id_list.sort()
def update_score(self):
pass
def get_cell(self, cell_id):
return(self.cells[cell_id])
def get_top_from_cell(self, cell_id):
pass
# this is the object that holds all information for a cell
class CellFromGraph():
'''
The CellFromGraph class is one cell that has been born.
It is not necessarily a cell that has divided.
'''
# initialize (birth) the cell
def __init__(self, cell_id, region, t, parent=None):
'''The cell must be given a unique cell_id and passed the region
information from the segmentation
Parameters
----------
cell_id : str
cell_id is a string in the form fXpXtXrX
f is 3 digit FOV number
p is 4 digit peak number
t is 4 digit time point at time of birth
r is region label for that segmentation
Use the function create_cell_id to return a proper string.
region : region properties object
Information about the labeled region from
skimage.measure.regionprops()
parent_id : str
id of the parent if there is one.
'''
# create all the attributes
# id
self.id = cell_id
# identification convenience
self.fov = int(cell_id.split('f')[1].split('p')[0])
self.peak = int(cell_id.split('p')[1].split('t')[0])
self.birth_label = int(region.label)
self.regions = [region]
# parent is a CellFromGraph object, can be None
self.parent = parent
# daughters is updated when cell divides
# if this is none then the cell did not divide
self.daughters = None
# birth and division time
self.birth_time = t
self.division_time = None # filled out if cell divides
# the following information is on a per timepoint basis
self.times = [t]
self.abs_times = [params['time_table'][self.fov][t]] # elapsed time in seconds
self.labels = [region.label]
self.bboxes = [region.bbox]
self.areas = [region.area]
# calculating cell length and width by using Feret diameter. These values are in pixels
length_tmp, width_tmp = feretdiameter(region)
if length_tmp is None:
warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.lengths = [length_tmp]
self.widths = [width_tmp]
# calculate cell volume as cylinder plus hemispherical ends (sphere). Unit is px^3
self.volumes = [(length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 +
(4/3) * np.pi * (width_tmp/2)**3]
# angle of the fitted ellipsoid and centroid location
self.orientations = [region.orientation]
self.centroids = [region.centroid]
# these are special data types, as they include information from the daughters upon division
# computed upon division
self.times_w_div = None
self.lengths_w_div = None
self.widths_w_div = None
# this information is the "production" information that
# we want to extract at the end. Some of this is for convenience.
# This is only filled out if a cell divides.
self.sb = None # in um
self.sd = None # this should be combined lengths of daughters, in um
self.delta = None
self.tau = None
self.elong_rate = None
self.septum_position = None
self.width = None
self.death = None
self.disappear = None
self.area_mean_fluorescence = {}
self.volume_mean_fluorescence = {}
self.total_fluorescence = {}
self.foci = {}
def __len__(self):
return(len(self.times))
def add_parent(self, parent):
self.parent = parent
def grow(self, region, t):
'''Append data from a region to this cell.
use cell.times[-1] to get most current value'''
self.times.append(t)
self.abs_times.append(params['time_table'][self.fov][t])
self.labels.append(region.label)
self.bboxes.append(region.bbox)
self.areas.append(region.area)
self.regions.append(region)
# calculating cell length and width by using Feret diameter
length_tmp, width_tmp = feretdiameter(region)
if length_tmp is None:
warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.lengths.append(length_tmp)
self.widths.append(width_tmp)
self.volumes.append((length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 +
(4/3) * np.pi * (width_tmp/2)**3)
self.orientations.append(region.orientation)
self.centroids.append(region.centroid)
def die(self, region, t):
'''
Annotate cell as dying from current t to next t.
'''
self.death = t
def disappears(self, region, t):
'''
Annotate cell as disappearing from current t to next t.
'''
self.disappear = t
def add_daughter(self, daughter, t):
if self.daughters is None:
self.daughters = [daughter]
else:
self.daughters.append(daughter)
assert len(self.daughters) < 3, "Too many daughter cells in cell {}".format(self.id)
# sort daughters by y position, with smaller y-value first.
# this will cause the daughter closer to the closed end of the trap to be listed first.
self.daughters.sort(key=lambda cell: cell.centroids[0][0])
self.divide(t)
def divide(self, t):
'''Divide the cell and update stats.
daughter1 is the daughter closer to the closed end.'''
# put the daughter ids into the cell
# self.daughters = [daughter1.id, daughter2.id]
# give this guy a division time
self.division_time = self.daughters[0].birth_time
# update times
self.times_w_div = self.times + [self.division_time]
self.abs_times.append(params['time_table'][self.fov][self.division_time])
# flesh out the stats for this cell
# size at birth
self.sb = self.lengths[0] * params['pxl2um']
# force the division length to be the combined lengths of the daughters
self.sd = (self.daughters[0].lengths[0] + self.daughters[1].lengths[0]) * params['pxl2um']
# delta is here for convenience
self.delta = self.sd - self.sb
# generation time. Use more accurate times and convert to minutes
self.tau = np.float64((self.abs_times[-1] - self.abs_times[0]) / 60.0)
# include the data points from the daughters
self.lengths_w_div = [l * params['pxl2um'] for l in self.lengths] + [self.sd]
self.widths_w_div = [w * params['pxl2um'] for w in self.widths] + [((self.daughters[0].widths[0] + self.daughters[1].widths[0])/2) * params['pxl2um']]
# volumes for all timepoints, in um^3
self.volumes_w_div = []
for i in range(len(self.lengths_w_div)):
self.volumes_w_div.append((self.lengths_w_div[i] - self.widths_w_div[i]) *
np.pi * (self.widths_w_div[i]/2)**2 +
(4/3) * np.pi * (self.widths_w_div[i]/2)**3)
# calculate elongation rate.
try:
times = np.float64((np.array(self.abs_times) - self.abs_times[0]) / 60.0) # convert times to minutes
log_lengths = np.float64(np.log(self.lengths_w_div))
p = np.polyfit(times, log_lengths, 1) # this wants float64
self.elong_rate = p[0] * 60.0 # convert to hours
except Exception:
self.elong_rate = np.float64('NaN')
warning('Elongation rate calculation failed for {}.'.format(self.id))
# calculate the septum position as a number between 0 and 1
# which indicates the size of daughter closer to the closed end
# compared to the total size
self.septum_position = self.daughters[0].lengths[0] / (self.daughters[0].lengths[0] + self.daughters[1].lengths[0])
# calculate single width over cell's life
self.width = np.mean(self.widths_w_div)
# convert data to smaller floats. No need for float64
# see https://docs.scipy.org/doc/numpy-1.13.0/user/basics.types.html
convert_to = 'float16' # numpy datatype to convert to
self.sb = self.sb.astype(convert_to)
self.sd = self.sd.astype(convert_to)
self.delta = self.delta.astype(convert_to)
self.elong_rate = self.elong_rate.astype(convert_to)
self.tau = self.tau.astype(convert_to)
self.septum_position = self.septum_position.astype(convert_to)
self.width = self.width.astype(convert_to)
self.lengths = [length.astype(convert_to) for length in self.lengths]
self.lengths_w_div = [length.astype(convert_to) for length in self.lengths_w_div]
self.widths = [width.astype(convert_to) for width in self.widths]
self.widths_w_div = [width.astype(convert_to) for width in self.widths_w_div]
self.volumes = [vol.astype(convert_to) for vol in self.volumes]
self.volumes_w_div = [vol.astype(convert_to) for vol in self.volumes_w_div]
# note the float16 is hardcoded here
self.orientations = [np.float16(orientation) for orientation in self.orientations]
self.centroids = [(y.astype(convert_to), x.astype(convert_to)) for y, x in self.centroids]
def add_focus(self, focus, t):
'''Adds a focus to the cell. See function foci_info_unet'''
self.foci[focus.id] = focus
def print_info(self):
'''prints information about the cell'''
print('id = %s' % self.id)
print('times = {}'.format(', '.join('{}'.format(t) for t in self.times)))
print('lengths = {}'.format(', '.join('{:.2f}'.format(l) for l in self.lengths)))
if self.daughters is not None:
print('daughters = {}'.format(', '.join('{}'.format(daughter.id) for daughter in self.daughters)))
if self.parent is not None:
print('parent = {}'.format(self.parent.id))
def make_wide_df(self):
data = {}
data['id'] = self.id
data['fov'] = self.fov
data['trap'] = self.peak
data['parent'] = self.parent
data['child1'] = None
data['child2'] = None
data['division_time'] = self.division_time
data['birth_label'] = self.birth_label
data['birth_time'] = self.birth_time
data['sb'] = self.sb
data['sd'] = self.sd
data['delta'] = self.delta
data['tau'] = self.tau
data['elong_rate'] = self.elong_rate
data['septum_position'] = self.septum_position
data['death'] = self.death
data['disappear'] = self.disappear
if self.daughters is not None:
data['child1'] = self.daughters[0]
if len(self.daughters) == 2:
data['child2'] = self.daughters[1]
df = pd.DataFrame(data, index=[self.id])
return(df)
def make_long_df(self):
data = {}
data['id'] = [self.id]*len(self.times)
data['times'] = self.times
data['length'] = self.lengths
data['volume'] = self.volumes
data['area'] = self.areas
# if a cell divides then there is one extra value in abs_times
if self.division_time is None:
data['seconds'] = self.abs_times
else:
data['seconds'] = self.abs_times[:-1]
# if there is fluorescence data, place it into the dataframe
if len(self.area_mean_fluorescence.keys()) != 0:
for fluorescence_channel in self.area_mean_fluorescence.keys():
data['{}_area_mean_fluorescence'.format(fluorescence_channel)] = self.area_mean_fluorescence[fluorescence_channel]
data['{}_volume_mean_fluorescence'.format(fluorescence_channel)] = self.volume_mean_fluorescence[fluorescence_channel]
data['{}_total_fluorescence'.format(fluorescence_channel)] = self.total_fluorescence[fluorescence_channel]
df = pd.DataFrame(data, index=data['id'])
return(df)
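# Hedged sketch (hypothetical helper): collect the per-cell summary and
# per-timepoint dataframes produced above into experiment-wide tables.
def _example_cells_to_dataframes(Cells):
    wide_df = pd.concat([cell.make_wide_df() for cell in Cells.values()])
    long_df = pd.concat([cell.make_long_df() for cell in Cells.values()])
    return wide_df, long_df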
# this is the object that holds all information for a fluorescent focus
# this class can eventually be used in focus tracking, much like the Cell class
# is used for cell tracking
class Focus():
'''
The Focus class holds information on fluorescent foci.
A single focus can be present in multiple different cells.
'''
# initialize the focus
def __init__(self,
cell,
region,
seg_img,
intensity_image,
t):
'''The cell must be given a unique cell_id and passed the region
information from the segmentation
Parameters
----------
cell : a Cell object
region : region properties object
Information about the labeled region from
skimage.measure.regionprops()
seg_img : 2D numpy array
Labelled image of cell segmentations
intensity_image : 2D numpy array
Fluorescence image with foci
'''
# create all the attributes
# id
focus_id = create_focus_id(region,
t,
cell.peak,
cell.fov,
experiment_name=params['experiment_name'])
self.id = focus_id
# identification convenience
self.appear_label = int(region.label)
self.regions = [region]
self.fov = cell.fov
self.peak = cell.peak
# cell is a CellFromGraph object
# cells are added later using the .add_cell method
self.cells = [cell]
# daughters is updated when focus splits
# if this is none then the focus did not split
self.parent = None
self.daughters = None
self.merger_partner = None
# appearance and split time
self.appear_time = t
self.split_time = None # filled out if focus splits
# the following information is on a per timepoint basis
self.times = [t]
self.abs_times = [params['time_table'][cell.fov][t]] # elapsed time in seconds
self.labels = [region.label]
self.bboxes = [region.bbox]
self.areas = [region.area]
# calculating focus length and width from the region's major/minor axis lengths.
# These values are in pixels
# NOTE: in the future, update to straighten a focus and get straightened length/width
# print(region)
length_tmp = region.major_axis_length
width_tmp = region.minor_axis_length
# length_tmp, width_tmp = feretdiameter(region)
# if length_tmp == None:
# warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.lengths = [length_tmp]
self.widths = [width_tmp]
# calculate focus volume as cylinder plus hemispherical ends (sphere). Unit is px^3
self.volumes = [(length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 +
(4/3) * np.pi * (width_tmp/2)**3]
# angle of the fitted ellipsoid and centroid location
self.orientations = [region.orientation]
self.centroids = [region.centroid]
# special information for foci
self.elong_rate = None
self.disappear = None
self.area_mean_fluorescence = []
self.volume_mean_fluorescence = []
self.total_fluorescence = []
self.median_fluorescence = []
self.sd_fluorescence = []
self.disp_l = []
self.disp_w = []
self.calculate_fluorescence(seg_img, intensity_image, region)
def __len__(self):
return(len(self.times))
def __str__(self):
return(self.print_info())
def add_cell(self, cell):
self.cells.append(cell)
def add_parent_focus(self, parent):
self.parent = parent
def merge(self, partner):
self.merger_partner = partner
def grow(self,
region,
t,
seg_img,
intensity_image,
current_cell):
'''Append data from a region to this focus.
use self.times[-1] to get most current value.'''
if current_cell is not self.cells[-1]:
self.add_cell(current_cell)
self.times.append(t)
self.abs_times.append(params['time_table'][self.cells[-1].fov][t])
self.labels.append(region.label)
self.bboxes.append(region.bbox)
self.areas.append(region.area)
self.regions.append(region)
# calculating focus length and width from the region's major/minor axis lengths
length_tmp = region.major_axis_length
width_tmp = region.minor_axis_length
# length_tmp, width_tmp = feretdiameter(region)
# if length_tmp == None:
# warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.lengths.append(length_tmp)
self.widths.append(width_tmp)
self.volumes.append((length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 +
(4/3) * np.pi * (width_tmp/2)**3)
self.orientations.append(region.orientation)
self.centroids.append(region.centroid)
self.calculate_fluorescence(seg_img, intensity_image, region)
def calculate_fluorescence(self,
seg_img,
intensity_image,
region):
total_fluor = np.sum(intensity_image[seg_img == region.label])
self.total_fluorescence.append(total_fluor)
self.area_mean_fluorescence.append(total_fluor/self.areas[-1])
self.volume_mean_fluorescence.append(total_fluor/self.volumes[-1])
self.median_fluorescence.append(np.median(intensity_image[seg_img == region.label]))
self.sd_fluorescence.append(np.std(intensity_image[seg_img == region.label]))
# get the focus' displacement from center of cell
# find x and y position relative to the whole image (convert from small box)
# calculate distance of foci from middle of cell (scikit image)
orientation = region.orientation
if orientation < 0:
orientation = np.pi+orientation
cell_idx = self.cells[-1].times.index(self.times[-1]) # final time in self.times is current time
cell_centroid = self.cells[-1].centroids[cell_idx]
focus_centroid = region.centroid
disp_y = (focus_centroid[0]-cell_centroid[0])*np.sin(orientation) - (focus_centroid[1]-cell_centroid[1])*np.cos(orientation)
disp_x = (focus_centroid[0]-cell_centroid[0])*np.cos(orientation) + (focus_centroid[1]-cell_centroid[1])*np.sin(orientation)
# append foci information to the list
self.disp_l = np.append(self.disp_l, disp_y)
self.disp_w = np.append(self.disp_w, disp_x)
def disappears(self, region, t):
'''
Annotate focus as disappearing from current t to next t.
'''
self.disappear = t
def add_daughter(self, daughter, t):
if self.daughters is None:
self.daughters = [daughter]
else:
self.daughters.append(daughter)
# sort daughters by y position, with smaller y-value first.
# this will cause the daughter closer to the closed end of the trap to be listed first.
self.daughters.sort(key=lambda focus: focus.centroids[0][0])
self.divide(t)
def divide(self, t):
'''Divide the focus and update stats.
daughter1 is the daughter closer to the closed end.'''
# put the daughter ids into the focus
# self.daughters = [daughter1.id, daughter2.id]
# give this guy a division time
self.split_time = self.daughters[0].appear_time
# convert data to smaller floats. No need for float64
# see https://docs.scipy.org/doc/numpy-1.13.0/user/basics.types.html
convert_to = 'float16' # numpy datatype to convert to
self.lengths = [length.astype(convert_to) for length in self.lengths]
self.widths = [width.astype(convert_to) for width in self.widths]
self.volumes = [vol.astype(convert_to) for vol in self.volumes]
# note the float16 is hardcoded here
self.orientations = [np.float16(orientation) for orientation in self.orientations]
self.centroids = [(y.astype(convert_to), x.astype(convert_to)) for y, x in self.centroids]
def print_info(self):
'''prints information about the focus'''
print('id = %s' % self.id)
print('times = {}'.format(', '.join('{}'.format(t) for t in self.times)))
print('lengths = {}'.format(', '.join('{:.2f}'.format(l) for l in self.lengths)))
if self.daughters is not None:
print('daughters = {}'.format(', '.join('{}'.format(daughter.id) for daughter in self.daughters)))
if self.cells is not None:
print('cells = {}'.format([cell.id for cell in self.cells]))
def make_wide_df(self):
data = {}
data['id'] = self.id
data['cells'] = self.cells
data['parent'] = self.parent
data['child1'] = None
data['child2'] = None
# data['division_time'] = self.division_time
data['appear_label'] = self.appear_label
data['appear_time'] = self.appear_time
data['disappear'] = self.disappear
if self.daughters is not None:
data['child1'] = self.daughters[0]
if len(self.daughters) == 2:
data['child2'] = self.daughters[1]
df = pd.DataFrame(data, index=[self.id])
return(df)
def make_long_df(self):
data = {}
data['id'] = [self.id]*len(self.times)
data['time'] = self.times
# data['cell'] = self.cells
data['length'] = self.lengths
data['volume'] = self.volumes
data['area'] = self.areas
data['seconds'] = self.abs_times
data['area_mean_fluorescence'] = self.area_mean_fluorescence
data['volume_mean_fluorescence'] = self.volume_mean_fluorescence
data['total_fluorescence'] = self.total_fluorescence
data['median_fluorescence'] = self.median_fluorescence
data['sd_fluorescence'] = self.sd_fluorescence
data['disp_l'] = self.disp_l
data['disp_w'] = self.disp_w
# print(data['id'])
df = pd.DataFrame(data, index=data['id'])
return(df)
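# Worked sketch (hypothetical helper) of the displacement projection used in
# Focus.calculate_fluorescence: the offset of the focus from the cell centroid
# is rotated by the cell's orientation so disp_l lies along the cell's long
# axis and disp_w along its width.
def _example_focus_displacement(cell_centroid, focus_centroid, orientation):
    if orientation < 0:
        orientation = np.pi + orientation
    dy = focus_centroid[0] - cell_centroid[0]
    dx = focus_centroid[1] - cell_centroid[1]
    disp_l = dy * np.sin(orientation) - dx * np.cos(orientation)
    disp_w = dy * np.cos(orientation) + dx * np.sin(orientation)
    return disp_l, disp_w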
class PredictTrackDataGenerator(utils.Sequence):
'''Generates data for running tracking class predictions.
Input is a stack of labeled images.'''
def __init__(self,
data,
batch_size=32,
dim=(4,5,9)):
'Initialization'
self.batch_size = batch_size
self.data = data
self.dim = dim
self.on_epoch_end()
def __len__(self):
'Denotes the number of batches per epoch'
return int(np.ceil(len(self.data) / self.batch_size))
def __getitem__(self, index):
'Generate one batch of data'
# Generate keys of the batch
batch_indices = self.indices[index*self.batch_size:(index+1)*self.batch_size]
# Generate data
X = self.__data_generation(batch_indices)
return X
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indices = np.arange(len(self.data))
def __data_generation(self, batch_indices):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
# shape is (batch_size, max_cell_num, frame_num, cell_feature_num, 1)
X = np.zeros((self.batch_size, self.dim[0], self.dim[1], self.dim[2], 1))
# Generate data
for batch_pos, idx in enumerate(batch_indices):  # batch_pos indexes the output array
start_idx = idx-2
end_idx = idx+3
# print(start_idx, end_idx)
if start_idx < 0:
batch_frame_list = []
for empty_idx in range(abs(start_idx)):
batch_frame_list.append([])
batch_frame_list.extend(self.data[0:end_idx])
elif end_idx > len(self.data):
batch_frame_list = self.data[start_idx:len(self.data)]
for empty_idx in range(end_idx - len(self.data)):
batch_frame_list.append([])  # pad the tail with empty frames
else:
batch_frame_list = self.data[start_idx:end_idx]
for i,frame_region_list in enumerate(batch_frame_list):
# shape is (max_cell_num, frame_num, cell_feature_num)
# tmp_x = np.zeros((self.dim[0], self.dim[1], self.dim[2]))
if not frame_region_list:
continue
for region_idx, region, in enumerate(frame_region_list):
y,x = region.centroid
bbox = region.bbox
orientation = region.orientation
min_y = bbox[0]
max_y = bbox[2]
min_x = bbox[1]
max_x = bbox[3]
area = region.area
length = region.major_axis_length
cell_label = region.label
cell_index = cell_label - 1
cell_info = (min_x, max_x, x, min_y, max_y, y, orientation, area, length)
if region_idx + 1 > self.dim[0]:
continue
# supplement tmp_x at (region_idx, )
# tmp_x[region_idx, i, :] = cell_info
X[batch_pos, cell_index, i, :, 0] = cell_info  # index by position within the batch
return X
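# Hedged sketch (hypothetical helper): wrap per-frame regionprops output in the
# prediction generator and report the batch shapes it yields. With the default
# dim=(4, 5, 9) each sample holds up to 4 cells, a 5-frame window centred on
# the frame of interest, and 9 features per cell.
def _example_inspect_track_batches(regions_by_time):
    gen = PredictTrackDataGenerator(regions_by_time, batch_size=8)
    for batch_index in range(len(gen)):
        X = gen[batch_index]
        information('Batch {} has shape {}.'.format(batch_index, X.shape))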
def get_greatest_score_info(first_node, second_node, graph):
'''A function that is useful for track linking
'''
score_names = [k for k in graph.get_edge_data(first_node, second_node).keys()]
pred_scores = [val['score'] for k,val in graph.get_edge_data(first_node, second_node).items()]
max_score_index = np.argmax(pred_scores)
max_name = score_names[max_score_index]
max_score = pred_scores[max_score_index]
return(max_name, max_score)
def get_score_by_type(first_node, second_node, graph, score_type='child'):
'''A function useful in track linking
'''
pred_score = graph.get_edge_data(first_node, second_node)[score_type]['score']
return(pred_score)
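# Minimal sketch (hypothetical helper) of the edge-data layout these helpers
# expect, presumably from a networkx MultiDiGraph: get_edge_data(u, v) returns
# a dict keyed by edge type, each value holding a 'score'. The stub graph here
# is purely illustrative.
def _example_edge_scores():
    class _StubGraph(object):
        def get_edge_data(self, u, v):
            return {'migrate': {'score': -0.1}, 'child': {'score': -2.3}}
    stub = _StubGraph()
    best_type, best_score = get_greatest_score_info('n0', 'n1', stub)  # ('migrate', -0.1)
    child_score = get_score_by_type('n0', 'n1', stub, score_type='child')  # -2.3
    return best_type, best_score, child_score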
def count_unvisited(G, experiment_name):
count = 0
for node_id in G.nodes:
if node_id.startswith(experiment_name):
if not G.nodes[node_id]['visited']:
count += 1
return(count)
def create_lineages_from_graph(graph,
graph_df,
fov_id,
peak_id,
):
'''
This function iterates through nodes in a graph of detections
to link the nodes as "CellFromGraph" objects, eventually
leading to the ultimate goal of returning
a CellTree object with each cell's information for the experiment.
For now it ignores the number of cells in a detection and simply
assumes a 1:1 relationship between detections and cell number.
'''
# iterate through all nodes in graph
# graph_score = 0
# track_dict = {}
# tracks = CellTree()
tracks = {}
for node_id in graph.nodes:
graph.nodes[node_id]['visited'] = False
graph_df['visited'] = False
num_unvisited = count_unvisited(graph, params['experiment_name'])
same_iter_num = 0  # guards the stall check at the bottom of the loop
while num_unvisited > 0:
# which detection nodes are not yet visited
unvisited_detection_nodes = graph_df[(~(graph_df.visited) & graph_df.node_id.str.startswith(params['experiment_name']))]
# grab the first unvisited node_id from the dataframe
prior_node_id = unvisited_detection_nodes.iloc[0,1]
prior_node_time = graph.nodes[prior_node_id]['time']
prior_node_region = graph.nodes[prior_node_id]['region']
cell_id = create_cell_id(prior_node_region,
prior_node_time,
peak_id,
fov_id,
experiment_name=params['experiment_name'])
current_cell = CellFromGraph(cell_id,
prior_node_region,
prior_node_time,
parent=None)
if cell_id not in tracks:
tracks[cell_id] = current_cell
else:
current_cell = tracks[cell_id]
# for use later in establishing predecessors
current_node_id = prior_node_id
# set this detection's "visited" status to True in the graph and in the dataframe
graph.nodes[prior_node_id]['visited'] = True
graph_df.iloc[np.where(graph_df.node_id==prior_node_id)[0][0],3] = True
# build current_track list to this detection's node
current_track = collections.deque()
current_track.append(current_node_id)
predecessors_list = [k for k in graph.predecessors(prior_node_id)]
unvisited_predecessors_list = [k for k in predecessors_list if not graph.nodes[k]['visited']]
while len(unvisited_predecessors_list) != 0:
# initialize a scores array to select highest score from the available options
predecessor_scores = np.zeros(len(unvisited_predecessors_list))
# populate array with scores
for i in range(len(unvisited_predecessors_list)):
predecessor_node_id = unvisited_predecessors_list[i]
edge_type, edge_score = get_greatest_score_info(predecessor_node_id, current_node_id, graph)
predecessor_scores[i] = edge_score
# find highest score
max_index = np.argmax(predecessor_scores)
# grab the node_id corresponding to traversing the highest-scoring edge from the prior node
current_node_id = unvisited_predecessors_list[max_index]
current_track.appendleft(current_node_id)
predecessors_list = [k for k in graph.predecessors(current_node_id)]
unvisited_predecessors_list = [k for k in predecessors_list if not graph.nodes[k]['visited']]
while prior_node_id != 'B':
# which nodes succeed our current node?
successor_node_ids = [node_id for node_id in graph.successors(prior_node_id)]
# keep only the potential successor detections that have not yet been visited
unvisited_node_ids = []
for i,successor_node_id in enumerate(successor_node_ids):
# if it starts with params['experiment_name'], it is a detection node, and not born, appear, etc.
if successor_node_id.startswith(params['experiment_name']):
# if it has been used in the cell track graph, i.e., if 'visited' is True,
# move on. Otherwise, append to our list
if graph.nodes[successor_node_id]['visited']:
continue
else:
unvisited_node_ids.append(successor_node_id)
# if it doesn't start with params['experiment_name'], it is a born, appear, etc., and should always be appended
else:
unvisited_node_ids.append(successor_node_id)
# initialize a scores array to select highest score from the available options
successor_scores = np.zeros(len(unvisited_node_ids))
successor_edge_types = []
# populate array with scores
for i in range(len(unvisited_node_ids)):
successor_node_id = unvisited_node_ids[i]
edge_type, edge_score = get_greatest_score_info(prior_node_id, successor_node_id, graph)
successor_scores[i] = edge_score
successor_edge_types.append(edge_type)
# find highest score
max_score = np.max(successor_scores)
max_index = np.argmax(successor_scores)
# grab the node_id corresponding to traversing the highest-scoring edge from the prior node
next_node_id = unvisited_node_ids[max_index]
max_edge_type = successor_edge_types[max_index]
# if the max_score in successor_scores isn't greater than log(0.1), just make the cell disappear for now.
if max_score < np.log(0.1):
max_edge_type = 'disappear'
next_node_id = [n_id for n_id in unvisited_node_ids if n_id.startswith('disappear')][0]
# if this is a division event, add child node as a new cell,
# add the new cell as a daughter to current_cell,
# add current_cell as a parent to new cell.
# Then, search for the second child cell, add it to current_cell, etc.
if max_edge_type == 'child':
new_cell_time = graph.nodes[next_node_id]['time']
new_cell_region = graph.nodes[next_node_id]['region']
new_cell_id = create_cell_id(new_cell_region,
new_cell_time,
peak_id,
fov_id,
experiment_name=params['experiment_name'])
new_cell = CellFromGraph(new_cell_id,
new_cell_region,
new_cell_time,
parent=current_cell)
tracks[new_cell_id] = new_cell
current_cell.add_daughter(new_cell, new_cell_time)
# initialize a scores array to select highest score from the available options
unvisited_detection_nodes = [unvisited_node_id for unvisited_node_id in unvisited_node_ids if unvisited_node_id.startswith(params['experiment_name'])]
child_scores = np.zeros(len(unvisited_detection_nodes))
# populate array with scores
for i in range(len(unvisited_detection_nodes)):
successor_node_id = unvisited_detection_nodes[i]
if successor_node_id == next_node_id:
child_scores[i] = -np.inf
continue
child_score = get_score_by_type(prior_node_id, successor_node_id, graph, score_type='child')
child_scores[i] = child_score
try:
second_daughter_score = np.max(child_scores)
# sometimes a second daughter doesn't exist: perhaps parent is at mouth of a trap and one
# daughter is lost to the central channel at division time. In this case, do the following:
if second_daughter_score < np.log(0.5):
current_cell = new_cell
else:
second_daughter_index = np.argmax(child_scores)
# grab the node_id corresponding to traversing the highest-scoring edge from the prior node
other_daughter_node_id = unvisited_detection_nodes[second_daughter_index]
other_daughter_cell_time = graph.nodes[other_daughter_node_id]['time']
other_daughter_cell_region = graph.nodes[other_daughter_node_id]['region']
other_daughter_cell_id = create_cell_id(other_daughter_cell_region,
other_daughter_cell_time,
peak_id,
fov_id,
experiment_name=params['experiment_name'])
other_daughter_cell = CellFromGraph(other_daughter_cell_id,
other_daughter_cell_region,
other_daughter_cell_time,
parent=current_cell)
tracks[other_daughter_cell_id] = other_daughter_cell
current_cell.add_daughter(other_daughter_cell, new_cell_time)
# now we remove current_cell, since it's done, and move on to one of the daughters
current_cell = new_cell
# sometimes a second daughter doesn't exist: perhaps parent is at mouth of a trap and one
# daughter is lost to the central channel at division time. In this case, do the following:
except IndexError:
current_cell = new_cell
# if this is a migration, grow the current_cell.
elif max_edge_type == 'migrate':
cell_time = graph.nodes[next_node_id]['time']
cell_region = graph.nodes[next_node_id]['region']
current_cell.grow(cell_region, cell_time)
# if the event represents death, kill the cell
elif max_edge_type == 'die':
if prior_node_id.startswith(params['experiment_name']):
death_time = graph.nodes[prior_node_id]['time']
death_region = graph.nodes[prior_node_id]['region']
current_cell.die(death_region, death_time)
# if the event represents disappearance, end the cell
elif max_edge_type == 'disappear':
if prior_node_id.startswith(params['experiment_name']):
disappear_time = graph.nodes[prior_node_id]['time']
disappear_region = graph.nodes[prior_node_id]['region']
current_cell.disappears(disappear_region, disappear_time)
# set the next node to 'visited'
graph.nodes[next_node_id]['visited'] = True
if next_node_id != 'B':
graph_df.iloc[np.where(graph_df.node_id==next_node_id)[0][0],3] = True
# reset prior_node_id to iterate to next frame and append node_id to current track
prior_node_id = next_node_id
if num_unvisited != count_unvisited(graph, params['experiment_name']):
same_iter_num = 0
else:
same_iter_num += 1
num_unvisited = count_unvisited(graph, params['experiment_name'])
print("{} detections remain unvisited.".format(num_unvisited))
if same_iter_num > 10:
print("WARNING: Ten iterations passed without decreasing the number of unvisited nodes.\n \
Breaking tracking loop now. You should probably not trust these results.")
break
return tracks
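# Hedged usage sketch (hypothetical helper): once the detection graph and its
# node dataframe have been assembled for a channel elsewhere in this module,
# turn them into CellFromGraph objects and report how many cells were built.
def _example_graph_to_cells(graph, graph_df, fov_id, peak_id):
    tracks = create_lineages_from_graph(graph, graph_df, fov_id, peak_id)
    information('Built {} cells for FOV {}, peak {}.'.format(len(tracks), fov_id, peak_id))
    return tracks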
def viterbi_create_lineages_from_graph(graph,
graph_df,
fov_id,
peak_id,
):
'''
This function iterates through nodes in a graph of detections
to link the nodes as "CellFromGraph" objects, eventually
leading to the ultimate goal of returning
a maximally-scoring CellTree object with each cell's information for the experiment.
For now it ignores the number of cells in a detection and simply
assumes a 1:1 relationship between detections and cell number.
'''
# iterate through all nodes in G
graph_score = 0
# track_dict = {}
tracks = CellTree()
max_time = np.max([node.timepoint for node in graph.nodes])
print(max_time)
for node_id in graph.nodes:
graph.nodes[node_id]['visited'] = False
graph_df['visited'] = False
num_unvisited = count_unvisited(graph, params['experiment_name'])
same_iter_num = 0  # guards the stall check at the bottom of the loop
for t in range(1,max_time+1):
if t > 1:
prior_time_nodes = time_nodes
if t == 1:
time_nodes = [node for node in graph.nodes if node.time == t]
else:
time_nodes = next_time_nodes
if t != max_time:
next_time_nodes = [node for node in graph.nodes if node.time == t+1]
for node in time_nodes:
pass
while num_unvisited > 0:
# which detection nodes are not yet visited
unvisited_detection_nodes = graph_df[(~(graph_df.visited) & graph_df.node_id.str.startswith(params['experiment_name']))]
# grab the first unvisited node_id from the dataframe
prior_node_id = unvisited_detection_nodes.iloc[0,1]
prior_node_time = graph.nodes[prior_node_id]['time']
prior_node_region = graph.nodes[prior_node_id]['region']
cell_id = create_cell_id(prior_node_region,
prior_node_time,
peak_id,
fov_id,
experiment_name=params['experiment_name'])
current_cell = CellFromGraph(cell_id,
prior_node_region,
prior_node_time,
parent=None)
if cell_id not in tracks.cell_id_list:
tracks.add_cell(current_cell)
else:
current_cell = tracks.get_cell(cell_id)
# track_dict_key = prior_node_id
# for use later in establishing predecessors
current_node_id = prior_node_id
# set this detection's "visited" status to True in the graph and in the dataframe
graph.nodes[prior_node_id]['visited'] = True
graph_df.iloc[np.where(graph_df.node_id==prior_node_id)[0][0],3] = True
# build current_track list to this detection's node
current_track = collections.deque()
current_track.append(current_node_id)
predecessors_list = [k for k in graph.predecessors(prior_node_id)]
unvisited_predecessors_list = [k for k in predecessors_list if not graph.nodes[k]['visited']]
while len(unvisited_predecessors_list) != 0:
# initialize a scores array to select highest score from the available options
predecessor_scores = np.zeros(len(unvisited_predecessors_list))
# populate array with scores
for i in range(len(unvisited_predecessors_list)):
predecessor_node_id = unvisited_predecessors_list[i]
edge_type, edge_score = get_greatest_score_info(predecessor_node_id, current_node_id, graph)
predecessor_scores[i] = edge_score
# find highest score
max_index = np.argmax(predecessor_scores)
# grab the node_id corresponding to traversing the highest-scoring edge from the prior node
current_node_id = unvisited_predecessors_list[max_index]
current_track.appendleft(current_node_id)
predecessors_list = [k for k in graph.predecessors(current_node_id)]
unvisited_predecessors_list = [k for k in predecessors_list if not graph.nodes[k]['visited']]
while prior_node_id != 'B':
# which nodes succeed our current node?
successor_node_ids = [node_id for node_id in graph.successors(prior_node_id)]
# keep only the potential successor detections that have not yet been visited
unvisited_node_ids = []
for i,successor_node_id in enumerate(successor_node_ids):
# if it starts with params['experiment_name'], it is a detection node, and not born, appear, etc.
if successor_node_id.startswith(params['experiment_name']):
# if it has been used in the cell track graph, i.e., if 'visited' is True,
# move on. Otherwise, append to our list
if graph.nodes[successor_node_id]['visited']:
continue
else:
unvisited_node_ids.append(successor_node_id)
# if it doesn't start with params['experiment_name'], it is a born, appear, etc., and should always be appended
else:
unvisited_node_ids.append(successor_node_id)
# initialize a scores array to select highest score from the available options
successor_scores = np.zeros(len(unvisited_node_ids))
successor_edge_types = []
# populate array with scores
for i in range(len(unvisited_node_ids)):
successor_node_id = unvisited_node_ids[i]
edge_type, edge_score = get_greatest_score_info(prior_node_id, successor_node_id, graph)
successor_scores[i] = edge_score
successor_edge_types.append(edge_type)
# find highest score
max_index = np.argmax(successor_scores)
# grab the node_id corresponding to traversing the highest-scoring edge from the prior node
next_node_id = unvisited_node_ids[max_index]
max_edge_type = successor_edge_types[max_index]
# if this is a division event, add child node as a new cell,
# add the new cell as a daughter to current_cell,
# add current_cell as a parent to new cell.
# Then, search for the second child cell, add it to current_cell, etc.
if max_edge_type == 'child':
new_cell_time = graph.nodes[next_node_id]['time']
new_cell_region = graph.nodes[next_node_id]['region']
new_cell_id = create_cell_id(new_cell_region,
new_cell_time,
peak_id,
fov_id,
experiment_name=params['experiment_name'])
new_cell = CellFromGraph(new_cell_id,
new_cell_region,
new_cell_time,
parent=current_cell)
tracks.add_cell(new_cell)
current_cell.add_daughter(new_cell, new_cell_time)
# print("First daughter", current_cell.id, new_cell.id)
# initialize a scores array to select highest score from the available options
unvisited_detection_nodes = [unvisited_node_id for unvisited_node_id in unvisited_node_ids if unvisited_node_id.startswith(params['experiment_name'])]
child_scores = np.zeros(len(unvisited_detection_nodes))
# populate array with scores
for i in range(len(unvisited_detection_nodes)):
successor_node_id = unvisited_detection_nodes[i]
if successor_node_id == next_node_id:
child_scores[i] = -np.inf
continue
child_score = get_score_by_type(prior_node_id, successor_node_id, graph, score_type='child')
child_scores[i] = child_score
# print(child_scores)
try:
second_daughter_index = np.argmax(child_scores)
# grab the node_id corresponding to traversing the highest-scoring edge from the prior node
other_daughter_node_id = unvisited_detection_nodes[second_daughter_index]
other_daughter_cell_time = graph.nodes[other_daughter_node_id]['time']
other_daughter_cell_region = graph.nodes[other_daughter_node_id]['region']
other_daughter_cell_id = create_cell_id(other_daughter_cell_region,
other_daughter_cell_time,
peak_id,
fov_id,
experiment_name=params['experiment_name'])
other_daughter_cell = CellFromGraph(other_daughter_cell_id,
other_daughter_cell_region,
other_daughter_cell_time,
parent=current_cell)
tracks.add_cell(other_daughter_cell)
current_cell.add_daughter(other_daughter_cell, new_cell_time)
# now we remove current_cell, since it's done, and move on to one of the daughters
current_cell = new_cell
# print("Second daughter", current_cell.parent.id, other_daughter_cell.id)
# sometimes a second daughter doesn't exist: perhaps parent is at mouth of a trap and one
# daughter is lost to the central channel at division time. In this case, do the following:
except (IndexError, ValueError): # no second daughter detection was available
current_cell = new_cell
# if this is a migration, grow the current_cell.
elif max_edge_type == 'migrate':
cell_time = graph.nodes[next_node_id]['time']
cell_region = graph.nodes[next_node_id]['region']
current_cell.grow(cell_region, cell_time)
# if the event represents death, kill the cell
elif max_edge_type == 'die':
if prior_node_id.startswith(params['experiment_name']):
death_time = graph.nodes[prior_node_id]['time']
death_region = graph.nodes[prior_node_id]['region']
current_cell.die(death_region, death_time)
# if the event represents disappearance, end the cell
elif max_edge_type == 'disappear':
if prior_node_id.startswith(params['experiment_name']):
disappear_time = graph.nodes[prior_node_id]['time']
disappear_region = graph.nodes[prior_node_id]['region']
current_cell.disappears(disappear_region, disappear_time)
# set the next node to 'visited'
graph.nodes[next_node_id]['visited'] = True
if next_node_id != 'B':
graph_df.iloc[np.where(graph_df.node_id==next_node_id)[0][0],3] = True
# reset prior_node_id to iterate to next frame and append node_id to current track
# current_track.append(next_node_id)
prior_node_id = next_node_id
# print(current_cell.id, current_cell.parent.id)
# track_dict[track_dict_key][:] = current_track
if num_unvisited != count_unvisited(graph, params['experiment_name']):
same_iter_num = 0
else:
same_iter_num += 1
num_unvisited = count_unvisited(graph, params['experiment_name'])
print("{} detections remain unvisited.".format(num_unvisited))
if same_iter_num > 10:
break
return(tracks)
def create_lineages_from_graph_2(graph,
graph_df,
fov_id,
peak_id,
):
'''
This function iterates through nodes in a graph of detections
to link the nodes as "CellFromGraph" objects, eventually
leading to the ultimate goal of returning
a CellTree object with each cell's information for the experiment.
For now it ignores the number of cells in a detection and simply
assumes a 1:1 relationship between detections and cell number.
'''
# iterate through all nodes in G
# graph_score = 0
# track_dict = {}
tracks = CellTree()
for node_id in graph.nodes:
graph.nodes[node_id]['visited'] = False
graph_df['visited'] = False
num_unvisited = count_unvisited(graph, params['experiment_name'])
while num_unvisited > 0:
# which detection nodes are not yet visited
unvisited_detection_nodes = graph_df[(~(graph_df.visited) & graph_df.node_id.str.startswith(params['experiment_name']))]
# grab the first unvisited node_id from the dataframe
prior_node_id = unvisited_detection_nodes.iloc[0,1]
prior_node_time = graph.nodes[prior_node_id]['time']
prior_node_region = graph.nodes[prior_node_id]['region']
cell_id = create_cell_id(prior_node_region,
prior_node_time,
peak_id,
fov_id,
experiment_name=params['experiment_name'])
current_cell = CellFromGraph(cell_id,
prior_node_region,
prior_node_time,
parent=None)
if cell_id not in tracks.cell_id_list:
tracks.add_cell(current_cell)
else:
current_cell = tracks.get_cell(cell_id)
# track_dict_key = prior_node_id
# for use later in establishing predecessors
current_node_id = prior_node_id
# set this detection's "visited" status to True in the graph and in the dataframe
graph.nodes[prior_node_id]['visited'] = True
graph_df.iloc[np.where(graph_df.node_id==prior_node_id)[0][0],3] = True
# build current_track list to this detection's node
current_track = collections.deque()
current_track.append(current_node_id)
predecessors_list = [k for k in graph.predecessors(prior_node_id)]
unvisited_predecessors_list = [k for k in predecessors_list if not graph.nodes[k]['visited']]
while len(unvisited_predecessors_list) != 0:
# initialize a scores array to select highest score from the available options
predecessor_scores = np.zeros(len(unvisited_predecessors_list))
# populate array with scores
for i in range(len(unvisited_predecessors_list)):
predecessor_node_id = unvisited_predecessors_list[i]
edge_type, edge_score = get_greatest_score_info(predecessor_node_id, current_node_id, graph)
predecessor_scores[i] = edge_score
# find highest score
max_index = np.argmax(predecessor_scores)
# grab the node_id corresponding to traversing the highest-scoring edge from the prior node
current_node_id = unvisited_predecessors_list[max_index]
current_track.appendleft(current_node_id)
predecessors_list = [k for k in graph.predecessors(current_node_id)]
unvisited_predecessors_list = [k for k in predecessors_list if not graph.nodes[k]['visited']]
while prior_node_id != 'B':
# which nodes succeed our current node?
successor_node_ids = [node_id for node_id in graph.successors(prior_node_id)]
# keep only the potential successor detections that have not yet been visited
unvisited_node_ids = []
for i,successor_node_id in enumerate(successor_node_ids):
# if it starts with params['experiment_name'], it is a detection node, and not born, appear, etc.
if successor_node_id.startswith(params['experiment_name']):
# if it has been used in the cell track graph, i.e., if 'visited' is True,
# move on. Otherwise, append to our list
if graph.nodes[successor_node_id]['visited']:
continue
else:
unvisited_node_ids.append(successor_node_id)
# if it doesn't start with params['experiment_name'], it is a born, appear, etc., and should always be appended
else:
unvisited_node_ids.append(successor_node_id)
# initialize a scores array to select highest score from the available options
successor_scores = np.zeros(len(unvisited_node_ids))
successor_edge_types = []
# populate array with scores
for i in range(len(unvisited_node_ids)):
successor_node_id = unvisited_node_ids[i]
edge_type, edge_score = get_greatest_score_info(prior_node_id, successor_node_id, graph)
successor_scores[i] = edge_score
successor_edge_types.append(edge_type)
# find highest score
max_index = np.argmax(successor_scores)
# grab the node_id corresponding to traversing the highest-scoring edge from the prior node
next_node_id = unvisited_node_ids[max_index]
max_edge_type = successor_edge_types[max_index]
# if this is a division event, add child node as a new cell,
# add the new cell as a daughter to current_cell,
# add current_cell as a parent to new cell.
# Then, search for the second child cell, add it to current_cell, etc.
if max_edge_type == 'child':
new_cell_time = graph.nodes[next_node_id]['time']
new_cell_region = graph.nodes[next_node_id]['region']
new_cell_id = create_cell_id(new_cell_region,
new_cell_time,
peak_id,
fov_id,
experiment_name=params['experiment_name'])
new_cell = CellFromGraph(new_cell_id,
new_cell_region,
new_cell_time,
parent=current_cell)
tracks.add_cell(new_cell)
current_cell.add_daughter(new_cell, new_cell_time)
# print("First daughter", current_cell.id, new_cell.id)
# initialize a scores array to select highest score from the available options
# exclude the first daughter so it cannot be picked again as the second daughter
unvisited_detection_nodes = [unvisited_node_id for unvisited_node_id in unvisited_node_ids if unvisited_node_id.startswith(params['experiment_name']) and unvisited_node_id != next_node_id]
child_scores = np.zeros(len(unvisited_detection_nodes))
# populate array with scores
for i in range(len(unvisited_detection_nodes)):
successor_node_id = unvisited_detection_nodes[i]
child_score = get_score_by_type(prior_node_id, successor_node_id, graph, score_type='child')
child_scores[i] = child_score
# print(child_scores)
try:
second_daughter_index = np.argmax(child_scores)
# grab the node_id corresponding to traversing the highest-scoring edge from the prior node
other_daughter_node_id = unvisited_detection_nodes[second_daughter_index]
other_daughter_cell_time = graph.nodes[other_daughter_node_id]['time']
other_daughter_cell_region = graph.nodes[other_daughter_node_id]['region']
other_daughter_cell_id = create_cell_id(other_daughter_cell_region,
other_daughter_cell_time,
peak_id,
fov_id,
experiment_name=params['experiment_name'])
other_daughter_cell = CellFromGraph(other_daughter_cell_id,
other_daughter_cell_region,
other_daughter_cell_time,
parent=current_cell)
tracks.add_cell(other_daughter_cell)
current_cell.add_daughter(other_daughter_cell, new_cell_time)
# now we remove current_cell, since it's done, and move on to one of the daughters
current_cell = new_cell
# print("Second daughter", current_cell.parent.id, other_daughter_cell.id)
# sometimes a second daughter doesn't exist: perhaps parent is at mouth of a trap and one
# daughter is lost to the central channel at division time. In this case, do the following:
except (IndexError, ValueError): # no second daughter detection was available
current_cell = new_cell
# if this is a migration, grow the current_cell.
elif max_edge_type == 'migrate':
cell_time = graph.nodes[next_node_id]['time']
cell_region = graph.nodes[next_node_id]['region']
current_cell.grow(cell_region, cell_time)
# if the event represents death, kill the cell
elif max_edge_type == 'die':
if prior_node_id.startswith(params['experiment_name']):
death_time = graph.nodes[prior_node_id]['time']
death_region = graph.nodes[prior_node_id]['region']
current_cell.die(death_region, death_time)
# if the event represents disappearance, end the cell
elif max_edge_type == 'disappear':
if prior_node_id.startswith(params['experiment_name']):
disappear_time = graph.nodes[prior_node_id]['time']
disappear_region = graph.nodes[prior_node_id]['region']
current_cell.disappears(disappear_region, disappear_time)
# set the next node to 'visited'
graph.nodes[next_node_id]['visited'] = True
if next_node_id != 'B':
graph_df.iloc[np.where(graph_df.node_id==next_node_id)[0][0],3] = True
# reset prior_node_id to iterate to next frame and append node_id to current track
# current_track.append(next_node_id)
prior_node_id = next_node_id
# print(current_cell.id, current_cell.parent.id)
# track_dict[track_dict_key][:] = current_track
if num_unvisited != count_unvisited(graph, params['experiment_name']):
same_iter_num = 0
else:
same_iter_num += 1
num_unvisited = count_unvisited(graph, params['experiment_name'])
print("{} detections remain unvisited.".format(num_unvisited))
if same_iter_num > 10:
break
return(tracks)
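# Usage sketch (hedged, not part of the pipeline): assuming `predictions_dict` and
# `regions_by_time` have already been built for one fov/peak, the tracking graph produced
# by initialize_track_graph below feeds directly into this function. The variable names in
# this sketch are placeholders.
#
#   G, graph_df = initialize_track_graph(peak_id, fov_id, params['experiment_name'],
#                                        predictions_dict, regions_by_time)
#   tracks = create_lineages_from_graph_2(G, graph_df, fov_id, peak_id)
#   # tracks is a CellTree; individual cells can be retrieved with tracks.get_cell(cell_id)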
# obtains cell length and width of the cell using the feret diameter
def feretdiameter(region):
'''
feretdiameter calculates the length and width of the binary region shape. The cell orientation
from the ellipsoid is used to find the major and minor axis of the cell.
See https://en.wikipedia.org/wiki/Feret_diameter.
'''
# y: along vertical axis of the image; x: along horizontal axis of the image;
# calculate the relative centroid in the bounding box (non-rotated)
# print(region.centroid)
y0, x0 = region.centroid
y0 = y0 - np.int16(region.bbox[0]) + 1
x0 = x0 - np.int16(region.bbox[1]) + 1
cosorient = np.cos(region.orientation)
sinorient = np.sin(region.orientation)
# print(cosorient, sinorient)
amp_param = 1.2 #amplifying number to make sure the axis is longer than actual cell length
# coordinates relative to bounding box
# r_coords = region.coords - [np.int16(region.bbox[0]), np.int16(region.bbox[1])]
# limit to perimeter coords. pixels are relative to bounding box
region_binimg = np.pad(region.image, 1, 'constant') # pad region binary image by 1 to avoid boundary non-zero pixels
distance_image = ndi.distance_transform_edt(region_binimg)
r_coords = np.where(distance_image == 1)
r_coords = list(zip(r_coords[0], r_coords[1]))
# coordinates are already sorted by y. partition into top and bottom to search faster later
# if orientation > 0, L1 is closer to top of image (lower Y coord)
if region.orientation > 0:
L1_coords = r_coords[:int(np.round(len(r_coords)/4))]
L2_coords = r_coords[int(np.round(len(r_coords)/4)):]
else:
L1_coords = r_coords[int(np.round(len(r_coords)/4)):]
L2_coords = r_coords[:int(np.round(len(r_coords)/4))]
#####################
# calculate cell length
L1_pt = np.zeros((2,1))
L2_pt = np.zeros((2,1))
# define the two end points of the long axis line
# one pole.
L1_pt[1] = x0 + cosorient * 0.5 * region.major_axis_length*amp_param
L1_pt[0] = y0 - sinorient * 0.5 * region.major_axis_length*amp_param
# the other pole.
L2_pt[1] = x0 - cosorient * 0.5 * region.major_axis_length*amp_param
L2_pt[0] = y0 + sinorient * 0.5 * region.major_axis_length*amp_param
# find the coordinate in the region closest to each of the two pole points above;
# the distance between those two closest coordinates gives the cell length
# pt_L1 = r_coords[np.argmin([np.sqrt(np.power(Pt[0]-L1_pt[0],2) + np.power(Pt[1]-L1_pt[1],2)) for Pt in r_coords])]
# pt_L2 = r_coords[np.argmin([np.sqrt(np.power(Pt[0]-L2_pt[0],2) + np.power(Pt[1]-L2_pt[1],2)) for Pt in r_coords])]
try:
pt_L1 = L1_coords[np.argmin([np.sqrt(np.power(Pt[0]-L1_pt[0],2) + np.power(Pt[1]-L1_pt[1],2)) for Pt in L1_coords])]
pt_L2 = L2_coords[np.argmin([np.sqrt(np.power(Pt[0]-L2_pt[0],2) + np.power(Pt[1]-L2_pt[1],2)) for Pt in L2_coords])]
length = np.sqrt(np.power(pt_L1[0]-pt_L2[0],2) + np.power(pt_L1[1]-pt_L2[1],2))
except Exception: # e.g., empty coordinate lists near a pole
length = None
#####################
# calculate cell width
# draw 2 lines along the short axis, each offset from the centroid by 0.2*length along the long axis (0.4*length apart), to avoid measuring the width at midcell
# limit to points in each half
W_coords = []
if region.orientation > 0:
W_coords.append(r_coords[:int(np.round(len(r_coords)/2))]) # note the /2 here instead of /4
W_coords.append(r_coords[int(np.round(len(r_coords)/2)):])
else:
W_coords.append(r_coords[int(np.round(len(r_coords)/2)):])
W_coords.append(r_coords[:int(np.round(len(r_coords)/2))])
# starting points
x1 = x0 + cosorient * 0.5 * length*0.4
y1 = y0 - sinorient * 0.5 * length*0.4
x2 = x0 - cosorient * 0.5 * length*0.4
y2 = y0 + sinorient * 0.5 * length*0.4
W1_pts = np.zeros((2,2))
W2_pts = np.zeros((2,2))
# now find the ends of the lines
# one side
W1_pts[0,1] = x1 - sinorient * 0.5 * region.minor_axis_length*amp_param
W1_pts[0,0] = y1 - cosorient * 0.5 * region.minor_axis_length*amp_param
W1_pts[1,1] = x2 - sinorient * 0.5 * region.minor_axis_length*amp_param
W1_pts[1,0] = y2 - cosorient * 0.5 * region.minor_axis_length*amp_param
# the other side
W2_pts[0,1] = x1 + sinorient * 0.5 * region.minor_axis_length*amp_param
W2_pts[0,0] = y1 + cosorient * 0.5 * region.minor_axis_length*amp_param
W2_pts[1,1] = x2 + sinorient * 0.5 * region.minor_axis_length*amp_param
W2_pts[1,0] = y2 + cosorient * 0.5 * region.minor_axis_length*amp_param
# calculate the minimal distance between the region points and the ends of the two width lines
pt_W1 = np.zeros((2,2))
pt_W2 = np.zeros((2,2))
d_W = np.zeros((2,1))
i = 0
for W1_pt, W2_pt in zip(W1_pts, W2_pts):
# # find the points closest to the guide points
# pt_W1[i,0], pt_W1[i,1] = r_coords[np.argmin([np.sqrt(np.power(Pt[0]-W1_pt[0],2) + np.power(Pt[1]-W1_pt[1],2)) for Pt in r_coords])]
# pt_W2[i,0], pt_W2[i,1] = r_coords[np.argmin([np.sqrt(np.power(Pt[0]-W2_pt[0],2) + np.power(Pt[1]-W2_pt[1],2)) for Pt in r_coords])]
# find the points closest to the guide points
pt_W1[i,0], pt_W1[i,1] = W_coords[i][np.argmin([np.sqrt(np.power(Pt[0]-W1_pt[0],2) + np.power(Pt[1]-W1_pt[1],2)) for Pt in W_coords[i]])]
pt_W2[i,0], pt_W2[i,1] = W_coords[i][np.argmin([np.sqrt(np.power(Pt[0]-W2_pt[0],2) + np.power(Pt[1]-W2_pt[1],2)) for Pt in W_coords[i]])]
# calculate the actual width
d_W[i] = np.sqrt(np.power(pt_W1[i,0]-pt_W2[i,0],2) + np.power(pt_W1[i,1]-pt_W2[i,1],2))
i += 1
# take the average of the two at quarter positions
width = np.mean([d_W[0],d_W[1]])
return length, width
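# Hedged example: feretdiameter expects a single skimage regionprops region. A typical call
# on a labeled segmentation image could look like the sketch below; `seg_img` is a
# placeholder name for an integer-labeled mask.
#
#   regions = measure.regionprops(seg_img)
#   for region in regions:
#       length, width = feretdiameter(region)
#       # length is None if no suitable perimeter point was found near a pole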
# take info and make string for cell id
def create_focus_id(region, t, peak, fov, experiment_name=None):
'''Make a unique focus id string for a new focus'''
if experiment_name is None:
focus_id = 'f{:0=2}p{:0=4}t{:0=4}r{:0=2}'.format(fov, peak, t, region.label)
else:
focus_id = '{}f{:0=2}p{:0=4}t{:0=4}r{:0=2}'.format(experiment_name, fov, peak, t, region.label)
return focus_id
# take info and make string for cell id
def create_cell_id(region, t, peak, fov, experiment_name=None):
'''Make a unique cell id string for a new cell'''
# cell_id = ['f', str(fov), 'p', str(peak), 't', str(t), 'r', str(region.label)]
if experiment_name is None:
cell_id = ['f', '%02d' % fov, 'p', '%04d' % peak, 't', '%04d' % t, 'r', '%02d' % region.label]
cell_id = ''.join(cell_id)
else:
cell_id = '{}f{:0=2}p{:0=4}t{:0=4}r{:0=2}'.format(experiment_name, fov, peak, t, region.label)
return cell_id
def create_detection_id(t, peak, fov, region_label, experiment_name=None, max_cell_number=6):
'''Make a unique detection id string for a new detection'''
# cell_id = ['f', str(fov), 'p', str(peak), 't', str(t), 'r', str(region.label)]
if experiment_name is None:
det_id = ['f', '%02d' % fov, 'p', '%04d' % peak, 't', '%04d' % t, 'r', '%02d' % region_label]
det_id = ''.join(det_id)
else:
det_id = '{}f{:0=2}p{:0=4}t{:0=4}r{:0=2}'.format(experiment_name, fov, peak, t, region_label)
return det_id
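# Example of the id format produced by the helpers above (illustrative values only): with
# experiment_name='exp01', fov=1, peak=23, t=5 and region label 2, create_cell_id and
# create_detection_id both return 'exp01f01p0023t0005r02'; without an experiment name the
# prefix is dropped ('f01p0023t0005r02'). The detection ids double as node ids in the
# tracking graph, which is why the lineage code tests node_id.startswith(params['experiment_name']).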
def initialize_track_graph(peak_id,
fov_id,
experiment_name,
predictions_dict,
regions_by_time,
max_cell_number=6,
born_threshold=0.75,
appear_threshold=0.75):
detection_dict = {}
frame_num = predictions_dict['migrate_model_predictions'].shape[0]
ebunch = []
G = nx.MultiDiGraph()
# create common start point
G.add_node('A')
# create common end point
G.add_node('B')
last_frame = False
node_id_list = []
timepoint_list = []
region_label_list = []
for frame_idx in range(frame_num):
timepoint = frame_idx + 1
paired_detection_time = timepoint+1
# get detections for this frame
frame_regions_list = regions_by_time[frame_idx]
# if we're at the end of the imaging, make all cells migrate to node 'B'
if timepoint == frame_num:
last_frame = True
else:
paired_frame_regions_list = regions_by_time[frame_idx+1]
# get state change probabilities (class predictions) for this frame
frame_prediction_dict = {key:val[frame_idx,...] for key,val in predictions_dict.items() if key != 'general_model_predictions'}
# for i in range(len(predictions_dict['general_model_predictions'])):
# frame_general_prediction = predictions_dict['general_model_predictions'][]
# create the "will be born" and "will appear" nodes for this frame
prior_born_state = 'born_{:0=4}'.format(timepoint-1)
born_state = 'born_{:0=4}'.format(timepoint)
G.add_node(born_state, visited=False, time=timepoint)
prior_appear_state = 'appear_{:0=4}'.format(timepoint-1)
appear_state = 'appear_{:0=4}'.format(timepoint)
G.add_node(appear_state, visited=False, time=timepoint)
if frame_idx == 0:
ebunch.append(('A', appear_state, 'start', {'weight':appear_threshold, 'score':1*np.log(appear_threshold)}))
ebunch.append(('A', born_state, 'start', {'weight':born_threshold, 'score':1*np.log(born_threshold)}))
# create the "Dies" and "Disappeared" nodes to link from prior frame
prior_dies_state = 'dies_{:0=4}'.format(timepoint-1)
dies_state = 'dies_{:0=4}'.format(timepoint)
next_dies_state = 'dies_{:0=4}'.format(timepoint+1)
G.add_node(dies_state, visited=False, time=timepoint)
prior_disappear_state = 'disappear_{:0=4}'.format(timepoint-1)
disappear_state = 'disappear_{:0=4}'.format(timepoint)
next_disappear_state = 'disappear_{:0=4}'.format(timepoint+1)
G.add_node(disappear_state, visited=False, time=timepoint)
node_id_list.extend([born_state, dies_state, appear_state, disappear_state])
timepoint_list.extend([timepoint, timepoint, timepoint, timepoint])
region_label_list.extend([0,0,0,0])
if frame_idx > 0:
ebunch.append((prior_dies_state, dies_state, 'die', {'weight':1.1, 'score':1*np.log(1.1)})) # impossible to move out of dies track
ebunch.append((prior_disappear_state, disappear_state, 'disappear', {'weight':1.1, 'score':1*np.log(1.1)})) # impossible to move out of disappear track
ebunch.append((prior_born_state, born_state, 'born', {'weight':born_threshold, 'score':1*np.log(born_threshold)}))
ebunch.append((prior_appear_state, appear_state, 'appear', {'weight':appear_threshold, 'score':1*np.log(appear_threshold)}))
if last_frame:
ebunch.append((appear_state, 'B', 'end', {'weight':1, 'score':1*np.log(1)}))
ebunch.append((disappear_state, 'B', 'end', {'weight':1, 'score':1*np.log(1)}))
ebunch.append((born_state, 'B', 'end', {'weight':1, 'score':1*np.log(1)}))
ebunch.append((dies_state, 'B', 'end', {'weight':1, 'score':1*np.log(1)}))
for region_idx in range(max_cell_number):
# the tracking models assume there are 6 detections in each frame, regardless of how many
# are actually there. Therefore, this try/except logic will catch cases where there
# were fewer than 6 detections in a frame.
try:
region = frame_regions_list[region_idx]
region_label = region.label
except IndexError:
region = None
region_label = region_idx + 1
# create the name for this detection
detection_id = create_detection_id(timepoint,
peak_id,
fov_id,
region_label,
experiment_name=experiment_name)
det = Detection(detection_id, region, timepoint)
detection_dict[det.id] = det
if det.area is not None:
# if the detection represents a segmentation from our imaging, add its ID,
# which is also its key in detection_dict, as a node in G
G.add_node(det.id, visited=False, cell_count=1, region=region, time=timepoint)
timepoint_list.append(timepoint)
node_id_list.append(detection_id)
region_label_list.append(region.label)
# also set up all edges for this detection's node in our ebunch
# loop through prediction types and add each to the ebunch
for key,val in frame_prediction_dict.items():
if frame_idx == 0:
ebunch.append(('A', detection_id, 'start', {'weight':1, 'score':1*np.log(1)}))
if last_frame:
ebunch.append((detection_id, 'B', 'end', {'weight':1, 'score':1*np.log(1)}))
if val.shape[0] == max_cell_number ** 2:
continue
else:
frame_predictions = val
detection_prediction = frame_predictions[region_idx]
if key == 'appear_model_predictions':
if frame_idx == 0:
continue
elem = (prior_appear_state, detection_id, 'appear', {'weight':detection_prediction, 'score':1*np.log(detection_prediction)})
elif 'born' in key:
if frame_idx == 0:
continue
elem = (prior_born_state, detection_id, 'born', {'weight':detection_prediction, 'score':1*np.log(detection_prediction)})
elif 'zero_cell' in key:
G.nodes[det.id]['zero_cell_weight'] = detection_prediction
G.nodes[det.id]['zero_cell_score'] = 1*np.log(detection_prediction)
elif 'one_cell' in key:
G.nodes[det.id]['one_cell_weight'] = detection_prediction
G.nodes[det.id]['one_cell_score'] = 1*np.log(detection_prediction)
elif 'two_cell' in key:
G.nodes[det.id]['two_cell_weight'] = detection_prediction
G.nodes[det.id]['two_cell_score'] = 1*np.log(detection_prediction)
ebunch.append(elem)
else:
# if the array is cell_number^2, reshape it to cell_number x cell_number
# Then slice our detection's row and iterate over paired_cells
if val.shape[0] == max_cell_number**2:
frame_predictions = val.reshape((max_cell_number,max_cell_number))
detection_predictions = frame_predictions[region_idx,:]
# loop through paired detection predictions, test whether paired detection exists
# then append the edge to our ebunch
for paired_cell_idx in range(detection_predictions.size):
# attempt to grab the paired detection. If we get an IndexError, it doesn't exist.
try:
paired_detection = paired_frame_regions_list[paired_cell_idx]
except IndexError:
continue
# create the paired detection's id for use in our ebunch
paired_detection_id = create_detection_id(paired_detection_time,
peak_id,
fov_id,
paired_detection.label,
experiment_name=experiment_name)
paired_prediction = detection_predictions[paired_cell_idx]
if 'child_' in key:
child_weight = paired_prediction
elem = (detection_id, paired_detection_id, 'child', {'child_weight':child_weight, 'score':1*np.log(child_weight)})
ebunch.append(elem)
if 'migrate_' in key:
migrate_weight = paired_prediction
elem = (detection_id, paired_detection_id, 'migrate', {'migrate_weight':migrate_weight, 'score':1*np.log(migrate_weight)})
ebunch.append(elem)
# if 'interaction_' in key:
# interaction_weight = paired_prediction
# elem = (detection_id, paired_detection_id, 'interaction', {'weight':interaction_weight, 'score':1*np.log(interaction_weight)})
# ebunch.append(elem)
# if the array is cell_number long, do similar stuff as above.
elif val.shape[0] == max_cell_number:
frame_predictions = val
detection_prediction = frame_predictions[region_idx]
if key == 'appear_model_predictions':
if frame_idx == 0:
continue
# print("Linking {} to {}.".format(prior_appear_state, detection_id))
elem = (prior_appear_state, detection_id, 'appear', {'weight':detection_prediction, 'score':1*np.log(detection_prediction)})
elif 'disappear_' in key:
if last_frame:
continue
# print("Linking {} to {}.".format(detection_id, next_disappear_state))
elem = (detection_id, next_disappear_state, 'disappear', {'weight':detection_prediction, 'score':1*np.log(detection_prediction)})
elif 'born_' in key:
if frame_idx == 0:
continue
# print("Linking {} to {}.".format(prior_born_state, detection_id))
elem = (prior_born_state, detection_id, 'born', {'weight':detection_prediction, 'score':1*np.log(detection_prediction)})
elif 'die_model' in key:
if last_frame:
continue
# print("Linking {} to {}.".format(detection_id, next_dies_state))
elem = (detection_id, next_dies_state, 'die', {'weight':detection_prediction, 'score':1*np.log(detection_prediction)})
# the following classes aren't yet implemented
elif 'zero_cell' in key:
G.nodes[det.id]['zero_cell_weight'] = detection_prediction
G.nodes[det.id]['zero_cell_score'] = 1*np.log(detection_prediction)
elif 'one_cell' in key:
G.nodes[det.id]['one_cell_weight'] = detection_prediction
G.nodes[det.id]['one_cell_score'] = 1*np.log(detection_prediction)
elif 'two_cell' in key:
G.nodes[det.id]['two_cell_weight'] = detection_prediction
G.nodes[det.id]['two_cell_score'] = 1*np.log(detection_prediction)
ebunch.append(elem)
G.add_edges_from(ebunch)
graph_df = pd.DataFrame(data={'timepoint':timepoint_list,
'node_id':node_id_list,
'region_label':region_label_list})
return(G, graph_df)
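# Notes on the graph produced above (a summary, not new behavior): detections become nodes
# keyed by their detection ids; the helper states ('born_XXXX', 'appear_XXXX', 'dies_XXXX',
# 'disappear_XXXX') carry non-detection events; 'A' and 'B' are the shared source/sink; and
# each edge's 'score' is the natural log of its model weight, so the greedy traversal in
# create_lineages_from_graph_2 simply takes argmax over scores. For example, after building:
#
#   G, graph_df = initialize_track_graph(peak_id, fov_id, params['experiment_name'],
#                                        predictions_dict, regions_by_time)
#   G.nodes['born_0001']   # -> {'visited': False, 'time': 1}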
# function for a growing cell, used to calculate growth rate
def cell_growth_func(t, sb, elong_rate):
'''
Assumes you have taken log of the data.
It also allows the size at birth to be a free parameter, rather than fixed
at the actual size at birth (but still uses that as a guess)
Assumes natural log, not base 2 (though I think that makes less sense)
old form: sb*2**(alpha*t)
'''
return sb+elong_rate*t
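# Illustrative sketch (not called anywhere in the pipeline): estimating an elongation rate by
# fitting cell_growth_func to a cell's log-lengths with scipy. It assumes a Cell-like object
# exposing .times and .lengths, as the cell classes in this module do; the helper name below
# is hypothetical.
def _example_fit_elongation_rate(cell):
    '''Fit log(length) = log(sb) + elong_rate * t for one cell.
    Returns (size_at_birth, elong_rate); times are taken relative to birth.'''
    from scipy.optimize import curve_fit
    t = np.array(cell.times, dtype=float) - cell.times[0]
    log_lengths = np.log(np.array(cell.lengths, dtype=float))
    p0 = (log_lengths[0], 0.01)  # initial guesses: log size at birth, small growth rate
    popt, _ = curve_fit(cell_growth_func, t, log_lengths, p0=p0)
    log_sb, elong_rate = popt
    return np.exp(log_sb), elong_rate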
# functions for checking if a cell has divided or not
# this function should also take the variable t to
# weight the allowed changes by the difference in time as well
def check_growth_by_region(cell, region):
'''Checks to see if it makes sense
to grow a cell by a particular region'''
# load parameters for checking
max_growth_length = params['track']['max_growth_length']
min_growth_length = params['track']['min_growth_length']
max_growth_area = params['track']['max_growth_area']
min_growth_area = params['track']['min_growth_area']
# check if length is not too much longer
if cell.lengths[-1]*max_growth_length < region.major_axis_length:
return False
# check if it is not too short (cell should not shrink really)
if cell.lengths[-1]*min_growth_length > region.major_axis_length:
return False
# check if area is not too great
if cell.areas[-1]*max_growth_area < region.area:
return False
# check if area is not too small
if cell.areas[-1]*min_growth_area > region.area:
return False
# # check if y position of region is within
# # the quarter positions of the bounding box
# lower_quarter = cell.bboxes[-1][0] + (region.major_axis_length / 4)
# upper_quarter = cell.bboxes[-1][2] - (region.major_axis_length / 4)
# if lower_quarter > region.centroid[0] or upper_quarter < region.centroid[0]:
# return False
# check if y position of region is within the bounding box of previous region
lower_bound = cell.bboxes[-1][0]
upper_bound = cell.bboxes[-1][2]
if lower_bound > region.centroid[0] or upper_bound < region.centroid[0]:
return False
# return true if you get this far
return True
# see if a cell has reasonably divided
def check_division(cell, region1, region2):
'''Checks to see if it makes sense to divide a
cell into two new cells based on two regions.
Return 0 if nothing should happen and the regions should be ignored
Return 1 if cell should grow by region 1
Return 2 if cell should grow by region 2
Return 3 if cell should divide into the regions.'''
# load in parameters
max_growth_length = params['track']['max_growth_length']
min_growth_length = params['track']['min_growth_length']
# see if either region just could be continued growth,
# if that is the case then just return
# these shouldn't return true if the cells are divided
# as they would be too small
if check_growth_by_region(cell, region1):
return 1
if check_growth_by_region(cell, region2):
return 2
# make sure combined size of daughters is not too big
combined_size = region1.major_axis_length + region2.major_axis_length
# check if length is not too much longer
if cell.lengths[-1]*max_growth_length < combined_size:
return 0
# and not too small
if cell.lengths[-1]*min_growth_length > combined_size:
return 0
# centroids of regions should be in the upper and lower half
# of the mother's bounding box, respectively
# top region within top half of mother bounding box
if cell.bboxes[-1][0] > region1.centroid[0] or cell.centroids[-1][0] < region1.centroid[0]:
return 0
# bottom region within bottom half of mother bounding box
if cell.centroids[-1][0] > region2.centroid[0] or cell.bboxes[-1][2] < region2.centroid[0]:
return 0
# if you got this far then divide the mother
return 3
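# Hedged usage sketch: the integer code returned by check_division is typically switched on
# when trying to link two candidate regions to an existing cell; variable names below are
# placeholders for whatever the calling tracking loop uses.
#
#   outcome = check_division(cell, region1, region2)
#   if outcome == 1:
#       cell.grow(region1, t)
#   elif outcome == 2:
#       cell.grow(region2, t)
#   elif outcome == 3:
#       pass  # divide the mother: create two daughter cells from region1 and region2
#   # outcome == 0: neither region is a plausible continuation; ignore both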
### functions for pruning a dictionary of cells
# find cells with both a mother and two daughters
def find_complete_cells(Cells):
'''Go through a dictionary of cells and return another dictionary
that contains just those with a parent and daughters'''
Complete_Cells = {}
for cell_id in Cells:
if Cells[cell_id].daughters and Cells[cell_id].parent:
Complete_Cells[cell_id] = Cells[cell_id]
return Complete_Cells
# finds cells whose birth label is 1
def find_mother_cells(Cells):
'''Return only cells whose starting region label is 1.'''
Mother_Cells = {}
for cell_id in Cells:
if Cells[cell_id].birth_label == 1:
Mother_Cells[cell_id] = Cells[cell_id]
return Mother_Cells
def filter_foci(Foci, label, t, debug=False):
Filtered_Foci = {}
for focus_id, focus in Foci.items():
# copy the times list so as not to update it in-place
times = list(focus.times)
if debug:
print(times)
match_inds = [i for i,time in enumerate(times) if time == t]
labels = [focus.labels[idx] for idx in match_inds]
if label in labels:
Filtered_Foci[focus_id] = focus
return Filtered_Foci
def filter_cells(Cells, attr, val, idx=None, debug=False):
'''Return only cells whose designated attribute equals "val".'''
Filtered_Cells = {}
for cell_id, cell in Cells.items():
at_val = getattr(cell, attr)
if debug:
print(at_val)
print("Times: ", cell.times)
if idx is not None:
at_val = at_val[idx]
if at_val == val:
Filtered_Cells[cell_id] = cell
return Filtered_Cells
def filter_cells_containing_val_in_attr(Cells, attr, val):
'''Return only cells that have val in list attribute, attr.'''
Filtered_Cells = {}
for cell_id, cell in Cells.items():
at_list = getattr(cell, attr)
if val in at_list:
Filtered_Cells[cell_id] = cell
return Filtered_Cells
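# Example of chaining the filter helpers above (a sketch; `Cells` is any dictionary of cell
# objects keyed by cell id, and the fov/peak/time values are placeholders): narrow a full
# cell dictionary down to a single fov, peak, and time point.
#
#   fov_cells = filter_cells(Cells, attr='fov', val=1)
#   peak_cells = filter_cells(fov_cells, attr='peak', val=23)
#   frame_cells = filter_cells_containing_val_in_attr(peak_cells, attr='times', val=10)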
### functions for additional cell centric analysis
def compile_cell_info_df(Cells):
# count the number of rows that will be in the long dataframe
quant_fluor = False
long_df_row_number = 0
for cell in Cells.values():
# first time through, evaluate whether we quantified cells' fluorescence
if long_df_row_number == 0:
if len(cell.area_mean_fluorescence.keys()) != 0:
quant_fluor = True
fluorescence_channels = [k for k in cell.area_mean_fluorescence.keys()]
long_df_row_number += len(cell.times)
# initialize some arrays for filling with data
data = {
# ids can be up to 100 characters long
'id': np.chararray(long_df_row_number, itemsize=100),
'times': np.zeros(long_df_row_number, dtype='uint16'),
'lengths': np.zeros(long_df_row_number),
'volumes': np.zeros(long_df_row_number),
'areas': np.zeros(long_df_row_number),
'abs_times': np.zeros(long_df_row_number, dtype='uint32')
}
if quant_fluor:
for fluorescence_channel in fluorescence_channels:
data['{}_area_mean_fluorescence'.format(fluorescence_channel)] = np.zeros(long_df_row_number)
data['{}_volume_mean_fluorescence'.format(fluorescence_channel)] = np.zeros(long_df_row_number)
data['{}_total_fluorescence'.format(fluorescence_channel)] = np.zeros(long_df_row_number)
data = populate_focus_arrays(Cells, data, cell_quants=True)
long_df = pd.DataFrame(data=data)
wide_df_row_number = len(Cells)
data = {
# ids can be up to 100 characters long
'id': np.chararray(wide_df_row_number, itemsize=100),
'fov': np.zeros(wide_df_row_number, dtype='uint8'),
'peak': np.zeros(wide_df_row_number, dtype='uint16'),
'parent_id': np.chararray(wide_df_row_number, itemsize=100),
'child1_id': np.chararray(wide_df_row_number, itemsize=100),
'child2_id': np.chararray(wide_df_row_number, itemsize=100),
'division_time': np.zeros(wide_df_row_number),
'birth_label': np.zeros(wide_df_row_number, dtype='uint8'),
'birth_time': np.zeros(wide_df_row_number, dtype='uint16'),
'sb': np.zeros(wide_df_row_number),
'sd': np.zeros(wide_df_row_number),
'delta': np.zeros(wide_df_row_number),
'tau': np.zeros(wide_df_row_number),
'elong_rate': np.zeros(wide_df_row_number),
'septum_position': np.zeros(wide_df_row_number),
'death': np.zeros(wide_df_row_number),
'disappear': np.zeros(wide_df_row_number)
}
data = populate_focus_arrays(Cells, data, cell_quants=True, wide=True)
# data['parent_id'] = data['parent_id'].decode()
# data['child1_id'] = data['child1_id'].decode()
# data['child2_id'] = data['child2_id'].decode()
wide_df = pd.DataFrame(data=data)
return(wide_df,long_df)
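# Usage sketch (illustrative only): compile_cell_info_df returns a wide per-cell DataFrame and
# a long per-timepoint DataFrame; the file names below are placeholders.
#
#   wide_df, long_df = compile_cell_info_df(Complete_Cells)
#   wide_df.to_csv('cells_wide.csv', index=False)
#   long_df.to_csv('cells_long.csv', index=False)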
def populate_focus_arrays(Foci, data_dict, cell_quants=False, wide=False):
focus_counter = 0
focus_count = len(Foci)
end_idx = 0
for i,focus in enumerate(Foci.values()):
if wide:
start_idx = i
end_idx = i + 1
else:
start_idx = end_idx
end_idx = len(focus) + start_idx
if focus_counter % 100 == 0:
print("Generating focus information for focus {} out of {}.".format(focus_counter+1, focus_count))
# loop over keys in data dictionary, and set
# values in appropriate array, at appropriate indices
# to those we find in the focus.
for key in data_dict.keys():
if '_id' in key:
if key == 'parent_id':
if focus.parent is None:
data_dict[key][start_idx:end_idx] = ''
else:
data_dict[key][start_idx:end_idx] = focus.parent.id
if focus.daughters is None:
if key == 'child1_id' or key == 'child2_id':
data_dict[key][start_idx:end_idx] = ''
elif len(focus.daughters) == 1:
if key == 'child2_id':
data_dict[key][start_idx:end_idx] = ''
elif key == 'child1_id':
data_dict[key][start_idx:end_idx] = focus.daughters[0].id
elif key == 'child2_id':
data_dict[key][start_idx:end_idx] = focus.daughters[1].id
else:
attr_vals = getattr(focus, key)
if (cell_quants and key=='abs_times'):
if len(attr_vals) == end_idx-start_idx:
data_dict[key][start_idx:end_idx] = attr_vals
else:
data_dict[key][start_idx:end_idx] = attr_vals[:-1]
else:
# print(key)
# print(attr_vals)
data_dict[key][start_idx:end_idx] = attr_vals
focus_counter += 1
data_dict['id'] = data_dict['id'].decode()
return(data_dict)
def compile_foci_info_long_df(Foci):
'''
Parameters
----------------
Foci : dictionary, keys of which are focus_ids,
values of which are objects of class Focus
Returns
----------------------
A long DataFrame with
detailed information about each timepoint for each focus.
'''
# count the number of rows that will be in the long dataframe
long_df_row_number = 0
for focus in Foci.values():
long_df_row_number += len(focus)
# initialize some arrays for filling with data
data = {
# ids can be up to 100 characters long
'id': np.chararray(long_df_row_number, itemsize=100),
'times': np.zeros(long_df_row_number, dtype='uint16'),
'lengths': np.zeros(long_df_row_number),
'volumes': np.zeros(long_df_row_number),
'areas': np.zeros(long_df_row_number),
'abs_times': np.zeros(long_df_row_number, dtype='uint32'),
'area_mean_fluorescence': np.zeros(long_df_row_number),
'volume_mean_fluorescence': np.zeros(long_df_row_number),
'total_fluorescence': np.zeros(long_df_row_number),
'median_fluorescence': np.zeros(long_df_row_number),
'sd_fluorescence': np.zeros(long_df_row_number),
'disp_l': np.zeros(long_df_row_number),
'disp_w': np.zeros(long_df_row_number)
}
data = populate_focus_arrays(Foci, data)
long_df = pd.DataFrame(data=data)
return(long_df)
def find_all_cell_intensities(Cells,
specs, time_table, channel_name='sub_c2',
apply_background_correction=True):
'''
Finds fluorescence information for cells. Iterates over every fov/peak in
specs and quantifies each cell in the fov/peak it belongs to.
'''
# iterate over each fov in specs
for fov_id,fov_peaks in specs.items():
# iterate over each peak in fov
for peak_id,peak_value in fov_peaks.items():
# if peak_id's value is not 1, go to next peak
if peak_value != 1:
continue
print("Quantifying channel {} fluorescence in cells in fov {}, peak {}.".format(channel_name, fov_id, peak_id))
# Load fluorescent images and segmented images for this channel
fl_stack = load_stack(fov_id, peak_id, color=channel_name)
corrected_stack = np.zeros(fl_stack.shape)
for frame in range(fl_stack.shape[0]):
# median filter will be applied to every image
with warnings.catch_warnings():
warnings.simplefilter("ignore")
median_filtered = median(fl_stack[frame,...], selem=morphology.disk(1))
# subtract the gaussian-filtered image from true image to correct
# uneven background fluorescence
if apply_background_correction:
blurred = filters.gaussian(median_filtered, sigma=10, preserve_range=True)
corrected_stack[frame,:,:] = median_filtered-blurred
else:
corrected_stack[frame,:,:] = median_filtered
seg_stack = load_stack(fov_id, peak_id, color='seg_unet')
# evaluate whether each cell is in this fov/peak combination
for cell_id,cell in Cells.items():
cell_fov = cell.fov
if cell_fov != fov_id:
continue
cell_peak = cell.peak
if cell_peak != peak_id:
continue
cell_times = cell.times
cell_labels = cell.labels
cell.area_mean_fluorescence[channel_name] = []
cell.volume_mean_fluorescence[channel_name] = []
cell.total_fluorescence[channel_name] = []
# loop through cell's times
for i,t in enumerate(cell_times):
frame = t-1
cell_label = cell_labels[i]
total_fluor = np.sum(corrected_stack[frame, seg_stack[frame, :,:] == cell_label])
cell.area_mean_fluorescence[channel_name].append(total_fluor/cell.areas[i])
cell.volume_mean_fluorescence[channel_name].append(total_fluor/cell.volumes[i])
cell.total_fluorescence[channel_name].append(total_fluor)
# The cell objects in the original dictionary will be updated,
# no need to return anything specifically.
return
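# Hedged usage note: find_all_cell_intensities mutates the Cell objects in place, filling the
# per-channel dictionaries area_mean_fluorescence, volume_mean_fluorescence and
# total_fluorescence under channel_name (it assumes those dictionary attributes already exist
# on the cells). A typical call is simply:
#
#   find_all_cell_intensities(Cells, specs, time_table, channel_name='sub_c2')
#   # afterwards, e.g. Cells[some_id].total_fluorescence['sub_c2'] is a per-timepoint list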
def find_cell_intensities_worker(fov_id, peak_id, Cells, midline=True, channel='sub_c3'):
'''
Finds fluorescent information for cells. All the cells in Cells
should be from one fov/peak. See the function
organize_cells_by_channel()
This version is the same as find_cell_intensities but returns the Cells object for collection by the pool.
The original find_cell_intensities is kept for compatibility.
'''
information('Processing peak {} in FOV {}'.format(peak_id, fov_id))
# Load fluorescent images and segmented images for this channel
fl_stack = load_stack(fov_id, peak_id, color=channel)
seg_stack = load_stack(fov_id, peak_id, color='seg_otsu')
# determine absolute time index
time_table = params['time_table']
times_all = []
for fov in params['time_table']:
times_all = np.append(times_all, [int(x) for x in time_table[fov].keys()])
times_all = np.unique(times_all)
times_all = np.sort(times_all)
times_all = np.array(times_all,np.int_)
t0 = times_all[0] # first time index
# Loop through cells
for Cell in Cells.values():
# give this cell lists to hold new information
Cell.fl_tots = [] # total fluorescence per time point
Cell.fl_area_avgs = [] # avg fluorescence per unit area by timepoint
Cell.fl_vol_avgs = [] # avg fluorescence per unit volume by timepoint
if midline:
Cell.mid_fl = [] # avg fluorescence of midline
# and the time points that make up this cell's life
for n, t in enumerate(Cell.times):
# create fluorescent image only for this cell and timepoint.
fl_image_masked = np.copy(fl_stack[t-t0])
fl_image_masked[seg_stack[t-t0] != Cell.labels[n]] = 0
# append total fluorescence for this time point
Cell.fl_tots.append(np.sum(fl_image_masked))
# and the average fluorescence
Cell.fl_area_avgs.append(np.sum(fl_image_masked) / Cell.areas[n])
Cell.fl_vol_avgs.append(np.sum(fl_image_masked) / Cell.volumes[n])
if midline:
# add the midline average by first applying morphology transform
bin_mask = np.copy(seg_stack[t-t0])
bin_mask[bin_mask != Cell.labels[n]] = 0
med_mask, _ = morphology.medial_axis(bin_mask, return_distance=True)
# med_mask[med_dist < np.floor(cap_radius/2)] = 0
# print(img_fluo[med_mask])
if (np.shape(fl_image_masked[med_mask])[0] > 0):
Cell.mid_fl.append(np.nanmean(fl_image_masked[med_mask]))
else:
Cell.mid_fl.append(0)
# return the cell object to the pool initiated by mm3_Colors.
return Cells
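# Sketch of driving the worker above with a multiprocessing Pool and collecting results
# (hedged; `cells_by_peak` is a placeholder for a dict mapping (fov_id, peak_id) to the cells
# of that peak, e.g. as produced by organize_cells_by_channel()):
#
#   pool = Pool(processes=params['num_analyzers'])
#   results = {key: pool.apply_async(find_cell_intensities_worker, args=(key[0], key[1], cells))
#              for key, cells in cells_by_peak.items()}
#   pool.close()
#   pool.join()
#   for key, res in results.items():
#       cells_by_peak[key] = res.get()  # the worker returns the updated Cells dict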
def find_cell_intensities(fov_id, peak_id, Cells, midline=False, channel_name='sub_c2'):
'''
Finds fluorescent information for cells. All the cells in Cells
should be from one fov/peak. See the function
organize_cells_by_channel()
'''
# Load fluorescent images and segmented images for this channel
fl_stack = load_stack(fov_id, peak_id, color=channel_name)
seg_stack = load_stack(fov_id, peak_id, color='seg_unet')
# determine absolute time index
times_all = []
for fov in params['time_table']:
times_all = np.append(times_all, [int(x) for x in params['time_table'][fov].keys()])
times_all = np.unique(times_all)
times_all = np.sort(times_all)
times_all = np.array(times_all,np.int_)
t0 = times_all[0] # first time index
# Loop through cells
for Cell in Cells.values():
# give this cell lists to hold new information
Cell.fl_tots = [] # total fluorescence per time point
Cell.fl_area_avgs = [] # avg fluorescence per unit area by timepoint
Cell.fl_vol_avgs = [] # avg fluorescence per unit volume by timepoint
if midline:
Cell.mid_fl = [] # avg fluorescence of midline
# and the time points that make up this cell's life
for n, t in enumerate(Cell.times):
# create fluorescent image only for this cell and timepoint.
fl_image_masked = np.copy(fl_stack[t-t0])
fl_image_masked[seg_stack[t-t0] != Cell.labels[n]] = 0
# append total fluorescence for this time point
Cell.fl_tots.append(np.sum(fl_image_masked))
# and the average fluorescence
Cell.fl_area_avgs.append(np.sum(fl_image_masked) / Cell.areas[n])
Cell.fl_vol_avgs.append(np.sum(fl_image_masked) / Cell.volumes[n])
if midline:
# add the midline average by first applying morphology transform
bin_mask = np.copy(seg_stack[t-t0])
bin_mask[bin_mask != Cell.labels[n]] = 0
med_mask, _ = morphology.medial_axis(bin_mask, return_distance=True)
# med_mask[med_dist < np.floor(cap_radius/2)] = 0
# print(img_fluo[med_mask])
if (np.shape(fl_image_masked[med_mask])[0] > 0):
Cell.mid_fl.append(np.nanmean(fl_image_masked[med_mask]))
else:
Cell.mid_fl.append(0)
# The cell objects in the original dictionary will be updated,
# no need to return anything specifically.
return
# find foci using a difference of gaussians method
def foci_analysis(fov_id, peak_id, Cells):
'''Find foci in cells using a fluorescent image channel.
This function works on a single peak and all the cells therein.'''
# make directory for foci debug
# foci_dir = os.path.join(params['ana_dir'], 'overlay/')
# if not os.path.exists(foci_dir):
# os.makedirs(foci_dir)
# Import segmented and fluorescent images
try:
image_data_seg = load_stack(fov_id, peak_id, color='seg_unet')
except IOError:
image_data_seg = load_stack(fov_id, peak_id, color='seg_otsu')
image_data_FL = load_stack(fov_id, peak_id,
color='sub_{}'.format(params['foci']['foci_plane']))
# determine absolute time index
times_all = []
for fov, times in params['time_table'].items():
times_all = np.append(times_all, list(times.keys()))
times_all = np.unique(times_all)
times_all = np.sort(times_all)
times_all = np.array(times_all, np.int_)
t0 = times_all[0] # first time index
for cell_id, cell in six.iteritems(Cells):
information('Extracting foci information for %s.' % (cell_id))
# declare lists holding information about foci.
disp_l = []
disp_w = []
foci_h = []
# foci_stack = np.zeros((np.size(cell.times),
# image_data_seg[0,:,:].shape[0], image_data_seg[0,:,:].shape[1]))
# Go through each time point of this cell
for t in cell.times:
# retrieve this timepoint and images.
image_data_temp = image_data_FL[t-t0,:,:]
image_data_temp_seg = image_data_seg[t-t0,:,:]
# find foci as long as there is information in the fluorescent image
if np.sum(image_data_temp) != 0:
disp_l_tmp, disp_w_tmp, foci_h_tmp = foci_lap(image_data_temp_seg,
image_data_temp, cell, t)
disp_l.append(disp_l_tmp)
disp_w.append(disp_w_tmp)
foci_h.append(foci_h_tmp)
# if there is no information, append an empty list.
# Should this be NaN?
else:
disp_l.append([])
disp_w.append([])
foci_h.append([])
# foci_stack[i] = image_data_temp_seg
# add information to the cell (will replace old data)
cell.disp_l = disp_l
cell.disp_w = disp_w
cell.foci_h = foci_h
# Create a stack of the segmented images with marked foci
# This should potentially be changed to the fluorescent images with marked foci
# foci_stack = np.uint16(foci_stack)
# foci_stack = np.stack(foci_stack, axis=0)
# # Export overlaid images
# foci_filename = params['experiment_name'] + 't%04d_xy%03d_p%04d_r%02d_overlay.tif' % (Cells[cell_id].birth_time, Cells[cell_id].fov, Cells[cell_id].peak, Cells[cell_id].birth_label)
# foci_filepath = foci_dir + foci_filename
#
# tiff.imsave(foci_filepath, foci_stack, compress=3) # save it
# test
# sys.exit()
return
# foci pool (for parallel analysis)
def foci_analysis_pool(fov_id, peak_id, Cells):
'''Find foci in cells using a fluorescent image channel.
This function works on a single peak and all the cells therein.'''
# make directory for foci debug
# foci_dir = os.path.join(params['ana_dir'], 'overlay/')
# if not os.path.exists(foci_dir):
# os.makedirs(foci_dir)
# Import segmented and fluorescent images
image_data_seg = load_stack(fov_id, peak_id, color='seg_unet')
image_data_FL = load_stack(fov_id, peak_id,
color='sub_{}'.format(params['foci']['foci_plane']))
# Load time table to determine first image index.
times_all = np.array(np.sort(list(params['time_table'][fov_id].keys())), np.int_)
t0 = times_all[0] # first time index
tN = times_all[-1] # last time index
# call foci_cell for each cell object
pool = Pool(processes=params['num_analyzers'])
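# note: foci_cell(...) in the line below is evaluated immediately in this (parent) process, so
# the foci detection actually runs serially here; apply_async only receives its None return
# value. That is also why the in-place updates to each cell persist. A truly asynchronous call
# would run in worker processes and the cell updates would be lost unless return values were
# collected, as in find_cell_intensities_worker.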
[pool.apply_async(foci_cell(cell_id, cell, t0, image_data_seg, image_data_FL)) for cell_id, cell in six.iteritems(Cells)]
pool.close()
pool.join()
# parallel function for each cell
def foci_cell(cell_id, cell, t0, image_data_seg, image_data_FL):
'''find foci in a cell, single instance to be called by the foci_analysis_pool for parallel processing.
'''
disp_l = []
disp_w = []
foci_h = []
# foci_stack = np.zeros((np.size(cell.times),
# image_data_seg[0,:,:].shape[0], image_data_seg[0,:,:].shape[1]))
# Go through each time point of this cell
for t in cell.times:
# retrieve this timepoint and images.
image_data_temp = image_data_FL[t-t0,:,:]
image_data_temp_seg = image_data_seg[t-t0,:,:]
# find foci as long as there is information in the fluorescent image
if np.sum(image_data_temp) != 0:
disp_l_tmp, disp_w_tmp, foci_h_tmp = foci_lap(image_data_temp_seg,
image_data_temp, cell, t)
disp_l.append(disp_l_tmp)
disp_w.append(disp_w_tmp)
foci_h.append(foci_h_tmp)
# if there is no information, append an empty list.
# Should this be NaN?
else:
disp_l.append(np.nan)
disp_w.append(np.nan)
foci_h.append(np.nan)
# foci_stack[i] = image_data_temp_seg
# add information to the cell (will replace old data)
cell.disp_l = disp_l
cell.disp_w = disp_w
cell.foci_h = foci_h
# actual worker function for foci detection
def foci_lap(img, img_foci, cell, t):
'''foci_lap finds foci using a laplacian convolution then fits a 2D
Gaussian.
The returned information are the parameters of this Gaussian.
All the information is returned in the form of np.arrays whose length is
the number of foci found for this cell at this time point.
Parameters
----------
img : 2D np.array
phase contrast or bright field image. Only used for debug
img_foci : 2D np.array
fluorescent image with foci.
cell : cell object
t : int
time point to which the images correspond
Returns
-------
disp_l : 1D np.array
displacement on long axis, in px, of a focus from the center of the cell
disp_w : 1D np.array
displacement on short axis, in px, of a focus from the center of the cell
foci_h : 1D np.array
Foci "height." Sum of the intensity of the gaussian fitting area.
'''
# pull out useful information for just this time point
i = cell.times.index(t) # find position of the time point in lists (time points may be missing)
bbox = cell.bboxes[i]
orientation = cell.orientations[i]
centroid = cell.centroids[i]
region = cell.labels[i]
# declare arrays which will hold foci data
disp_l = [] # displacement in length of foci from cell center
disp_w = [] # displacement in width of foci from cell center
foci_h = [] # foci total amount (from raw image)
# define parameters for foci finding
minsig = params['foci']['foci_log_minsig']
maxsig = params['foci']['foci_log_maxsig']
thresh = params['foci']['foci_log_thresh']
peak_med_ratio = params['foci']['foci_log_peak_med_ratio']
debug_foci = params['foci']['debug_foci']
# test
#print ("minsig={:d} maxsig={:d} thres={:.4g} peak_med_ratio={:.2g}".format(minsig,maxsig,thresh,peak_med_ratio))
# test
# calculate median cell intensity. Used to filter foci
img_foci_masked = np.copy(img_foci).astype(float)
img_foci_masked[img != region] = np.nan
cell_fl_median = np.nanmedian(img_foci_masked)
cell_fl_mean = np.nanmean(img_foci_masked)
img_foci_masked[img != region] = 0
# subtract this value from the cell
if False:
img_foci = img_foci.astype('int32') - cell_fl_median.astype('int32')
img_foci[img_foci < 0] = 0
img_foci = img_foci.astype('uint16')
# int_mask = np.zeros(img_foci.shape, np.uint8)
# avg_int = cv2.mean(img_foci, mask=int_mask)
# avg_int = avg_int[0]
# print('median', cell_fl_median)
# find blobs using the Laplacian of Gaussian detector (blob_log)
over_lap = .95 # if two blobs overlap by more than this fraction, smaller blob is cut
numsig = (maxsig - minsig + 1) # number of sigma values to consider between min and max sigma
blobs = blob_log(img_foci_masked, min_sigma=minsig, max_sigma=maxsig,
overlap=over_lap, num_sigma=numsig, threshold=thresh)
# these will hold information about foci position temporarily
x_blob, y_blob, r_blob = [], [], []
x_gaus, y_gaus, w_gaus = [], [], []
# loop through each potential foci
for blob in blobs:
yloc, xloc, sig = blob # x location, y location, and sigma of gaus
xloc = int(np.around(xloc)) # switch to int for slicing images
yloc = int(np.around(yloc))
radius = int(np.ceil(np.sqrt(2)*sig)) # will be used to slice out area around foci
# ensure blob is inside the bounding box
# this might be better to check if (xloc, yloc) is in regions.coords
if yloc > np.int16(bbox[0]) and yloc < np.int16(bbox[2]) and xloc > np.int16(bbox[1]) and xloc < np.int16(bbox[3]):
x_blob.append(xloc) # for plotting
y_blob.append(yloc) # for plotting
r_blob.append(radius)
# cut out a small image from original image to fit gaussian
gfit_area = img_foci[yloc-radius:yloc+radius, xloc-radius:xloc+radius]
# gfit_area_0 = img_foci[max(0, yloc-1*radius):min(img_foci.shape[0], yloc+1*radius),
# max(0, xloc-1*radius):min(img_foci.shape[1], xloc+1*radius)]
gfit_area_fixed = img_foci[yloc-maxsig:yloc+maxsig, xloc-maxsig:xloc+maxsig]
# fit gaussian to proposed foci in small box
p = fitgaussian(gfit_area)
(peak_fit, x_fit, y_fit, w_fit) = p
# print('peak', peak_fit)
if x_fit <= 0 or x_fit >= radius*2 or y_fit <= 0 or y_fit >= radius*2:
if debug_foci: print('Throw out foci (gaus fit not in gfit_area)')
continue
elif peak_fit/cell_fl_median < peak_med_ratio:
if debug_foci: print('Peak does not pass height test.')
continue
else:
# find x and y position relative to the whole image (convert from small box)
x_rel = int(xloc - radius + x_fit)
y_rel = int(yloc - radius + y_fit)
x_gaus = np.append(x_gaus, x_rel) # for plotting
y_gaus = np.append(y_gaus, y_rel) # for plotting
w_gaus = np.append(w_gaus, w_fit) # for plotting
if debug_foci: print('x', xloc, x_rel, x_fit, 'y', yloc, y_rel, y_fit, 'w', sig, radius, w_fit, 'h', np.sum(gfit_area), np.sum(gfit_area_fixed), peak_fit)
# calculate distance of foci from middle of cell (scikit image)
if orientation < 0:
orientation = np.pi+orientation
disp_y = (y_rel-centroid[0])*np.sin(orientation) - (x_rel-centroid[1])*np.cos(orientation)
disp_x = (y_rel-centroid[0])*np.cos(orientation) + (x_rel-centroid[1])*np.sin(orientation)
# append foci information to the list
disp_l = np.append(disp_l, disp_y)
disp_w = np.append(disp_w, disp_x)
foci_h = np.append(foci_h, np.sum(gfit_area_fixed))
# foci_h = np.append(foci_h, peak_fit)
else:
if debug_foci:
print ('Blob not in bounding box.')
# draw foci on image for quality control
if debug_foci:
outputdir = os.path.join(params['ana_dir'], 'debug_foci')
if not os.path.isdir(outputdir):
os.makedirs(outputdir)
# print(np.min(gfit_area), np.max(gfit_area), gfit_median, avg_int, peak)
# processing of image
fig = plt.figure(figsize=(12,12))
ax = fig.add_subplot(1,5,1)
plt.title('fluor image')
plt.imshow(img_foci, interpolation='nearest', cmap='gray')
ax = fig.add_subplot(1,5,2)
ax.set_title('segmented image')
ax.imshow(img, interpolation='nearest', cmap='gray')
ax = fig.add_subplot(1,5,3)
ax.set_title('DoG blobs')
ax.imshow(img_foci, interpolation='nearest', cmap='gray')
# add circles for where the blobs are
for i, spot in enumerate(x_blob):
foci_center = Ellipse([x_blob[i], y_blob[i]], r_blob[i], r_blob[i],
color=(1.0, 1.0, 0), linewidth=2, fill=False, alpha=0.5)
ax.add_patch(foci_center)
# show the shape of the gaussian for recorded foci
ax = fig.add_subplot(1,5,4)
ax.set_title('final foci')
ax.imshow(img_foci, interpolation='nearest', cmap='gray')
# print foci that pass and had gaussians fit
for i, spot in enumerate(x_gaus):
foci_ellipse = Ellipse([x_gaus[i], y_gaus[i]], w_gaus[i], w_gaus[i],
color=(0, 1.0, 0.0), linewidth=2, fill=False, alpha=0.5)
ax.add_patch(foci_ellipse)
ax = fig.add_subplot(1,5,5)
ax.set_title('overlay')
ax.imshow(img, interpolation='nearest', cmap='gray')
# print foci that pass and had gaussians fit
for i, spot in enumerate(x_gaus):
foci_ellipse = Ellipse([x_gaus[i], y_gaus[i]], 3, 3,
color=(1.0, 1.0, 0), linewidth=2, fill=False, alpha=0.5)
ax.add_patch(foci_ellipse)
#plt.show()
filename = 'foci_' + cell.id + '_time{:04d}'.format(t) + '.pdf'
fileout = os.path.join(outputdir,filename)
fig.savefig(fileout, bbox_inches='tight', pad_inches=0)
print (fileout)
plt.close('all')
nblobs = len(blobs)
print ("nblobs = {:d}".format(nblobs))
return disp_l, disp_w, foci_h
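# Hedged note on the detection step above: blob_log is scikit-image's Laplacian-of-Gaussian
# blob detector; min_sigma/max_sigma bound the expected focus size (a box of radius
# ceil(sqrt(2)*sigma) is cut out around each blob), threshold sets the minimum LoG response,
# and candidate blobs are then filtered by requiring the 2D Gaussian fit to land inside the
# cut-out box and the fitted peak to exceed peak_med_ratio times the cell's median fluorescence.
# The values in params['foci'] are experiment specific.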
# actual worker function for foci detection
def foci_info_unet(foci, Cells, specs, time_table, channel_name='sub_c2'):
'''foci_info_unet operates on cells in which foci have been found using Unet.
Parameters
----------
foci : empty dictionary for Focus objects to be placed into
Cells : dictionary of Cell objects to which foci will be added
specs : dictionary containing information on which fov/peak ids
are to be used, and which are to be excluded from analysis
time_table : dictionary containing information on which time
points correspond to which absolute times in seconds
channel_name : name of fluorescent channel for reading in
fluorescence images for focus quantification
Returns
-------
Updates cell information in Cells in-place.
Cells must have .foci attribute
'''
# iterate over each fov in specs
for fov_id,fov_peaks in specs.items():
# keep cells with this fov_id
fov_cells = filter_cells(Cells, attr='fov', val=fov_id)
# iterate over each peak in fov
for peak_id,peak_value in fov_peaks.items():
# print(fov_id, peak_id)
# keep cells with this peak_id
peak_cells = filter_cells(fov_cells, attr='peak', val=peak_id)
# if peak_id's value is not 1, go to next peak
if peak_value != 1:
continue
print("Analyzing foci in experiment {}, channel {}, fov {}, peak {}.".format(params['experiment_name'], channel_name, fov_id, peak_id))
# Load fluorescent images and segmented images for this channel
fl_stack = load_stack(fov_id, peak_id, color=channel_name)
seg_foci_stack = load_stack(fov_id, peak_id, color='foci_seg_unet')
seg_cell_stack = load_stack(fov_id, peak_id, color='seg_unet')
# loop over each frame
for frame in range(fl_stack.shape[0]):
fl_img = fl_stack[frame, ...]
seg_foci_img = seg_foci_stack[frame, ...]
seg_cell_img = seg_cell_stack[frame, ...]
# if there are no foci in this frame, move to next frame
if np.max(seg_foci_img) == 0:
continue
# if there are no cells in this fov/peak/frame, move to next frame
if np.max(seg_cell_img) == 0:
continue
t = frame+1
frame_cells = filter_cells_containing_val_in_attr(peak_cells, attr='times', val=t)
# loop over focus regions in this frame
focus_regions = measure.regionprops(seg_foci_img)
# compare this frame's foci to prior frame's foci for tracking
if frame > 0:
prior_seg_foci_img = seg_foci_stack[frame-1, ...]
fov_foci = filter_cells(foci,
attr='fov',
val=fov_id)
peak_foci = filter_cells(fov_foci,
attr='peak',
val=peak_id)
prior_frame_foci = filter_cells_containing_val_in_attr(peak_foci, attr='times', val=t-1)
# if there were foci in prior frame, do stuff
if len(prior_frame_foci) > 0:
prior_regions = measure.regionprops(prior_seg_foci_img)
# compare_array is prior_focus_number x this_focus_number
# and holds, for each pairwise comparison of focus positions,
# the maximum of the product of their gaussian-blurred masks
compare_array = np.zeros((np.max(prior_seg_foci_img),
np.max(seg_foci_img)))
# populate the array with dice indices
for prior_focus_idx in range(np.max(prior_seg_foci_img)):
prior_focus_mask = np.zeros(seg_foci_img.shape)
prior_focus_mask[prior_seg_foci_img == (prior_focus_idx + 1)] = 1
# apply gaussian blur with sigma=1 to prior focus mask
sig = 1
gaus_1 = filters.gaussian(prior_focus_mask, sigma=sig)
for this_focus_idx in range(np.max(seg_foci_img)):
this_focus_mask = np.zeros(seg_foci_img.shape)
this_focus_mask[seg_foci_img == (this_focus_idx + 1)] = 1
# apply gaussian blur with sigma=1 to this focus mask
gaus_2 = filters.gaussian(this_focus_mask, sigma=sig)
# multiply the two images and place max into compare_array
product = gaus_1 * gaus_2
compare_array[prior_focus_idx, this_focus_idx] = np.max(product)
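# The blurred masks are soft versions of the binary foci; the maximum of
# their product is largest when the two foci overlap and decays smoothly
# with the distance between their centers, so it serves as a soft
# association score for tracking foci across frames.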
# which rows of each column are maximum product of gaussian blurs?
max_inds = np.argmax(compare_array, axis=0)
# because np.argmax returns zero if all rows are equal, we
# need to evaluate if all rows are equal.
# If std_dev is zero, then all were equal,
# and we omit that index from consideration for
# focus tracking.
sd_vals = np.std(compare_array, axis=0)
tracked_inds = np.where(sd_vals > 0)[0]
# if there is an index from a tracked focus, do this
if tracked_inds.size > 0:
for tracked_idx in tracked_inds:
# grab this frame's region belonging to tracked focus
tracked_label = tracked_idx + 1
(tracked_region_idx, tracked_region) = [(_,reg) for _,reg in enumerate(focus_regions) if reg.label == tracked_label][0]
# pop the region from focus_regions
del focus_regions[tracked_region_idx]
# grab prior frame's region belonging to tracked focus
prior_tracked_label = max_inds[tracked_idx] + 1
# prior_tracked_region = [reg for reg in prior_regions if reg.label == prior_tracked_label][0]
# grab the focus for which the prior_tracked_label is in
# any of the labels in the prior focus from the prior time
prior_tracked_foci = filter_foci(
prior_frame_foci,
label=prior_tracked_label,
t = t-1,
debug=False
)
prior_tracked_focus = [val for val in prior_tracked_foci.values()][0]
# determine which cell this focus belongs to
for cell_id,cell in frame_cells.items():
cell_idx = cell.times.index(t)
cell_label = cell.labels[cell_idx]
masked_cell_img = np.zeros(seg_cell_img.shape)
masked_cell_img[seg_cell_img == cell_label] = 1
masked_focus_img = np.zeros(seg_foci_img.shape)
masked_focus_img[seg_foci_img == tracked_region.label] = 1
intersect_img = masked_cell_img + masked_focus_img
pixels_two = len(np.where(intersect_img == 2)[0])
pixels_one = len(np.where(masked_focus_img == 1)[0])
# if over half the focus is within this cell, do the following
if pixels_two/pixels_one >= 0.5:
prior_tracked_focus.grow(
region=tracked_region,
t=t,
seg_img=seg_foci_img,
intensity_image=fl_img,
current_cell=cell
)
# after tracking foci, those that were tracked have been removed from focus_regions list
# now we check if any regions remain in the list
# if there are any remaining, instantiate new foci
if len(focus_regions) > 0:
new_ids = []
for focus_region in focus_regions:
# make the focus_id
new_id = create_focus_id(
region = focus_region,
t = t,
peak = peak_id,
fov = fov_id,
experiment_name = params['experiment_name'])
# populate list for later checking if any are missing
# from foci dictionary's keys
new_ids.append(new_id)
# determine which cell this focus belongs to
for cell_id,cell in frame_cells.items():
cell_idx = cell.times.index(t)
cell_label = cell.labels[cell_idx]
masked_cell_img = np.zeros(seg_cell_img.shape)
masked_cell_img[seg_cell_img == cell_label] = 1
masked_focus_img = np.zeros(seg_foci_img.shape)
masked_focus_img[seg_foci_img == focus_region.label] = 1
intersect_img = masked_cell_img + masked_focus_img
pixels_two = len(np.where(intersect_img == 2)[0])
pixels_one = len(np.where(masked_focus_img == 1)[0])
# if over half the focus is within this cell, do the following
if pixels_two/pixels_one >= 0.5:
# set up the focus
# if no foci in cell, just add this one.
foci[new_id] = Focus(cell = cell,
region = focus_region,
seg_img = seg_foci_img,
intensity_image = fl_img,
t = t)
for new_id in new_ids:
# if new_id is not a key in the foci dictionary,
# that suggests the focus doesn't overlap well
# with any cells in this frame, so we'll relabel
# this frame of seg_foci_stack to zero for that
# focus to avoid trying to track a focus
# that doesn't exist.
if new_id not in foci:
# get label of new_id's region
this_label = int(new_id[-2:])
# set pixels in this frame that match this label to 0
seg_foci_stack[frame, seg_foci_img == this_label] = 0
return
def update_cell_foci(cells, foci):
'''Updates cells' .foci attribute in-place using information
in foci dictionary
'''
for focus_id, focus in foci.items():
for cell in focus.cells:
cell_id = cell.id
cells[cell_id].foci[focus_id] = focus
# finds best fit for a 2D gaussian using the gaussian and moments functions defined below
def fitgaussian(data):
"""Returns (height, x, y, width_x, width_y)
the gaussian parameters of a 2D distribution found by a fit
if params are not provided, they are calculated from the moments
params should be (height, x, y, width_x, width_y)"""
gparams = moments(data) # create guess parameters.
errorfunction = lambda p: np.ravel(gaussian(*p)(*np.indices(data.shape)) - data)
p, success = leastsq(errorfunction, gparams)
return p
# calculate dice coefficient for two blobs
def dice_coeff_foci(mask_1_f, mask_2_f):
'''Accepts two flattened numpy arrays from
binary masks of two blobs and compares them
using the dice metric.
Returns a single dice score.
'''
intersection = np.sum(mask_1_f * mask_2_f)
score = (2. * intersection) / (np.sum(mask_1_f) + np.sum(mask_2_f))
return score
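# Illustrative sketch (values are made up): the dice score of two 3x3 masks
# that share two "on" pixels out of three each.
# mask_a = np.array([[0, 1, 1], [0, 1, 0], [0, 0, 0]])
# mask_b = np.array([[0, 0, 1], [0, 1, 1], [0, 0, 0]])
# dice_coeff_foci(mask_a.flatten(), mask_b.flatten())  # 2*2 / (3+3) ~ 0.667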
# returns a 2D gaussian function
def gaussian(height, center_x, center_y, width):
'''Returns a circular 2D gaussian function with the given parameters.
width is the sigma of the gaussian (the same in x and y).
'''
# return lambda x,y: height*np.exp(-(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)
return lambda x,y: height*np.exp(-(((center_x-x)/width)**2+((center_y-y)/width)**2)/2)
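# Illustrative sketch (arbitrary parameter values): `gaussian` returns a
# callable that can be evaluated on the index grids of an image, which is
# how `fitgaussian` builds its residual.
# g = gaussian(height=100, center_x=5, center_y=5, width=2)
# model_img = g(*np.indices((11, 11)))  # peaks at row 5, column 5 with value 100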
# moments of a 2D gaussian
def moments(data):
'''
Returns (height, x, y, width)
The (circular) gaussian parameters of a 2D distribution, estimated from its moments.
width is the sigma of the gaussian.
'''
total = data.sum()
X, Y = np.indices(data.shape)
x = (X*data).sum()/total
y = (Y*data).sum()/total
col = data[:, int(y)]
width = float(np.sqrt(abs((np.arange(col.size)-y)**2*col).sum()/col.sum()))
row = data[int(x), :]
# width_y = np.sqrt(abs((np.arange(row.size)-x)**2*row).sum()/row.sum())
height = data.max()
return height, x, y, width
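# Illustrative sketch tying `moments`, `gaussian` and `fitgaussian` together
# on a synthetic spot (numbers are arbitrary):
# truth = (50.0, 7.0, 4.0, 2.0)                 # height, x, y, width
# data = gaussian(*truth)(*np.indices((15, 15)))
# fitgaussian(data)                             # ~ array([50., 7., 4., 2.]) (sign of width is arbitrary)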
# returns a 1D gaussian function
def gaussian1d(x, height, mean, sigma):
'''
x : data
height : height
mean : center
sigma : RMS width
'''
return height * np.exp(-(x-mean)**2 / (2*sigma**2))
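# Illustrative sketch (arbitrary numbers): `gaussian1d` is the model handed
# to scipy's curve_fit in `ring_analysis` below.
# xdata = np.arange(30)
# ydata = gaussian1d(xdata, height=10, mean=15, sigma=2)
# popt, pcov = curve_fit(gaussian1d, xdata, ydata, p0=[8, 14, 3])
# popt  # ~ [10., 15., 2.]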
# analyze ring fluorescence.
def ring_analysis(fov_id, peak_id, Cells, ring_plane='c2'):
'''Add information to the Cell objects about the location of the Z ring. Sums the fluorescent channel along the long axis of the cell. This can be plotted directly to give a good idea about the development of the ring. Also fits a gaussian to the profile.
Parameters
----------
fov_id : int
FOV number of the lineage to analyze.
peak_id : int
Peak number of the lineage to analyze.
Cells : dict of Cell objects (from a Lineages dictionary)
Cells should be prefiltered to match fov_id and peak_id.
ring_plane : str
The suffix of the channel to analyze. 'c1', 'c2', 'sub_c2', etc.
Usage
-----
for fov_id, peaks in Lineages.items():
for peak_id, Cells in peaks.items():
mm3.ring_analysis(fov_id, peak_id, Cells, ring_plane='sub_c2')
'''
peak_width_guess = 2
# Load data
ring_stack = load_stack(fov_id, peak_id, color=ring_plane)
seg_stack = load_stack(fov_id, peak_id, color='seg_unet')
# Load time table to determine first image index.
time_table = load_time_table()
times_all = np.array(np.sort(list(time_table[fov_id].keys())), np.int_)
t0 = times_all[0] # first time index
# Loop through cells
for Cell in Cells.values():
# initialize ring data arrays for cell
Cell.ring_locs = []
Cell.ring_heights = []
Cell.ring_widths = []
Cell.ring_medians = []
Cell.ring_profiles = []
# loop through each time point for this cell
for n, t in enumerate(Cell.times):
# Make mask of fluorescent channel using segmented image
ring_image_masked = np.copy(ring_stack[t-t0])
ring_image_masked[seg_stack[t-t0] != Cell.labels[n]] = 0
# Sum along long axis, use the profile_line function from skimage
# Use orientation of cell as calculated from the ellipsoid fit,
# the known length of the cell from the feret diameter,
# and a width that is greater than the cell width.
# find endpoints of line
centroid = Cell.centroids[n]
orientation = Cell.orientations[n]
length = Cell.lengths[n]
width = Cell.widths[n] * 1.25
# give 2 pixel buffer to each end to capture area outside cell.
p1 = (centroid[0] - np.sin(orientation) * (length+4)/2,
centroid[1] - np.cos(orientation) * (length+4)/2)
p2 = (centroid[0] + np.sin(orientation) * (length+4)/2,
centroid[1] + np.cos(orientation) * (length+4)/2)
# ensure old pole is always first point
if p1[0] > p2[0]:
p1, p2 = p2, p1 # python is cool
profile = profile_line(ring_image_masked, p1, p2, linewidth=width,
order=1, mode='constant', cval=0)
profile_indicies = np.arange(len(profile))
# subtract median from profile, using non-zero values for median
profile_median = np.median(profile[np.nonzero(profile)])
profile_sub = profile - profile_median
profile_sub[profile_sub < 0] = 0
# find peak position simply using maximum.
peak_index = np.argmax(profile)
peak_height = profile[peak_index]
peak_height_sub = profile_sub[peak_index]
try:
# Fit gaussian
p_guess = [peak_height_sub, peak_index, peak_width_guess]
popt, pcov = curve_fit(gaussian1d, profile_indicies,
profile_sub, p0=p_guess)
peak_width = popt[2]
except:
# information('Ring gaussian fit failed. {} {} {}'.format(fov_id, peak_id, t))
peak_width = float('nan')
# Add data to cells
Cell.ring_locs.append(peak_index - 3) # minus 3 because we added 2 before and line_profile adds 1.
Cell.ring_heights.append(peak_height)
Cell.ring_widths.append(peak_width)
Cell.ring_medians.append(profile_median)
Cell.ring_profiles.append(profile) # append whole profile
return
# Calculate Y projection intensity of a fluorescent channel per cell
def profile_analysis(fov_id, peak_id, Cells, profile_plane='c2'):
'''Calculate profile of plane along cell and add information to Cell object. Sums the fluorescent channel along the long axis of the cell.
Parameters
----------
fov_id : int
FOV number of the lineage to analyze.
peak_id : int
Peak number of the lineage to analyze.
Cells : dict of Cell objects (from a Lineages dictionary)
Cells should be prefiltered to match fov_id and peak_id.
profile_plane : str
The suffix of the channel to analyze. 'c1', 'c2', 'sub_c2', etc.
Usage
-----
'''
# Load data
fl_stack = load_stack(fov_id, peak_id, color=profile_plane)
seg_stack = load_stack(fov_id, peak_id, color='seg_unet')
# Load time table to determine first image index.
# load_time_table()
times_all = []
for fov in params['time_table']:
times_all = np.append(times_all, list(params['time_table'][fov].keys()))
times_all = np.unique(times_all)
times_all = np.sort(times_all)
times_all = np.array(times_all,np.int_)
t0 = times_all[0] # first time index
# Loop through cells
for Cell in Cells.values():
# initialize ring data arrays for cell
fl_profiles = []
# loop through each time point for this cell
for n, t in enumerate(Cell.times):
# Make mask of fluorescent channel using segmented image
image_masked = np.copy(fl_stack[t-t0])
image_masked[seg_stack[t-t0] != Cell.labels[n]] = 0
# Sum along long axis, use the profile_line function from skimage
# Use orientation of cell as calculated from the ellipsoid fit,
# the known length of the cell from the feret diameter,
# and a width that is greater than the cell width.
# find endpoints of line
centroid = Cell.centroids[n]
orientation = Cell.orientations[n]
length = Cell.lengths[n]
width = Cell.widths[n] * 1.25
# give 2 pixel buffer to each end to capture area outside cell.
p1 = (centroid[0] - np.sin(orientation) * (length+4)/2,
centroid[1] - np.cos(orientation) * (length+4)/2)
p2 = (centroid[0] + np.sin(orientation) * (length+4)/2,
centroid[1] + np.cos(orientation) * (length+4)/2)
# ensure old pole is always first point
if p1[0] > p2[0]:
p1, p2 = p2, p1 # python is cool
profile = profile_line(image_masked, p1, p2, linewidth=width,
order=1, mode='constant', cval=0)
fl_profiles.append(profile)
# append whole profile, using plane name
setattr(Cell, 'fl_profiles_'+profile_plane, fl_profiles)
return
# Calculate X projection at midcell and quarter position
def x_profile_analysis(fov_id, peak_id, Cells, profile_plane='sub_c2'):
'''Calculate cross-sectional (short-axis) intensity profiles at midcell and at the quarter position and add them to the Cell object.
Parameters
----------
fov_id : int
FOV number of the lineage to analyze.
peak_id : int
Peak number of the lineage to analyze.
Cells : dict of Cell objects (from a Lineages dictionary)
Cells should be prefiltered to match fov_id and peak_id.
profile_plane : str
The suffix of the channel to analyze. 'c1', 'c2', 'sub_c2', etc.
'''
# width to sum over in pixels
line_width = 6
# Load data
fl_stack = load_stack(fov_id, peak_id, color=profile_plane)
seg_stack = load_stack(fov_id, peak_id, color='seg_unet')
# Load time table to determine first image index.
time_table = load_time_table()
times_all = np.array(np.sort(list(time_table[fov_id].keys())), np.int_)
t0 = times_all[0] # first time index
# Loop through cells
for Cell in Cells.values():
# print(Cell.id)
# initialize data arrays for cell
midcell_fl_profiles = []
midcell_pts = []
quarter_fl_profiles = []
quarter_pts = []
# loop through each time point for this cell
for n, t in enumerate(Cell.times):
# Make mask of fluorescent channel using segmented image
image_masked = np.copy(fl_stack[t-t0])
# image_masked[seg_stack[t-t0] != Cell.labels[n]] = 0
# Sum along short axis, use the profile_line function from skimage
# Use orientation of cell as calculated from the ellipsoid fit,
# the known length of the cell from the feret diameter,
# and a width that is greater than the cell width.
# find end points for summing
centroid = Cell.centroids[n]
orientation = Cell.orientations[n]
length = Cell.lengths[n]
width = Cell.widths[n]
# midcell
# give 4 pixel buffer to each end to capture area outside cell.
md_p1 = (centroid[0] - np.cos(orientation) * (width+8)/2,
centroid[1] - np.sin(orientation) * (width+8)/2)
md_p2 = (centroid[0] + np.cos(orientation) * (width+8)/2,
centroid[1] + np.sin(orientation) * (width+8)/2)
# ensure lower x point is always first
if md_p1[1] > md_p2[1]:
md_p1, md_p2 = md_p2, md_p1 # python is cool
midcell_pts.append((md_p1, md_p2))
# print(t, centroid, orientation, md_p1, md_p2)
md_profile = profile_line(image_masked, md_p1, md_p2,
linewidth=line_width,
order=1, mode='constant', cval=0)
midcell_fl_profiles.append(md_profile)
# quarter position, want to measure at mother end
if orientation > 0:
yq = centroid[0] - np.sin(orientation) * 0.5 * (length * 0.5)
xq = centroid[1] + np.cos(orientation) * 0.5 * (length * 0.5)
else:
yq = centroid[0] + np.sin(orientation) * 0.5 * (length * 0.5)
from itertools import product
import numpy as np
import pytest
from mrrt import utils
array_modules = [np]
if utils.config.have_cupy:
import cupy
array_modules += [cupy]
@pytest.mark.parametrize(
"dtype, xp",
product(
[np.float32, np.float64, np.complex64, np.complex128], array_modules
),
)
def test_power_method_dtype(dtype, xp):
lam = utils.power_method(xp.eye(16, dtype=dtype), dtype=dtype)
assert abs(lam - 1) < 1e-5
# lam is real and has the same precision
assert lam.dtype is np.ones(1, dtype=dtype).real.dtype
def test_power_method_basic():
lam = utils.power_method(np.eye(4))
assert abs(lam - 1) < 1e-12
b, lam = utils.power_method(np.eye(4))
from . import moog
from .star import Star
import numpy as np
import datetime
import logging
from scipy import interpolate
import os
from .config import *
from .tools import read_csv
from collections import OrderedDict
from bokeh.plotting import *
from bokeh.models import HoverTool
logger = logging.getLogger(__name__)
def get_all(Data, output_file, species_ids=None, reference=None, grid='odfnew',
errors=False):
print('------------------------------------------------------')
print('Initializing ...')
start_time = datetime.datetime.now()
print('- Date and time: '+start_time.strftime('%d-%b-%Y, %H:%M:%S'))
print('- Model atmospheres: '+grid)
print('- Star data: '+Data.star_data_fname)
print('- Line list: '+Data.lines_fname)
if reference:
print('- Reference star: '+reference)
print('------------------------------------------------------')
if reference:
ref = Star(reference)
ref.get_data_from(Data)
if hasattr(ref, 'feh_model'): #####
ref.feh = getattr(ref, 'feh_model') #####
ref.get_model_atmosphere(grid)
else:
ref = None
fout = open(output_file, 'w')
header = 'id'
if species_ids is None:
species_codes = sorted(set(Data.lines['species']))
species_ids = getsp_ids(species_codes)
print('"species_ids" not provided')
print('Lines found for the following species: '+\
','.join(species_ids))
print('')
for species_id in species_ids:
header += ','+species_id+',e_'+species_id+',n_'+species_id
if reference:
header += ',['+species_id+'],e_['+species_id+\
'],n_['+species_id+']'
if errors:
header += ',err_'+species_id
fout.write(header + '\n')
for star_id in Data.star_data['id']:
line = star_id
print('')
print('*'*len(star_id))
print(star_id)
print('*'*len(star_id))
s = Star(star_id)
try:
s.get_data_from(Data)
if hasattr(s, 'feh_model'):
s.feh = getattr(s, 'feh_model')
s.get_model_atmosphere(grid)
except:
print('No data available (1)')
logger.warning('Could not get all the necessary data')
print(len(species_ids))
line += ','*(len(species_ids)*2)
if reference:
line += ','*(len(species_ids)*2)
fout.write(line+'\n')
continue
print('Using [Fe/H] = {0:6.3f} for the model atmosphere'.format(s.feh))
get_one(s, species_ids, ref, errors=errors)
for species_id in species_ids:
print('\n'+species_id+'\n'+'-'*len(species_id))
if not hasattr(s, species_id):
print('No data available (2)')
logger.warning('There are no '+species_id+' abundances '+\
'for this star')
line += ',,,'
if reference:
line += ',,,'
if errors:
line += ','
continue
mab = np.mean(getattr(s, species_id)['ab'])
sab = np.std(getattr(s, species_id)['ab'])
nab = len(getattr(s, species_id)['ab'])
print("ABS = {0:6.3f} +/- {1:6.3f} , n = {2:.0f}".\
format(mab, sab, nab))
line += ',{0:.3f},{1:.3f},{2:.0f}'.format(mab, sab, nab)
if reference:
da = getattr(s, species_id)['difab']
da = np.array(da, dtype=np.float) #convert None to np.nan
mda = np.ma.masked_array(da, np.isnan(da))
ndifab = mda.count()
if ndifab > 0:
mdifab = np.mean(mda)
sdifab = np.std(mda)
else:
mdifab = -99.0
sdifab = -99.0
print("DIF = {0:6.3f} +/- {1:6.3f} , n = {2:.0f}".\
format(mdifab, sdifab, ndifab))
line += ',{0:.3f},{1:.3f},{2:.0f}'.\
format(mdifab, sdifab, ndifab)
if errors:
print("ERR = {0:5.3f} (DIF)".\
format(getattr(s, species_id)['err_difab']))
line += ',{0:.3f}'.\
format(getattr(s, species_id)['err_difab'])
else:
mdifab = 0
if errors:
print("ERR = {0:5.3f} (ABS)".\
format(getattr(s, species_id)['err_ab']))
line += ',{0:.3f}'.\
format(getattr(s, species_id)['err_ab'])
print('')
llhd1 = 'Wavelength ABS RES '
llhd2 = '---------- ----- ------'
if reference:
llhd1 += ' DIF RES '
llhd2 += ' ----- -----'
print(llhd1+'\n'+llhd2)
for wi, ab, difab in \
zip(getattr(s, species_id)['ww'],
getattr(s, species_id)['ab'],
getattr(s, species_id)['difab']):
if reference and difab is not None:
print("{0:10.4f} {1:6.3f} {2:6.3f} {3:6.3f} {4:6.3f}".\
format(wi, ab, ab-mab, difab, difab-mdifab))
else:
print("{0:10.4f} {1:6.3f} {2:6.3f}".\
format(wi, ab, ab-mab))
fout.write(line+'\n')
fout.close()
print('')
print('------------------------------------------------------')
end_time = datetime.datetime.now()
print('- Date and time: '+end_time.strftime('%d-%b-%Y, %H:%M:%S'))
delta_t = (end_time - start_time).seconds
hours, remainder = divmod(delta_t, 3600)
minutes, seconds = divmod(remainder, 60)
print('- Time elapsed: %sH %sM %sS' % (hours, minutes, seconds))
print('Done!')
print('------------------------------------------------------')
print('')
def get_one(Star, species_ids=None, Ref=object, silent=True, errors=False):
logger.info('Working on: '+Star.name)
if species_ids is None:
species_codes = sorted(set(Star.linelist['species']))
species_ids = getsp_ids(species_codes)
if not silent:
print('"species_ids" not provided')
print('Lines found for the following species: '+\
','.join(species_ids))
print('')
for species_id in species_ids:
species = getsp(species_id)
if not silent:
print("*** Begin "+species_id+":")
if species is None:
logger.warning('Not doing calculations for: '+species_id)
continue
logger.info('Working on: '+species_id)
moog.abfind(Star, species, species_id)
if not hasattr(Star, species_id):
logger.warning('Did not calculate '+species_id+' abundances')
continue
if species_id == 'OI':
if not silent:
print('777 nm oxygen abundances will be NLTE corrected')
ao = []
for wx in [7771.94, 7774.16, 7775.39]:
k = np.where(abs(Star.OI['ww']-wx) < 0.05)
if len(k[0]) == 1:
ao.append(np.mean(Star.OI['ab'][k]))
else:
ao.append(0)
aon = nlte_triplet(Star.teff, Star.logg, Star.feh, ao,
silent=silent)
k= np.where(np.array(ao) > 0)
getattr(Star, species_id)['ab'] = aon[k]
getattr(Star, species_id)['ref'] = None
if hasattr(Ref, 'name'):
logger.info('Differential analysis: '+Ref.name)
if Star.name == Ref.name:
logger.warning('Reference star object redefined!')
Ref = Star
if not hasattr(Ref, species_id):
logger.info('Calculating reference star abundances: '+Ref.name)
moog.abfind(Ref, species, species_id)
if species_id == 'OI':
if not silent:
print('777 nm oxygen abundances will be NLTE '\
+'corrected (Reference)')
ao = []
for wx in [7771.94, 7774.16, 7775.39]:
k = np.where(abs(Ref.OI['ww']-wx) < 0.05)
if len(k[0]) == 1:
ao.append(np.mean(Ref.OI['ab'][k]))
else:
ao.append(0)
aon = nlte_triplet(Ref.teff, Ref.logg, Ref.feh, ao,
silent=silent)
k= np.where(np.array(ao) > 0)
getattr(Ref, species_id)['ab'] = aon[k]
else:
logger.info('Reference star has '+species_id+\
' abundances computed already: '+Ref.name)
ws = getattr(Star, species_id)['ww']
wr = getattr(Ref, species_id)['ww']
ww = np.intersect1d(ws, wr)
k = [i for i, w in zip(range(len(ws)), ws) if w in ww]
kr = [i for i, w in zip(range(len(wr)), wr) if w in ww]
a = getattr(Star, species_id)['ab'][k] - \
getattr(Ref, species_id)['ab'][kr]
ax, ix = [], 0
for wx in ws:
if wx in ww:
ax.append(a[ix])
ix += 1
else:
ax.append(None)
getattr(Star, species_id)['difab'] = ax
getattr(Star, species_id)['ref'] = Ref.name
if not silent:
ab = getattr(Star, species_id)['ab']
difab = getattr(Star, species_id)['difab']
aa = np.array(ab, dtype=np.float) #convert None to np.nan
maa = np.ma.masked_array(aa, np.isnan(aa))
da = np.array(difab, dtype=np.float) #convert None to np.nan
mda = np.ma.masked_array(da, np.isnan(da))
print("A({0}) = {1:6.3f} +/- {2:5.3f} (# of lines = {3})".\
format(species_id, np.mean(maa), np.std(maa), maa.count()))
if hasattr(Ref, 'name'):
print("[{0}/H] = {1:6.3f} +/- {2:5.3f} (# of lines = {3})".\
format(species_id, np.mean(mda), np.std(mda), mda.count()))
if errors:
error(Star, species_id, Ref=Ref, silent=silent)
if not silent:
print('---' + species_id + ' done')
if not silent and len(species_ids) >= 1:
print('All species completed')
def error(Star_in, species_id, Ref=object, silent=True):
s = Star()
s.__dict__ = Star_in.__dict__.copy()
if not silent:
print('-----------------------------')
print('Error propagation for '+species_id+':')
try:
Ref.model_atmosphere_grid
dab = getattr(Star_in, species_id)['difab']
l2l_sct = np.std(dab)/np.sqrt(max([len(dab),2])-1)
abx = 'difab'
except:
try:
ab = getattr(Star_in, species_id)['ab']
l2l_sct = np.std(ab)/np.sqrt(max([len(ab),2])-1)
abx = 'ab'
except:
logger.error('Must calculate abundances before errors')
return None
if hasattr(s, 'err_teff'):
if s.err_teff > 0:
s.teff += s.err_teff
s.get_model_atmosphere(s.model_atmosphere_grid)
get_one(s, [species_id], Ref=Ref)
ap = np.mean(getattr(s, species_id)[abx])
s.teff -= 2*s.err_teff
s.get_model_atmosphere(s.model_atmosphere_grid)
get_one(s, [species_id], Ref=Ref)
am = np.mean(getattr(s, species_id)[abx])
a_teff = abs(ap-am)/2.
s.teff += s.err_teff
else:
a_teff = 0.
else:
a_teff = 0.
if hasattr(s, 'err_logg'):
if s.err_logg > 0:
s.logg += s.err_logg
s.get_model_atmosphere(s.model_atmosphere_grid)
get_one(s, [species_id], Ref=Ref)
ap = np.mean(getattr(s, species_id)[abx])
s.logg -= 2*s.err_logg
s.get_model_atmosphere(s.model_atmosphere_grid)
get_one(s, [species_id], Ref=Ref)
am = np.mean(getattr(s, species_id)[abx])
a_logg = abs(ap-am)/2.
s.logg += s.err_logg
else:
a_logg = 0.
else:
a_logg = 0.
if hasattr(s, 'err_feh'):
if s.err_feh > 0:
s.feh += s.err_feh
s.get_model_atmosphere(s.model_atmosphere_grid)
get_one(s, [species_id], Ref=Ref)
ap = np.mean(getattr(s, species_id)[abx])
s.feh -= 2*s.err_feh
s.get_model_atmosphere(s.model_atmosphere_grid)
get_one(s, [species_id], Ref=Ref)
am = np.mean(getattr(s, species_id)[abx])
a_feh = abs(ap-am)/2.
s.feh += s.err_feh
else:
a_feh = 0.
else:
a_feh = 0.
if hasattr(s, 'err_vt'):
if s.err_vt > 0:
s.vt += s.err_vt
s.get_model_atmosphere(s.model_atmosphere_grid)
get_one(s, [species_id], Ref=Ref)
ap = np.mean(getattr(s, species_id)[abx])
s.vt -= 2*s.err_vt
s.get_model_atmosphere(s.model_atmosphere_grid)
get_one(s, [species_id], Ref=Ref)
am = np.mean(getattr(s, species_id)[abx])
a_vt = abs(ap-am)/2.
s.vt += s.err_vt
else:
a_vt = 0.
else:
a_vt = 0.
a_tot = np.sqrt(a_teff**2+a_logg**2+a_feh**2+a_vt**2+l2l_sct**2)
if not silent:
print('Line to line scatter: {0:.3f}'.format(l2l_sct))
print('Error from Teff: {0:.3f}'.format(a_teff))
print('Error from logg: {0:.3f}'.format(a_logg))
print('Error from [Fe/H]: {0:.3f}'.format(a_feh))
print('Error from vt: {0:.3f}'.format(a_vt))
print(' -------')
print('Total abundance error: {0:.3f}'.format(a_tot))
print('-----------------------------')
try:
Ref.model_atmosphere_grid
getattr(Star_in, species_id)['err_difab'] = a_tot
except:
getattr(Star_in, species_id)['err_ab'] = a_tot
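# Illustrative numbers for the quadrature sum above: with a line-to-line
# scatter of 0.010 dex and parameter terms of 0.008, 0.004, 0.003 and 0.005
# dex, the total error is
# sqrt(0.010**2 + 0.008**2 + 0.004**2 + 0.003**2 + 0.005**2) ~ 0.015 dex.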
sp_map = {
'LiI' : 3.0,
'BeI' : 4.0,
'BeII': 4.1,
'BI' : 5.0,
'CI' : 6.0,
'CH' : 106.0,
'NI' : 7.0,
'OI' : 8.0,
'FI' : 9.0,
'NaI' : 11.0,
'MgI' : 12.0,
'MgII': 12.1,
'AlI' : 13.0,
'SiI' : 14.0,
'PI' : 15.0,
'SI' : 16.0,
'KI' : 19.0,
'CaI' : 20.0,
'ScI' : 21.0,
'ScII': 21.1,
'TiI' : 22.0,
'TiII': 22.1,
'VI' : 23.0,
'CrI' : 24.0,
'CrII': 24.1,
'MnI' : 25.0,
'FeI' : 26.0,
'FeII': 26.1,
'CoI' : 27.0,
'NiI' : 28.0,
'CuI' : 29.0,
'ZnI' : 30.0,
'RbI' : 37.0,
'SrI' : 38.0,
'SrII': 38.1,
'YII' : 39.1,
'ZrII': 40.1,
'BaII': 56.1,
'LaII': 57.1,
'CeII': 58.1,
'NdII': 60.1,
'SmII': 62.1,
'EuII': 63.1,
'DyII': 66.1
}
def getsp(species_id):
try:
species = sp_map[species_id]
except:
logger.warning('species id not recognized: '+species_id)
return None
return species
def getsp_ids(species_list):
species_ids = []
for species_code in species_list:
try:
species_id = [key for key in sp_map if sp_map[key] == species_code][0]
species_ids.append(species_id)
except:
logger.warning('species_code '+str(species_code)+' not found')
return species_ids
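# Illustrative examples based on sp_map above:
# getsp('FeII')                     # -> 26.1
# getsp_ids([26.0, 26.1, 22.1])     # -> ['FeI', 'FeII', 'TiII']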
def nlte_triplet(teff, logg, feh, ao, silent=True):
if feh >= 0.4:
feh = 0.4
grid = read_csv(os.path.join(OTHER_PATH ,'nlte_triplet.csv'))
t,g,f,dao0,dao1,dao2=[],[],[],[],[],[]
for i in range(640):
rg = range(i*7, i*7+7)
x0 = interpolate.griddata(grid['ao'][rg], grid['dao0'][rg],\
ao[0], method='cubic')
x1 = interpolate.griddata(grid['ao'][rg], grid['dao1'][rg],\
ao[1], method='cubic')
x2 = interpolate.griddata(grid['ao'][rg], grid['dao2'][rg],\
ao[2], method='cubic')
x0, x1, x2 = float(x0), float(x1), float(x2)
t.append(grid['teff'][rg[0]])
g.append(grid['logg'][rg[0]])
f.append(grid['feh'][rg[0]])
dao0.append(x0)
dao1.append(x1)
dao2.append(x2)
t = np.array(t)
g = np.array(g)
f = np.array(f)
dao0 = np.array(dao0)
dao1 = np.array(dao1)
dao2 = np.array(dao2)
#!/usr/bin/env python
from __future__ import division, print_function
import os
import re
import sys
import argparse
import cv2
import pickle
import numpy as np
import h5py
import chainer
from chainer.links import caffe
from chainer import cuda
"""
Resize and crop an image to 224x224 (parts of the source code are from chainer_imagenet_tools/inspect_caffenet.py).
Extract features of an image frame using a Caffe pretrained model and chainer.
"""
def mismatch(error_message):
print('An error occurred in loading a property of model.')
print('Probably there is a mismatch between the versions of Chainer.')
print('Remove the pickle file and try again.')
print(error_message)
sys.exit(1)
def chainer_extract_features(input_folder, batchsize, layer='fc7'):
i = 0
z = xp.zeros((len(frames), 4096), dtype=np.float32)
x_batch = np.ndarray((batchsize, 3, in_size, in_size), dtype=np.float32)
import torch
import numpy as np
import matplotlib.pylab as plt
from matplotlib import cm
from time import time
from os.path import join
from Hessian.GAN_hessian_compute import get_full_hessian
from GAN_utils import loadBigGAN
#%%
BGAN = loadBigGAN()
L2dist_col = []
def Hess_hook(module, fea_in, fea_out):
print("hooker on %s"%module.__class__)
ref_feat = fea_out.detach().clone()
ref_feat.requires_grad_(False)
L2dist = torch.pow(fea_out - ref_feat, 2).sum()
L2dist_col.append(L2dist)
return None
#%%
clas_vec = BGAN.embeddings.weight[:,120].detach().clone().unsqueeze(0)
feat = torch.cat((0.7*torch.randn(1, 128).cuda(), clas_vec), dim=1)
feat.requires_grad_(True)
#%%
"""Compute Hessian towards middle layers layer by layer"""
datadir = r"E:\OneDrive - Washington University in St. Louis\HessNetArchit\BigGAN"
L2dist_col = []
torch.cuda.empty_cache()
H1 = BGAN.generator.gen_z.register_forward_hook(Hess_hook)
img = BGAN.generator(feat, 0.7)
H1.remove()
T0 = time()
H00 = get_full_hessian(L2dist_col[0], feat)
eva00, evc00 = np.linalg.eigh(H00)
print("Spent %.2f sec computing" % (time() - T0))
np.savez(join(datadir, "eig_gen_z.npz"), H=H00, eva=eva00, evc=evc00)
plt.plot(np.log10(eva00)[::-1])
plt.title("gen_z Linear Layer Spectra" % ())
plt.xlim([0, len(evc00)])
plt.savefig(join(datadir, "spectrum_gen_z.png" ))
plt.show()
#%%
"""Compute Hessian towards each layer"""
for blocki in range(len(BGAN.generator.layers)):
L2dist_col = []
torch.cuda.empty_cache()
H1 = BGAN.generator.layers[blocki].register_forward_hook(Hess_hook)
img = BGAN.generator(feat, 0.7)
H1.remove()
T0 = time()
H00 = get_full_hessian(L2dist_col[0], feat)
eva00, evc00 = np.linalg.eigh(H00)
print("Spent %.2f sec computing" % (time() - T0))
np.savez(join(datadir, "eig_genBlock%02d.npz"%blocki), H=H00, eva=eva00, evc=evc00)
plt.plot(np.log10(eva00)[::-1])
plt.title("GenBlock %d Spectra" % (blocki,))
plt.xlim([0, len(evc00)])
plt.savefig(join(datadir, "spectrum_genBlock%d.png" % (blocki)))
plt.show()
#%%
plt.figure(figsize=[7,8])
Ln = len(BGAN.generator.layers)
eva00 = np.load(join(datadir, "eig_gen_z.npz"))["eva"] #, H=H00, eva=eva00, evc=evc00)
plt.plot(np.log10(eva00)[::-1], label="gen_z")
for blocki in range(Ln):
eva00 = np.load(join(datadir, "eig_genBlock%02d.npz" % blocki))["eva"]
plt.plot(np.log10(eva00)[::-1], color=cm.jet((blocki+1) / Ln),
label=("GenBlock%02d" % blocki) if blocki!=8 else "SelfAttention")
plt.xlim([0, len(evc00)])
plt.xlabel("eigenvalue rank")
plt.ylabel("log(eig value)")
plt.title("BigGAN Hessian Spectra of Intermediate Layers Compared")
plt.subplots_adjust(top=0.9)
plt.legend()
plt.savefig(join(datadir, "spectrum_all_Layers_cmp.png"))
plt.show()
#%% Normalized
plt.figure(figsize=[7,8])
Ln = len(BGAN.generator.layers)
eva00 = np.load(join(datadir, "eig_gen_z.npz"))["eva"] #, H=H00, eva=eva00, evc=evc00)
plt.plot(np.log10(eva00/eva00.max())[::-1], label="gen_z")
for blocki in range(Ln):
eva00 = np.load(join(datadir, "eig_genBlock%02d.npz" % blocki))["eva"]
plt.plot(np.log10(eva00/eva00.max())[::-1], color=cm.jet((blocki+1) / Ln),
label=("GenBlock%02d" % blocki) if blocki!=8 else "SelfAttention")
plt.xlim([0, len(evc00)])
plt.xlabel("eigenvalue rank")
plt.ylabel("log(eig value/max(eig))")
plt.title("BigGAN Hessian Normalized Spectra of Intermediate Layers Compared")
plt.subplots_adjust(top=0.9)
plt.legend()
plt.savefig(join(datadir, "spectrum_all_Layers_maxnorm.png"))
plt.show()
#%%
"""Loading and analyzing data of Normal GAN"""
datadir = r"E:\OneDrive - Washington University in St. Louis\HessNetArchit\BigGAN"
figdir = r"E:\OneDrive - Washington University in St. Louis\HessNetArchit\BigGAN"
Ln = len(BGAN.generator.layers) # 13
layernames = ["gen_z"] + [("GenBlock%02d" % blocki) if blocki!=8 else "SelfAttention" for blocki in range(Ln)] + [
"Image"]
data = np.load(join(datadir, "eig_gen_z.npz"))
eva_col = [data["eva"]]
evc_col = [data["evc"]]
H_col = [data["H"]]
for blocki in range(Ln):
data = np.load(join(datadir, "eig_genBlock%02d.npz" % blocki))
eva_col.append(data["eva"])
evc_col.append(data["evc"])
H_col.append(data["H"])
realfigdir = r"E:\OneDrive - Washington University in St. Louis\Hessian_summary\BigGAN"
data = np.load(join(realfigdir, "H_avg_1000cls.npz"))
eva_Havg, evc_Havg, Havg = data['eigvals_avg'], data['eigvects_avg'], data['H_avg']
H_col.append(Havg)
eva_col.append(eva_Havg)
evc_col.append(evc_Havg)
#%% plot examples as A applied to B
#%% Compute Relationships between Hessian in different layers
Lnum = len(H_col)
corr_mat_lin = np.zeros((Lnum, Lnum))
corr_mat_log = np.zeros((Lnum, Lnum))
log_reg_slope = np.zeros((Lnum, Lnum))
log_reg_intcp = np.zeros((Lnum, Lnum))
for Li in range(len(H_col)):
evc = evc_col[Li]
eva = eva_col[Li]
for Lj in range(len(H_col)):
H = H_col[Lj]
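# alphavec[k] = evc[:, k].T @ H @ evc[:, k]: the curvature of layer Lj's
# Hessian along the k-th eigenvector of layer Li.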
alphavec = np.diag(evc.T @ H @ evc)
corr_mat_lin[Li, Lj] = np.corrcoef(alphavec, eva)[0,1]
corr_mat_log[Li, Lj] = np.corrcoef(np.log10(alphavec), np.log10(eva))[0,1]
slope, intercept = np.polyfit(np.log10(eva), np.log10(alphavec), 1)
log_reg_slope[Li, Lj] = slope
log_reg_intcp[Li, Lj] = intercept
#%%
fig = plot_layer_mat(corr_mat_lin, layernames=layernames, titstr="Linear Correlation of Amplification in BigGAN")
fig.savefig(join(figdir, "BigGAN_Layer_corr_lin_mat.pdf"))
fig = plot_layer_mat(corr_mat_log, layernames=layernames, titstr="Log scale Correlation of Amplification in BigGAN")
fig.savefig(join(figdir, "BigGAN_Layer_corr_log_mat.pdf"))
fig = plot_layer_mat(log_reg_slope, layernames=layernames, titstr="Log scale Slope of Amplification in BigGAN")
fig.savefig(join(figdir, "BigGAN_Layer_log_reg_slope.pdf"))
fig = plot_layer_mat(log_reg_intcp, layernames=layernames, titstr="Log scale intercept of Amplification in BigGAN")
fig.savefig(join(figdir, "BigGAN_Layer_log_reg_intercept.pdf"))
#%%
savestr = "BigGAN"
colorseq = [cm.jet(Li / (Lnum-1) ) for Li in range(Lnum)] # color for each curve.
for Li in range(Lnum):
alphavec_col = []
evc = evc_col[Li]
eva = eva_col[Li]
plt.figure(figsize=[5,4])
for Lj in range(Lnum):
H = H_col[Lj]
alphavec = np.diag(evc.T @ H @ evc)
alphavec_col.append(alphavec)
plt.plot(np.log10(alphavec[::-1]), label=layernames[Lj], color=colorseq[Lj], lw=2)
plt.xlabel("Rank of eigenvector (layer %d %s)"%(Li, layernames[Li]))
plt.ylabel("Amplification") # (layer %d %s)"%(Lj, layernames[Lj]
plt.legend()
plt.savefig(join(figdir, "%s_Ampl_curv_evc_Layer%d.png"%(savestr, Li)))
plt.savefig(join(figdir, "%s_Ampl_curv_evc_Layer%d.pdf"%(savestr, Li)))
plt.show()
#%%
def plot_layer_mat(layer_mat, layernames=None, titstr="Correlation of Amplification in BigGAN"):
"""Formatting function for ploting Layer by Layer matrix"""
Lnum = layer_mat.shape[0]
fig = plt.figure(figsize=[9, 8])
plt.matshow(layer_mat, fignum=0)
layermat_nan = layer_mat.copy()
np.fill_diagonal(layermat_nan, np.nan)
plt.title("%s across %d layers"
"\nNon-Diagonal mean %.3f median %.3f"%(titstr, Lnum, np.nanmean(layermat_nan), np.nanmedian(layermat_nan)), fontsize=15)
fig.axes[0].tick_params(axis="x", bottom=True, top=False, labelbottom=True, labeltop=False)
if layernames is not None:
plt.yticks(range(Lnum), layernames)
plt.ylim(-0.5, Lnum - 0.5)
plt.xticks(range(Lnum), layernames, rotation=35, rotation_mode='anchor', ha='right')
plt.xlim(-0.5, Lnum - 0.5)
plt.colorbar()
plt.subplots_adjust(top=0.85)
plt.show()
return fig
def compute_plot_layer_corr_mat(eva_col, evc_col, H_col, layernames, titstr="BigGAN", savestr="BigGAN", figdir=""):
Lnum = len(H_col)
corr_mat_lin = np.zeros((Lnum, Lnum))
corr_mat_log = np.zeros((Lnum, Lnum))
log_reg_slope = np.zeros((Lnum, Lnum))
log_reg_intcp = np.zeros((Lnum, Lnum))
for Li in range(len(H_col)):
evc = evc_col[Li]
eva = eva_col[Li]
for Lj in range(len(H_col)):
H = H_col[Lj]
alphavec = np.diag(evc.T @ H @ evc)
log10alphavec = np.log10(alphavec)
log10eva = np.log10(eva)
corr_mat_lin[Li, Lj] = np.corrcoef(alphavec, eva)[0,1]
corr_mat_log[Li, Lj] = np.corrcoef(log10alphavec, log10eva)[0,1]
nanmask = (~np.isnan(log10alphavec)) * (~np.isnan(log10eva))
slope, intercept = np.polyfit(log10eva[nanmask], log10alphavec[nanmask], 1)
log_reg_slope[Li, Lj] = slope
log_reg_intcp[Li, Lj] = intercept
fig1 = plot_layer_mat(corr_mat_lin, layernames=layernames, titstr="Linear Correlation of Amplification in %s"%titstr)
fig1.savefig(join(figdir, "%s_Layer_corr_lin_mat.pdf"%savestr))
fig2 = plot_layer_mat(corr_mat_log, layernames=layernames, titstr="Log scale Correlation of Amplification in %s"%titstr)
fig2.savefig(join(figdir, "%s_Layer_corr_log_mat.pdf"%savestr))
fig3 = plot_layer_mat(log_reg_slope, layernames=layernames, titstr="Log scale Slope of Amplification in %s"%titstr)
fig3.savefig(join(figdir, "%s_Layer_log_reg_slope.pdf"%savestr))
fig4 = plot_layer_mat(log_reg_intcp, layernames=layernames, titstr="Log scale intercept of Amplification in %s"%titstr)
fig4.savefig(join(figdir, "%s_Layer_log_reg_intercept.pdf"%savestr))
return corr_mat_lin, corr_mat_log, log_reg_slope, log_reg_intcp, fig1, fig2, fig3, fig4,
def plot_layer_amplif_curves(eva_col, evc_col, H_col, layernames, savestr="", figdir="",
maxnorm=False):
Lnum = len(evc_col)
colorseq = [cm.jet(Li / (Lnum - 1)) for Li in range(Lnum)] # color for each curve.
for Li in range(Lnum): # source of eigenvector basis
alphavec_col = []
evc = evc_col[Li]
eva = eva_col[Li]
plt.figure(figsize=[5, 4])
for Lj in range(Lnum): # hessian target
H = H_col[Lj]
alphavec = np.diag(evc.T @ H @ evc)
alphavec_col.append(alphavec)
scaler = alphavec[-1] if maxnorm else 1
plt.plot(np.log10(alphavec[::-1] / scaler), label=layernames[Lj], color=colorseq[Lj], lw=2, alpha=0.7)
plt.xlabel("Rank of eigenvector (layer %d %s)" % (Li, layernames[Li]))
plt.ylabel("Amplification (normalize max to 1)" if maxnorm else "Amplification") # (layer %d %s)"%(Lj, layernames[Lj]
plt.legend()
plt.savefig(join(figdir, "%s_Ampl_curv_evc_Layer%d%s.png" % (savestr, Li, "_mxnorm" if maxnorm else "")))
plt.savefig(join(figdir, "%s_Ampl_curv_evc_Layer%d%s.pdf" % (savestr, Li, "_mxnorm" if maxnorm else "")))
plt.show()
def plot_layer_amplif_consistency(eigval_col, eigvec_col, layernames, layeridx=[0,1,-1], titstr="GAN", figdir="",
savelabel=""):
nsamp = len(layeridx)
print("Plot hessian of layers : ", [layernames[idx] for idx in layeridx])
fig = plt.figure(figsize=[10, 10], constrained_layout=False)
spec = fig.add_gridspec(ncols=nsamp, nrows=nsamp, left=0.075, right=0.975, top=0.9, bottom=0.05)
for axi, Li in enumerate(layeridx):
eigval_i, eigvect_i = eigval_col[Li], eigvec_col[Li]
for axj, Lj in enumerate(layeridx):
eigval_j, eigvect_j = eigval_col[Lj], eigvec_col[Lj]
inpr = eigvect_i.T @ eigvect_j
vHv_ij = np.diag((inpr @ np.diag(eigval_j)) @ inpr.T)
ax = fig.add_subplot(spec[axi, axj])
if axi == axj:
ax.hist(np.log10(eigval_i), 20)
else:
ax.scatter(np.log10(eigval_i), np.log10(vHv_ij))
import builtins
import collections
import collections.abc
import copy
from datetime import datetime, timedelta
import functools
import itertools
import json
import logging
import os
import sys
import threading
from bson.objectid import ObjectId, InvalidId
import cachetools
import entrypoints
import event_model
from dask.array.core import cached_cumsum, normalize_chunks
import numpy
import pymongo
import pymongo.errors
import toolz.itertoolz
import xarray
from tiled.adapters.xarray import DatasetAdapter
from tiled.structures.array import (
ArrayStructure,
ArrayMacroStructure,
BuiltinDtype,
Kind,
StructDtype,
)
from tiled.structures.xarray import (
DataArrayStructure,
DataArrayMacroStructure,
DatasetMacroStructure,
)
from tiled.adapters.mapping import MapAdapter
from tiled.query_registration import QueryTranslationRegistry
from tiled.queries import FullText
from tiled.utils import (
SpecialUsers,
)
from tiled.adapters.utils import (
tree_repr,
IndexersMixin,
)
from tiled.utils import import_object, OneShotCachedMap, UNCHANGED
from .common import BlueskyEventStreamMixin, BlueskyRunMixin, CatalogOfBlueskyRunsMixin
from .queries import (
BlueskyMapAdapter,
RawMongo,
_PartialUID,
_ScanID,
TimeRange,
partial_uid,
scan_id,
time_range,
)
from .server import router
CHUNK_SIZE_LIMIT = os.getenv("DATABROKER_CHUNK_SIZE_LIMIT", "100MB")
MAX_AD_FRAMES_PER_CHUNK = int(os.getenv("DATABROKER_MAX_AD_FRAMES_PER_CHUNK", "10"))
logger = logging.getLogger(__name__)
def _try_descr(field_metadata):
descr = field_metadata.get("dtype_descr")
if descr:
if len(descr) == 1 and descr[0][0] == "":
return None
dtype = StructDtype.from_numpy_dtype(numpy.dtype(descr))
if dtype.max_depth() > 1:
raise RuntimeError(
"We can not yet cope with multiple nested structured dtypes. "
f"{descr}"
)
return dtype
else:
return None
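# Illustrative sketch (hypothetical field names): a data key that carries
# field_metadata = {"dtype_descr": [("setpoint", "<f8"), ("readback", "<f8")]}
# yields a StructDtype here, while a plain scalar data key (no usable
# "dtype_descr") returns None.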
def structure_from_descriptor(descriptor, sub_dict, max_seq_num, unicode_columns=None):
# Build the time coordinate.
time_shape = (max_seq_num - 1,)
time_chunks = normalize_chunks(
("auto",) * len(time_shape),
shape=time_shape,
limit=CHUNK_SIZE_LIMIT,
dtype=FLOAT_DTYPE.to_numpy_dtype(),
)
time_variable = ArrayStructure(
macro=ArrayMacroStructure(
shape=time_shape,
chunks=time_chunks,
dims=["time"],
),
micro=FLOAT_DTYPE,
)
time_data_array = DataArrayStructure(
macro=DataArrayMacroStructure(
variable=time_variable, coords={}, coord_names=[], name="time"
),
micro=None,
)
if unicode_columns is None:
unicode_columns = {}
dim_counter = itertools.count()
data_vars = {}
metadata = {"data_vars": {}, "coords": {"time": {"attrs": {}}}}
for key, field_metadata in descriptor["data_keys"].items():
# if the EventDescriptor doesn't provide names for the
# dimensions (it's optional) use the same default dimension
# names that xarray would.
try:
dims = ["time"] + field_metadata["dims"]
except KeyError:
ndim = len(field_metadata["shape"])
dims = ["time"] + [f"dim_{next(dim_counter)}" for _ in range(ndim)]
attrs = {}
# Record which object (i.e. device) this column is associated with,
# which enables one to find the relevant configuration, if any.
for object_name, keys_ in descriptor.get("object_keys", {}).items():
for item in keys_:
if item == key:
attrs["object"] = object_name
break
units = field_metadata.get("units")
if units:
if isinstance(units, str):
attrs["units_string"] = units
# TODO We may soon add a more structured units type, which
# would likely be a dict here.
if sub_dict == "data":
shape = tuple((max_seq_num - 1, *field_metadata["shape"]))
# if we have a dtype_descr, then this is a structured array column
dtype = _try_descr(field_metadata)
dt_str = field_metadata.get("dtype_str")
if dtype is not None:
if len(shape) > 2:
raise RuntimeError(
"We do not yet support general structured arrays, only 1D ones."
)
# if we have a detailed string, trust that
elif dt_str is not None:
dtype = BuiltinDtype.from_numpy_dtype(numpy.dtype(dt_str))
# otherwise guess!
else:
dtype = JSON_DTYPE_TO_MACHINE_DATA_TYPE[field_metadata["dtype"]]
if dtype.kind == Kind.unicode:
array = unicode_columns[key]
dtype = BuiltinDtype.from_numpy_dtype(
numpy.dtype(f"<U{array.itemsize // 4}")
)
else:
# assert sub_dict == "timestamps"
shape = tuple((max_seq_num - 1,))
dtype = FLOAT_DTYPE
numpy_dtype = dtype.to_numpy_dtype()
if "chunks" in field_metadata:
# If the Event Descriptor tells us a preferred chunking, use that.
suggested_chunks = field_metadata["chunks"]
elif (0 in shape) or (numpy_dtype.itemsize == 0):
# special case to avoid warning from dask
suggested_chunks = shape
elif len(shape) == 4:
# TEMP: Special-case 4D data in a way that optimizes single-frame
# access of area detector data.
# If we choose 1 that would make single-frame access fast
# but many-frame access too slow.
suggested_chunks = (
min(MAX_AD_FRAMES_PER_CHUNK, shape[0]),
min(MAX_AD_FRAMES_PER_CHUNK, shape[1]),
"auto",
"auto",
)
else:
suggested_chunks = ("auto",) * len(shape)
try:
chunks = normalize_chunks(
suggested_chunks,
shape=shape,
limit=CHUNK_SIZE_LIMIT,
dtype=numpy_dtype,
)
except Exception as err:
raise ValueError(
"Failed to normalize chunks with suggested_chunks. Params: "
f"suggested_chunks={suggested_chunks} "
f"shape={shape} "
f"limit={CHUNK_SIZE_LIMIT} "
f"dtype={numpy_dtype}"
) from err
variable = ArrayStructure(
macro=ArrayMacroStructure(shape=shape, chunks=chunks, dims=dims),
micro=dtype,
)
data_array = DataArrayStructure(
macro=DataArrayMacroStructure(
variable, coords={}, coord_names=["time"], name=key
),
micro=None,
)
data_vars[key] = data_array
metadata["data_vars"][key] = {"attrs": attrs}
return (
DatasetMacroStructure(data_vars=data_vars, coords={"time": time_data_array}),
metadata,
)
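# Illustrative sketch (hypothetical data key, not a real descriptor): a key
# declared as {"shape": [512, 512], "dtype": "number", "dims": ["y", "x"]}
# becomes a data variable with dims ["time", "y", "x"] and shape
# (max_seq_num - 1, 512, 512), chunked by normalize_chunks under
# CHUNK_SIZE_LIMIT.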
class BlueskyRun(MapAdapter, BlueskyRunMixin):
specs = ["BlueskyRun"]
def __init__(
self,
*args,
handler_registry,
transforms,
root_map,
datum_collection,
resource_collection,
**kwargs,
):
super().__init__(*args, **kwargs)
self.transforms = transforms or {}
self.root_map = root_map
self._datum_collection = datum_collection
self._resource_collection = resource_collection
# This is used to create the Filler on first access.
self._init_handler_registry = handler_registry
self._filler = None
self._filler_creation_lock = threading.RLock()
def must_revalidate(self):
return self._metadata["stop"] is not None
@property
def metadata_stale_at(self):
return datetime.utcnow() + timedelta(hours=1)
@property
def entries_stale_at(self):
if self._metadata["stop"] is not None:
return datetime.utcnow() + timedelta(hours=1)
@property
def metadata(self):
"Metadata about this MongoAdapter."
# If there are transforms configured, shadow the 'start' and 'stop' documents
# with transfomed copies.
transformed = {}
if "start" in self.transforms:
transformed["start"] = self.transforms["start"](self._metadata["start"])
if "stop" in self.transforms:
transformed["stop"] = self.transforms["stop"](self._metadata["stop"])
metadata = dict(collections.ChainMap(transformed, self._metadata))
return metadata
@property
def filler(self):
# Often multiple requests prompt this to be created in parallel.
# We need this to be threadsafe.
with self._filler_creation_lock:
if self._filler is None:
filler = event_model.Filler(
handler_registry=self._init_handler_registry,
root_map=self.root_map,
inplace=False,
)
for descriptor in itertools.chain(
*(stream.metadata["descriptors"] for stream in self.values())
):
filler("descriptor", descriptor)
self._filler = filler
return self._filler
@property
def register_handler(self):
return self.filler.register_handler
@property
def deregister_handler(self):
return self.filler.deregister_handler
@property
def handler_registry(self):
return self.filler.handler_registry
def new_variation(self, *args, **kwargs):
return super().new_variation(
*args,
handler_registry=self.handler_registry,
transforms=self.transforms,
root_map=self.root_map,
datum_collection=self._datum_collection,
resource_collection=self._resource_collection,
**kwargs,
)
def get_datum_for_resource(self, resource_uid):
return self._datum_collection.find({"resource": resource_uid}, {"_id": False})
def get_resource(self, uid):
doc = self._resource_collection.find_one({"uid": uid}, {"_id": False})
# Some old resource documents don't have a 'uid' and they are
# referenced by '_id'.
if doc is None:
try:
_id = ObjectId(uid)
except InvalidId:
pass
else:
doc = self._resource_collection.find_one({"_id": _id}, {"_id": False})
doc["uid"] = uid
if doc is None:
raise ValueError(f"Could not find Resource with uid={uid}")
if "resource" in self.transforms:
transformed_doc = self.transforms["resource"](doc)
else:
transformed_doc = doc
return transformed_doc
def lookup_resource_for_datum(self, datum_id):
doc = self._datum_collection.find_one({"datum_id": datum_id})
if doc is None:
raise ValueError(f"Could not find Datum with datum_id={datum_id}")
return doc["resource"]
def single_documents(self, fill):
if fill:
raise NotImplementedError("Only fill=False is implemented.")
external_fields = {} # map descriptor uid to set of external fields
datum_cache = {} # map datum_id to datum document
# Track which Resource and Datum documents we have yielded so far.
resource_uids = set()
datum_ids = set()
# Interleave the documents from the streams in time order.
merged_iter = toolz.itertoolz.merge_sorted(
*(stream.iter_descriptors_and_events() for stream in self.values()),
key=lambda item: item[1]["time"],
)
yield ("start", self.metadata["start"])
for name, doc in merged_iter:
# Insert Datum, Resource as needed, and then yield (name, doc).
if name == "event":
for field in external_fields[doc["descriptor"]]:
datum_id = doc["data"][field]
if datum_id not in datum_ids:
# We haven't yielded this Datum yet. Look it up, and yield it.
try:
# Check to see if it's been pre-fetched.
datum = datum_cache.pop(datum_id)
except KeyError:
resource_uid = self.lookup_resource_for_datum(datum_id)
if resource_uid not in resource_uids:
# We haven't yielded this Resource yet. Look it up, and yield it.
resource = self.get_resource(resource_uid)
resource_uids.add(resource_uid)
yield ("resource", resource)
# Pre-fetch *all* the Datum documents for this resource in one query.
datum_cache.update(
{
doc["datum_id"]: doc
for doc in self.get_datum_for_resource(
resource_uid
)
}
)
# Now get the Datum we originally were looking for.
datum = datum_cache.pop(datum_id)
datum_ids.add(datum_id)
yield ("datum", datum)
elif name == "descriptor":
# Track which fields ("data keys") hold references to external data.
external_fields[doc["uid"]] = {
key
for key, value in doc["data_keys"].items()
if value.get("external")
}
yield name, doc
stop_doc = self.metadata["stop"]
if stop_doc is not None:
yield ("stop", stop_doc)
def documents(self, fill, size=25):
"""
Yield ``(name, document)`` items from the run.
Batch Event and Datum documents into pages of up to ``size`` rows,
while preserving time-ordering.
"""
yield from batch_documents(self.single_documents(fill=fill), size)
class BlueskyEventStream(MapAdapter, BlueskyEventStreamMixin):
specs = ["BlueskyEventStream"]
def __init__(self, *args, event_collection, cutoff_seq_num, run, **kwargs):
super().__init__(*args, **kwargs)
self._event_collection = event_collection
self._cutoff_seq_num = cutoff_seq_num
self._run = run
@property
def must_revalidate(self):
# The keys in this node are *always* stable.
return False
@property
def metadata_stale_at(self):
if self._run.metadata["stop"] is not None:
return datetime.utcnow() + timedelta(hours=1)
return datetime.utcnow() + timedelta(hours=1)
@property
def entries_stale_at(self):
if self._run.metadata["stop"] is not None:
return datetime.utcnow() + timedelta(hours=1)
@property
def metadata(self):
# If there are transforms configured, shadow the 'descriptor' documents
# with transfomed copies.
transformed = {}
transforms = self._run.transforms
if "descriptor" in transforms:
transformed["descriptors"] = [
transforms["descriptor"](d) for d in self._metadata["descriptors"]
]
metadata = dict(collections.ChainMap(transformed, self._metadata))
return metadata
def new_variation(self, **kwargs):
return super().new_variation(
event_collection=self._event_collection,
cutoff_seq_num=self._cutoff_seq_num,
run=self._run,
**kwargs,
)
def iter_descriptors_and_events(self):
for descriptor in sorted(self.metadata["descriptors"], key=lambda d: d["time"]):
yield ("descriptor", descriptor)
# TODO Grab paginated chunks.
events = list(
self._event_collection.find(
{
"descriptor": descriptor["uid"],
"seq_num": {"$lte": self._cutoff_seq_num},
},
{"_id": False},
sort=[("time", pymongo.ASCENDING)],
)
)
for event in events:
yield ("event", event)
class ArrayFromDocuments:
structure_family = "array"
metadata = {}
def __init__(self, data_array_adapter):
self._data_array_adapter = data_array_adapter
def read(self, slice):
return self._data_array_adapter.read(slice)
def read_block(self, block, slice=None):
return self._data_array_adapter.read_block(block, slice=slice)
def macrostructure(self):
return self._data_array_adapter.macrostructure().variable.macro
def microstructure(self):
return self._data_array_adapter.macrostructure().variable.micro
class TimeArrayFromDocuments:
structure_family = "array"
metadata = {}
def __init__(self, data_array_adapter):
self._data_array_adapter = data_array_adapter
def read(self, slice):
return self._data_array_adapter.read(slice)
def read_block(self, block, slice=None):
return self._data_array_adapter.read_block(None, block, coord="time", slice=slice)
def macrostructure(self):
return self._data_array_adapter.macrostructure().coords["time"].macro
def microstructure(self):
return self._data_array_adapter.macrostructure().coords["time"].micro
class DataArrayFromDocuments:
"""
Represents one column
"""
structure_family = "xarray_data_array"
def __init__(self, dataset_adapter, field):
self._dataset_adapter = dataset_adapter
self._field = field
self.metadata = dataset_adapter.metadata["data_vars"].get(field, {})
def read_block(self, block, slice=None):
return self._dataset_adapter.read_block(self._field, block, slice=slice)
def read(self, slice=None):
da = self._dataset_adapter.read(fields=[self._field])[self._field]
if slice:
da = da[slice]
return da
def __getitem__(self, key):
if key == "variable":
return ArrayFromDocuments(self)
elif key == "coords":
return MapAdapter({"time": TimeArrayFromDocuments(self)})
else:
raise KeyError(key)
def macrostructure(self):
return self._dataset_adapter.macrostructure().data_vars[self._field].macro
def microstructure(self):
return self._dataset_adapter.macrostructure().data_vars[self._field].micro
class DatasetFromDocuments:
"""
An xarray.Dataset from a sub-dict of an Event stream
"""
structure_family = "xarray_dataset"
def __init__(
self,
*,
run,
stream_name,
cutoff_seq_num,
event_descriptors,
event_collection,
root_map,
sub_dict,
metadata=None,
):
self._run = run
self._stream_name = stream_name
self._cutoff_seq_num = cutoff_seq_num
self._event_descriptors = event_descriptors
self._event_collection = event_collection
self._sub_dict = sub_dict
self.root_map = root_map
# metadata should look like
# {
# "stream_name": "...",
# "descriptors": [...],
# "attrs": {...},
# "data_vars": {...},
# "coords": {"time": {}},
# }
# We intentionally do not put the descriptors in attrs (ruins UI)
# but we put the stream_name there.
self.metadata = self._run[
self._stream_name
].metadata.copy() # {"descriptors": [...], "stream_name: "..."}
# Put the stream_name in attrs so it shows up in the xarray repr.
self.metadata["attrs"] = {"stream_name": self.metadata["stream_name"]}
# The `data_keys` in a series of Event Descriptor documents with the same
# `name` MUST be alike, so we can choose one arbitrarily.
# IMPORTANT: Access via self.metadata so that the transforms are applied.
descriptor, *_ = self.metadata["descriptors"]
unicode_columns = {}
if self._sub_dict == "data":
# Collect the keys (column names) that are of unicode data type.
unicode_keys = []
for key, field_metadata in descriptor["data_keys"].items():
if field_metadata["dtype"] == "string":
# Skip this if it has a dtype_str with an itemsize.
dtype_str = field_metadata.get("dtype_str")
if dtype_str is not None:
if numpy.dtype(dtype_str).itemsize != 0:
continue
unicode_keys.append(key)
# Load the all the data for unicode columns to figure out the itemsize.
# We have no other choice, except to *guess* but we'd be in
# trouble if our guess were too small, and we'll waste space
# if our guess is too large.
if unicode_keys:
unicode_columns.update(self._get_columns(unicode_keys, slices=None))
self._macrostructure, metadata = structure_from_descriptor(
descriptor, self._sub_dict, self._cutoff_seq_num, unicode_columns
)
self.metadata.update(metadata) # adds "data_vars" and "coords"
self._data_vars = MapAdapter(
{
field: DataArrayFromDocuments(self, field)
for field in self._macrostructure.data_vars
}
)
self._coords = MapAdapter(
{"time": MapAdapter({"variable": TimeArrayFromDocuments(self)})}
)
def __repr__(self):
return f"<{type(self).__name__}>"
@property
def metadata_stale_at(self):
if self._run.metadata["stop"] is not None:
return datetime.utcnow() + timedelta(hours=1)
return datetime.utcnow() + timedelta(hours=1)
@property
def content_stale_at(self):
if self._run.metadata["stop"] is not None:
return datetime.utcnow() + timedelta(hours=1)
def macrostructure(self):
return self._macrostructure
def microstructure(self):
return None
def read(self, fields=None):
# num_blocks = (range(len(n)) for n in chunks)
# for block in itertools.product(*num_blocks):
structure = self.macrostructure()
data_arrays = {}
if fields is None:
keys = list(structure.data_vars)
else:
keys = fields
columns = self._get_columns(keys, slices=None)
# Build the time coordinate.
time_coord = self._get_time_coord(slice_params=None)
for key, data_array in structure.data_vars.items():
if (fields is not None) and (key not in fields):
continue
variable = structure.data_vars[key].macro.variable
dtype = variable.micro.to_numpy_dtype()
raw_array = columns[key]
if raw_array.dtype != dtype:
logger.warning(
f"{key!r} actually has dtype {raw_array.dtype.str!r} "
f"but was reported as having dtype {dtype.str!r}. "
"It will be converted to the reported type, "
"but this should be fixed by setting 'dtype_str' "
"in the data_key of the EventDescriptor. "
f"RunStart UID: {self._run.metadata['start']['uid']!r}"
)
array = raw_array.astype(dtype)
else:
array = raw_array
data_array = xarray.DataArray(
array,
attrs=self.metadata["data_vars"][key]["attrs"],
dims=variable.macro.dims,
coords={"time": time_coord},
)
data_arrays[key] = data_array
return xarray.Dataset(data_arrays, coords={"time": time_coord})
def __getitem__(self, key):
if key == "data_vars":
return self._data_vars
elif key == "coords":
return self._coords
else:
raise KeyError(key)
def read_block(self, variable, block, coord=None, slice=None):
structure = self.macrostructure()
if coord == "time":
data_structure = structure.coords["time"].macro.variable
chunks = data_structure.macro.chunks
cumdims = [cached_cumsum(bds, initial_zero=True) for bds in chunks]
slices_for_chunks = [
[builtins.slice(s, s + dim) for s, dim in zip(starts, shapes)]
for starts, shapes in zip(cumdims, chunks)
]
(slice_,) = [s[index] for s, index in zip(slices_for_chunks, block)]
return self._get_time_coord(slice_params=(slice_.start, slice_.stop))
elif coord is not None:
# We only have a "time" coordinate. Any other coordinate is invalid.
raise KeyError(coord)
        data_structure = structure.data_vars[variable].macro.variable
        dtype = data_structure.micro.to_numpy_dtype()
chunks = data_structure.macro.chunks
cumdims = [cached_cumsum(bds, initial_zero=True) for bds in chunks]
slices_for_chunks = [
[builtins.slice(s, s + dim) for s, dim in zip(starts, shapes)]
for starts, shapes in zip(cumdims, chunks)
]
slices = [s[index] for s, index in zip(slices_for_chunks, block)]
raw_array = self._get_columns([variable], slices=slices)[variable]
if raw_array.dtype != dtype:
logger.warning(
f"{variable!r} actually has dtype {raw_array.dtype.str!r} "
f"but was reported as having dtype {dtype.str!r}. "
"It will be converted to the reported type, "
"but this should be fixed by setting 'dtype_str' "
"in the data_key of the EventDescriptor. "
f"RunStart UID: {self._run.metadata['start']['uid']!r}"
)
array = raw_array.astype(dtype)
else:
array = raw_array
if slice is not None:
array = array[slice]
return array
@functools.lru_cache(maxsize=1024)
def _get_time_coord(self, slice_params):
if slice_params is None:
min_seq_num = 1
max_seq_num = self._cutoff_seq_num
else:
min_seq_num = 1 + slice_params[0]
max_seq_num = 1 + slice_params[1]
column = []
descriptor_uids = [doc["uid"] for doc in self.metadata["descriptors"]]
def populate_column(min_seq_num, max_seq_num):
cursor = self._event_collection.aggregate(
[
# Select Events for this Descriptor with the appropriate seq_num range.
{
"$match": {
"descriptor": {"$in": descriptor_uids},
# It's important to use a half-open interval here
# so that the boundaries work.
"seq_num": {"$gte": min_seq_num, "$lt": max_seq_num},
},
},
# Include only the fields of interest.
{
"$project": {"descriptor": 1, "seq_num": 1, "time": 1},
},
# Sort by time.
{"$sort": {"time": 1}},
# If seq_num is repeated, take the latest one.
{
"$group": {
"_id": "$seq_num",
"doc": {"$last": "$$ROOT"},
},
},
                    # Re-sort, now by seq_num, which *should* be equivalent to
                    # sorting by time but might not be in weird cases
                    # (which I'm not aware of ever occurring) where an NTP sync
                    # moves system time backward mid-run.
{"$sort": {"doc.seq_num": 1}},
# Extract the column of interest as an array.
{
"$group": {
"_id": "$descriptor",
"column": {"$push": "$doc.time"},
},
},
]
)
(result,) = cursor
column.extend(result["column"])
        # Aim for 10 MB pages to stay safely clear of MongoDB's hard limit
        # of 16 MB.
TARGET_PAGE_BYTESIZE = 10_000_000
page_size = TARGET_PAGE_BYTESIZE // 8 # estimated row byte size is 8
boundaries = list(range(min_seq_num, 1 + max_seq_num, page_size))
if boundaries[-1] != max_seq_num:
boundaries.append(max_seq_num)
for min_, max_ in zip(boundaries[:-1], boundaries[1:]):
populate_column(min_, max_)
return numpy.array(column)
def _get_columns(self, keys, slices):
if slices is None:
min_seq_num = 1
max_seq_num = self._cutoff_seq_num
else:
slice_ = slices[0]
min_seq_num = 1 + slice_.start
max_seq_num = 1 + slice_.stop
to_stack = self._inner_get_columns(tuple(keys), min_seq_num, max_seq_num)
result = {}
for key, value in to_stack.items():
array = numpy.stack(value)
if slices:
sliced_array = array[(..., *slices[1:])]
else:
sliced_array = array
result[key] = sliced_array
return result
@functools.lru_cache(maxsize=1024)
def _inner_get_columns(self, keys, min_seq_num, max_seq_num):
columns = {key: [] for key in keys}
# IMPORTANT: Access via self.metadata so that transforms are applied.
descriptors = self.metadata["descriptors"]
descriptor_uids = [doc["uid"] for doc in descriptors]
# The `data_keys` in a series of Event Descriptor documents with the
# same `name` MUST be alike, so we can just use the first one.
data_keys = [descriptors[0]["data_keys"][key] for key in keys]
is_externals = ["external" in data_key for data_key in data_keys]
expected_shapes = [tuple(data_key["shape"] or []) for data_key in data_keys]
def populate_columns(keys, min_seq_num, max_seq_num):
# This closes over the local variable columns and appends to its
# contents.
cursor = self._event_collection.aggregate(
[
# Select Events for this Descriptor with the appropriate seq_num range.
{
"$match": {
"descriptor": {"$in": descriptor_uids},
# It's important to use a half-open interval here
# so that the boundaries work.
"seq_num": {"$gte": min_seq_num, "$lt": max_seq_num},
},
},
# Include only the fields of interest.
{
"$project": {
"descriptor": 1,
"seq_num": 1,
**{f"{self._sub_dict}.{key}": 1 for key in keys},
},
},
# Sort by time.
{"$sort": {"time": 1}},
# If seq_num is repeated, take the latest one.
{
"$group": {
"_id": "$seq_num",
"doc": {"$last": "$$ROOT"},
},
},
                    # Re-sort, now by seq_num, which *should* be equivalent to
                    # sorting by time but might not be in weird cases
                    # (which I'm not aware of ever occurring) where an NTP sync
                    # moves system time backward mid-run.
{"$sort": {"doc.seq_num": 1}},
# Extract the column of interest as an array.
{
"$group": {
"_id": "$descriptor",
**{
key: {"$push": f"$doc.{self._sub_dict}.{key}"}
for key in keys
},
},
},
]
)
(result,) = cursor
for key, expected_shape, is_external in zip(
keys, expected_shapes, is_externals
):
if expected_shape and (not is_external):
validated_column = list(
map(
                            lambda item: _validate_shape(
                                key, numpy.asarray(item)
import numpy as np
import pandas as pd
import statsmodels.api as sm
from ..univariate.metrics import t_value
__eps = np.finfo(np.float32).eps
def fit_robust_lm(x: np.ndarray, y: np.ndarray) -> list:
"""
Fits a robust linear model between Numpy vectors `x` and `y`, with an
intercept. Returns a list: [intercept, slope] of the fit. No extra
checking on data consistency is done.
See also:
regression.repeated_median_slope
"""
rlm_model = sm.RLM(
y, np.vstack([np.ones(x.size), x.ravel()]).T, M=sm.robust.norms.HuberT()
)
rlm_results = rlm_model.fit()
return rlm_results.params
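# Illustrative usage sketch (not from the original module): shows the
# [intercept, slope] output of fit_robust_lm on made-up noisy data with one
# gross outlier. All values below are arbitrary examples.
def _demo_fit_robust_lm():
    rng = np.random.default_rng(0)
    x = np.linspace(0.0, 10.0, 50)
    y = 2.0 + 0.5 * x + rng.normal(scale=0.1, size=x.size)
    y[5] = 25.0  # a gross outlier that would drag an ordinary least-squares fit
    intercept, slope = fit_robust_lm(x, y)
    print(f"intercept={intercept:.3f}, slope={slope:.3f}")  # close to 2.0 and 0.5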
def repeated_median_slope(x, y, nowarn=False):
"""
Robust slope calculation.
https://en.wikipedia.org/wiki/Repeated_median_regression
An elegant (simple) method to compute the robust slope between a vector `x` and `y`.
INVESTIGATE: algorithm speed-ups via these articles:
https://link.springer.com/article/10.1007/PL00009190
http://www.sciencedirect.com/science/article/pii/S0020019003003508
"""
# Slope
medians = []
x = x.copy().ravel()
y = y.copy().ravel()
if not (nowarn):
assert len(x) > 2, "More than two samples are required for this function."
assert len(x) == len(y), "Vectors x and y must have the same length."
for i in np.arange(len(x)):
inner_medians = []
for j in np.arange(len(y)):
den = x[j] - x[i]
if j != i and den != 0:
inner_medians.append((y[j] - y[i]) / den)
medians.append(np.nanmedian(inner_medians))
return np.nanmedian(medians)
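# Illustrative usage sketch (not from the original module): a single corrupted
# sample barely moves the repeated median slope. The data are synthetic.
def _demo_repeated_median_slope():
    x = np.arange(10.0)
    y = 3.0 * x + 1.0
    y[-1] = 100.0  # corrupt one point
    print(repeated_median_slope(x, y))  # stays very close to 3.0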
def simple_robust_regression(
x, y, na_rm=None, conflevel=0.95, nowarn=False, pi_resolution=50
):
"""
x and y: iterables
na_rm: None; no effect for robust regression. Here for consistency with the non-robust case.
nowarn: If True, then no error checking/warnings are issued. The user is committing to do that
themselves ahead of time.
    TODO: handle the missing-values case still. See the `multiple_linear_regression` function,
    especially for residuals: N residuals are expected afterwards, even if fewer than N points were
    used during the fitting. Rows with missing values in either x or y should yield missing residuals.
Simple robust regression between an `x` and a `y` using the `repeated_median_slope` method
to calculate the slope. The intercept is the median intercept, when using that slope and the
provided `x` and `y` values.
The rest of the classical output from a regression are based on these robust parameter
estimates.
"""
x_, y_ = x.copy().ravel(), y.copy().ravel()
x, y = x_[~np.isnan(x_) & ~np.isnan(y_)], y_[~np.isnan(x_) & ~np.isnan(y_)]
slope = repeated_median_slope(x, y, nowarn=nowarn)
intercept = np.nanmedian(y - slope * x)
mean_x, mean_y = np.mean(x), np.mean(y)
total_ssq = np.sum(np.power(y - mean_y, 2))
out = {}
out["N"] = min(
x.size - np.count_nonzero(np.isnan(x)), y.size - np.count_nonzero(np.isnan(y))
)
out["intercept"] = intercept
out["coefficients"] = [
slope,
]
out["fitted_values"] = intercept + slope * x
regression_ssq = np.sum(np.power(out["fitted_values"] - mean_y, 2))
out["residuals"] = y - out["fitted_values"]
residual_ssq = np.sum(out["residuals"] * out["residuals"])
    # For the robust method, calculate it this way, since there is no guarantee that RegSS or RSS is < TSS.
    # This ensures that TSS = RegSS + RSS, and R2 is the ratio RegSS/TSS.
# https://learnche.org/pid/least-squares-modelling/least-squares-model-analysis
total_ssq = regression_ssq + residual_ssq
out["R2"] = regression_ssq / total_ssq
out["SE"] = np.sqrt(residual_ssq / (len(x) - 2))
out["x_ssq"] = np.sum(np.power(x - np.mean(x), 2))
c_t = t_value(1 - (1 - conflevel) / 2, out["N"] - 2) # 2 fitted parameters
# out["t_value"] = np.array([c_t]) # for consistency with other regression models.
# "pi" = prediction interval
    pi_range = np.linspace(np.min(x), np.max(x)
from .basetrain import BaseTrain
import numpy as np
import os
from sklearn.model_selection import train_test_split
# utilitiy libs
import ntpath
import json
import pickle
import keras
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from keras.callbacks import Callback
from keras.preprocessing.image import ImageDataGenerator
import sklearn.utils
# metrics for postprocessing of the results
import sklearn
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score, \
recall_score, classification_report, \
f1_score, roc_auc_score
import pprint
def preprocess_imgwithnoise(image_tensor):
    # preprocessing_function: function that will be applied on each input.
# The function will run before any other modification on it.
# The function should take one argument:
# one image (Numpy tensor with rank 3),
# and should output a Numpy tensor with the same shape.
assert image_tensor.shape[0] == image_tensor.shape[1]
stdmult = 0.1
imsize = image_tensor.shape[0]
numchans = image_tensor.shape[2]
for i in range(numchans):
feat = image_tensor[..., i]
image_tensor[..., i] = image_tensor[..., i] + np.random.normal(
scale=stdmult*np.std(feat), size=feat.size).reshape(imsize, imsize)
return image_tensor
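# Illustrative usage sketch (not from the original module): the function above
# is intended to be passed as Keras' `preprocessing_function`. The image sizes
# and labels below are arbitrary examples.
def _demo_preprocess_imgwithnoise():
    demo_generator = ImageDataGenerator(
        preprocessing_function=preprocess_imgwithnoise)
    fake_images = np.random.rand(4, 32, 32, 3).astype("float32")
    fake_labels = np.eye(2)[np.random.randint(0, 2, size=4)]
    batch_x, batch_y = next(demo_generator.flow(
        fake_images, fake_labels, batch_size=2))
    print(batch_x.shape, batch_y.shape)  # (2, 32, 32, 3) (2, 2)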
class TestCallback(Callback):
def __init__(self):
# self.test_data = test_data
self.aucs = []
def on_epoch_end(self, epoch, logs={}):
# x, y = self.test_data
# x = self.model.validation_data[0]
# y = self.model.validation_data[1]
x = self.validation_data[0]
y = self.validation_data[1]
loss, acc = self.model.evaluate(x, y, verbose=0)
print('\nTesting loss: {}, acc: {}\n'.format(loss, acc))
predicted = self.model.predict(x)
self.aucs.append(roc_auc_score(y, predicted))
predicted = self.model.predict_classes(x)
ytrue = np.argmax(y, axis=1)
print('Mean accuracy score: ', accuracy_score(ytrue, predicted))
print('F1 score:', f1_score(ytrue, predicted))
print('Recall:', recall_score(ytrue, predicted))
print('Precision:', precision_score(ytrue, predicted))
        print('\n classification report:\n',
classification_report(ytrue, predicted))
print('\n confusion matrix:\n', confusion_matrix(ytrue, predicted))
class TrainCNN(BaseTrain):
def __init__(self, dnnmodel, batch_size, NUM_EPOCHS, AUGMENT):
self.dnnmodel = dnnmodel
self.batch_size = batch_size
self.NUM_EPOCHS = NUM_EPOCHS
self.AUGMENT = AUGMENT
def saveoutput(self, modelname, outputdatadir):
modeljsonfile = os.path.join(outputdatadir, modelname + "_model.json")
historyfile = os.path.join(
outputdatadir, modelname + '_history' + '.pkl')
finalweightsfile = os.path.join(
outputdatadir, modelname + '_final_weights' + '.h5')
# save model
if not os.path.exists(modeljsonfile):
# serialize model to JSON
model_json = self.dnnmodel.to_json()
with open(modeljsonfile, "w") as json_file:
json_file.write(model_json)
print("Saved model to disk")
# save history
with open(historyfile, 'wb') as file_pi:
pickle.dump(self.HH.history, file_pi)
# save final weights
self.dnnmodel.save(finalweightsfile)
def summaryinfo(self):
summary = {
'batch_size': self.batch_size,
'epochs': self.NUM_EPOCHS,
'augment': self.AUGMENT,
'class_weight': self.class_weight
}
pprint.pprint(summary)
def configure(self, tempdatadir):
# initialize loss function, SGD optimizer and metrics
loss = 'binary_crossentropy'
optimizer = Adam(lr=1e-5,
beta_1=0.9,
beta_2=0.99,
epsilon=1e-08,
decay=0.0)
metrics = ['accuracy']
self.modelconfig = self.dnnmodel.compile(loss=loss,
optimizer=optimizer,
metrics=metrics)
tempfilepath = os.path.join(
tempdatadir, "weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5")
        # available callbacks
checkpoint = ModelCheckpoint(tempfilepath,
monitor='val_acc',
verbose=1,
save_best_only=True,
mode='max')
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5,
patience=10, min_lr=1e-8)
testcheck = TestCallback()
self.callbacks = [checkpoint, testcheck]
def loaddirs(self, traindatadir, testdatadir, listofpats_train, listofpats_test):
''' Get list of file paths '''
self.testfilepaths = []
for root, dirs, files in os.walk(testdatadir):
for file in files:
if any(pat in file for pat in listofpats_test):
                    self.testfilepaths.append(os.path.join(root, file))
''' Get list of file paths '''
self.filepaths = []
for root, dirs, files in os.walk(traindatadir):
for file in files:
if any(pat in file for pat in listofpats_train):
self.filepaths.append(os.path.join(root, file))
        # AUGMENT WITH REAL DATA: also add matching files from the real dataset
listofpats_train.remove(listofpats_test[0])
for root, dirs, files in os.walk(testdatadir):
for file in files:
# if all(pat not in file for pat in listofpats_train):
if any(pat in file for pat in listofpats_train):
self.filepaths.append(os.path.join(root, file))
print('training pats: ', listofpats_train)
print('testing pats: ', listofpats_test)
print("testing data is found in: ", root)
print("training data is found in: ", root)
def _formatdata(self, images):
images = images.swapaxes(1, 3)
        # downcast to 32-bit floats to reduce memory
images = images.astype("float32")
return images
def loadtestdata(self):
''' LOAD TESTING DATA '''
for idx, datafile in enumerate(self.testfilepaths):
imagedata = np.load(datafile)
image_tensor = imagedata['image_tensor']
metadata = imagedata['metadata'].item()
if idx == 0:
image_tensors = image_tensor
ylabels = metadata['ylabels']
else:
image_tensors = np.append(image_tensors, image_tensor, axis=0)
ylabels = np.append(ylabels, metadata['ylabels'], axis=0)
        # one-hot encode labels: [1, 0] means class 0, [0, 1] means class 1
invert_y = 1 - ylabels
ylabels = np.concatenate((invert_y, ylabels), axis=1)
# format the image tensor correctly
image_tensors = self._formatdata(image_tensors)
self.X_test = image_tensors
self.y_test = ylabels
def loadtrainingdata(self):
''' LOAD TRAINING DATA '''
for idx, datafile in enumerate(self.filepaths):
imagedata = np.load(datafile)
image_tensor = imagedata['image_tensor']
metadata = imagedata['metadata'].item()
if idx == 0:
image_tensors = image_tensor
ylabels = metadata['ylabels']
else:
image_tensors = np.append(image_tensors, image_tensor, axis=0)
ylabels = np.append(ylabels, metadata['ylabels'], axis=0)
        # one-hot encode labels: [1, 0] means class 0, [0, 1] means class 1
invert_y = 1 - ylabels
ylabels = np.concatenate((invert_y, ylabels), axis=1)
# format the data correctly
class_weight = sklearn.utils.compute_class_weight('balanced',
np.unique(
ylabels).astype(int),
np.argmax(ylabels, axis=1))
image_tensors = self._formatdata(image_tensors)
self.X_train = image_tensors
self.y_train = ylabels
self.class_weight = class_weight
def train(self):
self._loadgenerator()
X_train = self.X_train
X_test = self.X_test
y_train = self.y_train
y_test = self.y_test
class_weight = self.class_weight
callbacks = self.callbacks
dnnmodel = self.dnnmodel
print("Training data: ", X_train.shape, y_train.shape)
print("Testing data: ", X_test.shape, y_test.shape)
print("Class weights are: ", class_weight)
test = np.argmax(y_train, axis=1)
print("class imbalance: ", np.sum(test), len(test))
        # augment data (or not) and then train the model!
if not self.AUGMENT:
print('Not using data augmentation. Implement Solution still!')
HH = dnnmodel.fit(X_train, y_train,
steps_per_epoch=X_train.shape[0] // self.batch_size,
epochs=self.NUM_EPOCHS,
validation_data=(X_test, y_test),
shuffle=True,
class_weight=class_weight,
callbacks=callbacks)
else:
print('Using real-time data augmentation.')
# self.generator.fit(X_train)
HH = dnnmodel.fit_generator(self.generator.flow(X_train, y_train, batch_size=self.batch_size),
steps_per_epoch=X_train.shape[0] // self.batch_size,
epochs=self.NUM_EPOCHS,
validation_data=(X_test, y_test),
shuffle=True,
class_weight=class_weight,
callbacks=callbacks, verbose=2)
self.HH = HH
'''
    These two functions directly load the test/train datasets.
'''
def loadtrainingdata_vars(self, Xmain_train, y_train):
y_train = np.array(y_train)[:,np.newaxis]
# Xmain_train = np.concatenate(Xmain_train, axis=0)
Xmain_train = np.vstack(Xmain_train)[..., np.newaxis]
print(y_train.shape)
print(Xmain_train.shape)
        # one-hot encode labels: [1, 0] means class 0, [0, 1] means class 1
invert_y = 1 - y_train
y_train = np.concatenate((invert_y, y_train), axis=1)
# format the data correctly
class_weight = sklearn.utils.compute_class_weight('balanced',
np.unique(
y_train).astype(int),
np.argmax(y_train, axis=1))
self.X_train = Xmain_train
self.y_train = y_train
self.class_weight = class_weight
def loadtestingdata_vars(self, Xmain_test, y_test):
y_test = np.array(y_test)[:,np.newaxis]
        Xmain_test = np.vstack(Xmain_test)
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
def render_traj(traj, vf=None, scaler=None):
fig1 = plt.figure(1,figsize=plt.figaspect(0.5))
fig1.clear()
plt.figure(fig1.number)
fig1.set_size_inches(8, 6, forward=True)
gridspec.GridSpec(3,2)
t = np.asarray(traj['t'])
t1 = t[0:-1]
pos = np.asarray(traj['position'])
vel = np.asarray(traj['velocity'])
norm_pos = np.linalg.norm(pos,axis=1)
norm_vel = np.linalg.norm(vel,axis=1)
pos_error = np.asarray(traj['pos_error'])
plt.subplot2grid( (3,2) , (0,0) )
plt.plot(t, pos_error[:,0],'r',label='X')
plt.plot(t, pos_error[:,1],'b',label='Y')
plt.plot(t, pos_error[:,2],'g',label='Z')
plt.legend(bbox_to_anchor=(0., 1.00, 1., .102), loc=3,
ncol=5, mode="expand", borderaxespad=0.)
plt.gca().set_ylabel('Pos Err. (m)')
plt.gca().set_xlabel("Time (s)")
plt.grid(True)
plt.subplot2grid( (3,2) , (0,1) )
tcv = np.asarray(traj['attitude_error'])
plt.plot(t,tcv,'r',label='Att Error')
plt.legend(bbox_to_anchor=(0., 1.00, 1., .102), loc=3,
ncol=5, mode="expand", borderaxespad=0.)
plt.gca().set_xlabel("Time")
plt.gca().set_ylabel('Norm Att Error (rad)')
plt.grid(True)
plt.subplot2grid( (3,2) , (1,0))
plt.plot(t,vel[:,0],'r',label='X')
plt.plot(t,vel[:,1],'b',label='Y')
plt.plot(t,vel[:,2],'g',label='Z')
plt.plot(t,norm_vel,'k',label='N')
plt.legend(bbox_to_anchor=(0., 1.00, 1., .102), loc=3,
ncol=5, mode="expand", borderaxespad=0.)
plt.gca().set_ylabel('Velocity (m/s)')
plt.gca().set_xlabel("Time (s)")
plt.grid(True)
    thrust = np.asarray(traj['thrust'])
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 28 21:47:22 2020
@author: boucher
"""
import numpy as np
import random
import matplotlib.pyplot as plt
class agent:
def __init__(self, x,y,angle):
self.x = x
self.y = y
self.angle = angle
def get_pos(self):
return([self.x,self.y])
    def try_go_forward(self, SS):  # get its forward position
x1 = self.x + SS*np.cos(self.angle)
        y1 = self.y + SS*np.sin(self.angle)
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of pyunicorn.
# Copyright (C) 2008--2019 <NAME> and pyunicorn authors
# URL: <http://www.pik-potsdam.de/members/donges/software>
# License: BSD (3-clause)
#
# Please acknowledge and cite the use of this software and its authors
# when results are used in publications or published elsewhere.
#
# You can use the following reference:
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# and <NAME>, "Unified functional network and nonlinear time series analysis
# for complex systems science: The pyunicorn package"
"""
Provides classes for the analysis of dynamical systems and time series based
on recurrence plots, including measures of recurrence quantification
analysis (RQA) and recurrence network analysis.
"""
# array object and fast numerics
import numpy as np
from ..core import InteractingNetworks
from .recurrence_plot import RecurrencePlot
from .cross_recurrence_plot import CrossRecurrencePlot
#
# Class definitions
#
class InterSystemRecurrenceNetwork(InteractingNetworks):
"""
Generating and quantitatively analyzing inter-system recurrence networks.
    For an inter-system recurrence network, time series x and y do not need to
have the same length! Formally, nodes are identified with state vectors in
the common phase space of both time series. Hence, the time series need to
have the same number of dimensions and identical physical units.
Undirected links are added to describe recurrences within x and y as well
as cross-recurrences between x and y. Self-loops are excluded in this
undirected network representation.
More information on the theory and applications of inter system recurrence
networks can be found in [Feldhoff2012]_.
**Examples:**
- Create an instance of InterSystemRecurrenceNetwork with fixed
recurrence thresholds and without embedding::
InterSystemRecurrenceNetwork(x, y, threshold=(0.1, 0.2, 0.1))
- Create an instance of InterSystemRecurrenceNetwork at a fixed
recurrence rate and using time delay embedding::
InterSystemRecurrenceNetwork(
x, y, dim=3, tau=(2, 1), recurrence_rate=(0.05, 0.05, 0.02))
"""
#
# Internal methods
#
def __init__(self, x, y, metric="supremum",
normalize=False, silence_level=0, **kwds):
"""
Initialize an instance of InterSystemRecurrenceNetwork (ISRN).
.. note::
For an inter system recurrence network, time series x and y need to
have the same number of dimensions!
Creates an embedding of the given time series x and y, calculates a
inter system recurrence matrix from the embedding and then creates
an InteractingNetwork object from this matrix, interpreting the inter
system recurrence matrix as the adjacency matrix of an undirected
complex network.
Either recurrence thresholds ``threshold`` or
recurrence rates ``recurrence_rate`` have to be given as keyword
arguments.
Embedding is only supported for scalar time series. If embedding
dimension ``dim`` and delay ``tau`` are **both** given as keyword
arguments, embedding is applied. Multidimensional time series are
processed as is by default.
:type x: 2D Numpy array (time, dimension)
:arg x: The time series x to be analyzed, can be scalar or
multi-dimensional.
:type y: 2D Numpy array (time, dimension)
:arg y: The time series y to be analyzed, can be scalar or
multi-dimensional.
:type metric: tuple of string
:arg metric: The metric for measuring distances in phase space
("manhattan", "euclidean", "supremum").
:arg bool normalize: Decide whether to normalize the time series to
zero mean and unit standard deviation.
:arg int silence_level: The inverse level of verbosity of the object.
:arg kwds: Additional options.
:type threshold: tuple of number (three numbers)
:keyword threshold: The recurrence threshold keyword for generating
the recurrence plot using fixed thresholds. Give
for each time series and the cross recurrence plot
separately.
:type recurrence_rate: tuple of number (three numbers)
:keyword recurrence_rate: The recurrence rate keyword for generating
the recurrence plot using a fixed recurrence
rate. Give separately for each time series.
:keyword int dim: The embedding dimension. Must be the same for both
time series.
:type tau: tuple of int
:keyword tau: The embedding delay. Give separately for each time
series.
"""
# Store time series
self.x = x.copy().astype("float32")
"""The time series x."""
self.y = y.copy().astype("float32")
"""The time series y."""
# Reshape time series
self.x.shape = (self.x.shape[0], -1)
self.y.shape = (self.y.shape[0], -1)
# Get embedding dimension and delay from **kwds
dim = kwds.get("dim")
tau = kwds.get("tau")
# Check for consistency
if self.x.shape[1] == self.y.shape[1]:
# Set silence_level
self.silence_level = silence_level
"""The inverse level of verbosity of the object."""
# Get number of nodes in subnetwork x
self.N_x = self.x.shape[0]
"""Number of nodes in subnetwork x."""
# Get number of nodes in subnetwork y
self.N_y = self.y.shape[0]
"""Number of nodes in subnetwork y."""
# Get total number of nodes of ISRN
self.N = self.N_x + self.N_y
"""Total number of nodes of ISRN."""
# Store type of metric
self.metric = metric
"""The metric used for measuring distances in phase space."""
# Normalize time series
if normalize:
RecurrencePlot.normalize_time_series(self.x)
RecurrencePlot.normalize_time_series(self.y)
# Embed time series if required
self.dim = dim
if dim is not None and tau is not None and self.x.shape[1] == 1:
self.x_embedded = \
RecurrencePlot.embed_time_series(self.x, dim, tau[0])
"""The embedded time series x."""
self.y_embedded = \
RecurrencePlot.embed_time_series(self.y, dim, tau[1])
"""The embedded time series y."""
else:
self.x_embedded = self.x
self.y_embedded = self.y
# Get threshold or recurrence rate from **kwds, construct
# ISRN accordingly
threshold = kwds.get("threshold")
recurrence_rate = kwds.get("recurrence_rate")
self.threshold = threshold
if threshold is not None:
# Calculate the ISRN using the radius of neighborhood
# threshold
ISRM = self.set_fixed_threshold(threshold)
elif recurrence_rate is not None:
# Calculate the ISRN using a fixed recurrence rate
ISRM = self.set_fixed_recurrence_rate(recurrence_rate)
else:
raise NameError("Please give either threshold or \
recurrence_rate to construct the joint \
recurrence plot!")
InteractingNetworks.__init__(self, adjacency=ISRM, directed=False,
silence_level=self.silence_level)
# No treatment of missing values yet!
self.missing_values = False
else:
raise ValueError("Both time series x and y need to have the same \
dimension!")
def __str__(self):
"""
Returns a string representation.
"""
return ('InterSystemRecurrenceNetwork: time series shapes %s, %s.\n'
'Embedding dimension %i\nThreshold %s, %s metric.\n%s') % (
self.x.shape, self.y.shape, self.dim if self.dim else 0,
self.threshold, self.metric,
InteractingNetworks.__str__(self))
#
# Service methods
#
def clear_cache(self):
"""
Clean up memory by deleting information that can be recalculated from
basic data.
Extends the clean up methods of the parent classes.
"""
# Call clean up of RecurrencePlot objects
self.rp_x.clear_cache()
self.rp_y.clear_cache()
# Call clean up of CrossRecurrencePlot object
self.crp_xy.clear_cache()
# Call clean up of InteractingNetworks
InteractingNetworks.clear_cache(self)
#
# Methods to handle inter system recurrence networks
#
def inter_system_recurrence_matrix(self):
"""
Return the current inter system recurrence matrix :math:`ISRM`.
:rtype: 2D square Numpy array
:return: the current inter system recurrence matrix :math:`ISRM`.
"""
# Shortcuts
N = self.N
N_x = self.N_x
N_y = self.N_y
# Init
ISRM = np.zeros((N, N))
# Combine to inter system recurrence matrix
ISRM[:N_x, :N_x] = self.rp_x.recurrence_matrix()
ISRM[:N_x, N_x:N] = self.crp_xy.recurrence_matrix()
ISRM[N_x:N, :N_x] = self.crp_xy.recurrence_matrix().transpose()
ISRM[N_x:N, N_x:N] = self.rp_y.recurrence_matrix()
return ISRM
def set_fixed_threshold(self, threshold):
"""
        Create an inter-system recurrence network at fixed thresholds.
:type threshold: tuple of number (three numbers)
:arg threshold: The three threshold parameters. Give for each
time series and the cross recurrence plot separately.
"""
# Compute recurrence matrices of x and y
self.rp_x = RecurrencePlot(time_series=self.x_embedded,
threshold=threshold[0],
metric=self.metric,
silence_level=self.silence_level)
self.rp_y = RecurrencePlot(time_series=self.y_embedded,
threshold=threshold[1],
metric=self.metric,
silence_level=self.silence_level)
# Compute cross-recurrence matrix of x and y
self.crp_xy = CrossRecurrencePlot(x=self.x_embedded, y=self.y_embedded,
threshold=threshold[2],
metric=self.metric,
silence_level=self.silence_level)
# Get combined ISRM
ISRM = self.inter_system_recurrence_matrix()
# Set diagonal of ISRM to zero to avoid self-loops
ISRM.flat[::self.N + 1] = 0
return ISRM
def set_fixed_recurrence_rate(self, density):
"""
        Create an inter-system recurrence network at fixed link densities
        (recurrence rates).
:type density: tuple of number (three numbers)
:arg density: The three recurrence rate parameters. Give for each
time series and the cross recurrence plot separately.
"""
# Compute recurrence matrices of x and y
self.rp_x = RecurrencePlot(time_series=self.x_embedded,
recurrence_rate=density[0],
metric=self.metric,
silence_level=self.silence_level)
self.rp_y = RecurrencePlot(time_series=self.y_embedded,
recurrence_rate=density[1],
metric=self.metric,
silence_level=self.silence_level)
# Compute cross-recurrence matrix of x and y
self.crp_xy = CrossRecurrencePlot(x=self.x_embedded, y=self.y_embedded,
recurrence_rate=density[2],
metric=self.metric,
silence_level=self.silence_level)
# Get combined ISRM
ISRM = self.inter_system_recurrence_matrix()
# Set diagonal of ISRM to zero to avoid self-loops
ISRM.flat[::self.N + 1] = 0
return ISRM
#
# Methods to quantify inter system recurrence networks
#
def internal_recurrence_rates(self):
"""
Return internal recurrence rates of subnetworks x and y.
:rtype: tuple of number (float)
:return: the internal recurrence rates of subnetworks x and y.
"""
return (self.rp_x.recurrence_rate(),
self.rp_y.recurrence_rate())
def cross_recurrence_rate(self):
"""
Return cross recurrence rate between subnetworks x and y.
:rtype: number (float)
:return: the cross recurrence rate between subnetworks x and y.
"""
return self.crp_xy.cross_recurrence_rate()
def cross_global_clustering_xy(self):
"""
Return cross global clustering of x with respect to y.
See [Feldhoff2012]_ for definition, further explanation and
applications.
:rtype: number (float)
:return: the cross global clustering of x with respect to y.
"""
        return self.cross_global_clustering(np.arange(self.N_x)
import math
import pathlib as plib
import time
import click
import matplotlib.pyplot as plt
import numpy as np
from util import comparison_plot, plotting_setup, backend_to_label, naive_interp2d
from pyffs.func import dirichlet_fs
from pyffs.interp import fs_interpn
from pyffs.backend import AVAILABLE_MOD, get_module_name
@click.command()
@click.option("--n_interp", type=int, default=1000)
@click.option("--n_trials", type=int, default=10)
def profile_fs_interp2(n_interp, n_trials):
print(f"\nCOMPARING FS_INTERP WITH {n_trials} TRIALS")
fig_path = plotting_setup(linewidth=3, font_size=20)
# parameters of signal
M = n_interp
T, T_c = math.pi, math.e # M^2 number of samples
N_FS_vals = [11, 31, 101, 301, 1001, 3001, 10001] # N_FS^2 coefficients
# sweep over number of interpolation points
a, b = T_c + (T / 2) * np.r_[-1, 1]
n_std = 0.5
# real_x = {"complex": False, "real": True}
real_x = {"complex": False}
proc_time = dict()
proc_time_std = dict()
for N_FS in N_FS_vals:
print("\nNumber of FS coefficients : {}".format(N_FS))
proc_time[N_FS] = dict()
proc_time_std[N_FS] = dict()
# Loop through modules
for mod in AVAILABLE_MOD:
backend = backend_to_label[get_module_name(mod)]
print("-- module : {}".format(backend))
# compute FS coefficients
diric_FS = mod.outer(
dirichlet_fs(N_FS, T, T_c, mod=mod), dirichlet_fs(N_FS, T, T_c, mod=mod)
).astype("complex64")
# Loop through functions
for _f in real_x:
if len(real_x.keys()) > 1:
_key = "{}_{}".format(_f, backend)
else:
_key = backend
timings = []
fs_interpn(
diric_FS, T=[T, T], a=[a, a], b=[b, b], M=[M, M], real_x=real_x[_f]
) # first time is a bit slow sometimes...
for _ in range(n_trials):
start_time = time.time()
fs_interpn(diric_FS, T=[T, T], a=[a, a], b=[b, b], M=[M, M], real_x=real_x[_f])
timings.append(time.time() - start_time)
                proc_time[N_FS][_key] = np.mean(timings)
import js
from RobotRaconteur.Client import *
import importlib_resources
import traceback
import numpy as np
import base64
from RobotRaconteurCompanion.Util.GeometryUtil import GeometryUtil
class NewImageTemplateDialog:
def __init__(self, new_name, core, device_manager):
self.vue = None
self.core = core
self.device_manager = device_manager
self.new_name = new_name
self.cropper = None
def init_vue(self, vue):
self.vue = vue
async def do_handle_create(self, img_np, w, h):
try:
var_storage = self.device_manager.get_device_subscription("variable_storage").GetDefaultClient()
compressed_img = RRN.NewStructure("com.robotraconteur.image.CompressedImage",var_storage)
compressed_img_info = RRN.NewStructure("com.robotraconteur.image.ImageInfo",var_storage)
compressed_img_info.width = w
compressed_img_info.height = h
compressed_img_info.encoding = 0x6000
compressed_img.image_info = compressed_img_info
compressed_img.data = img_np
var_consts = RRN.GetConstants('tech.pyri.variable_storage', var_storage)
variable_persistence = var_consts["VariablePersistence"]
variable_protection_level = var_consts["VariableProtectionLevel"]
await var_storage.async_add_variable2("globals", self.new_name ,"com.robotraconteur.image.CompressedImage", \
RR.VarValue(compressed_img,"com.robotraconteur.image.CompressedImage"), ["image_template"], {}, \
variable_persistence["const"], None, variable_protection_level["read_write"], \
[], "Image matching template", False, None)
except:
js.alert(f"Save image template failed:\n\n{traceback.format_exc()}")
def handle_create(self, *args):
try:
cropped_canvas = self.cropper.getCroppedCanvas(js.python_to_js({"imageSmoothingEnabled":False}))
img_b64 = cropped_canvas.toDataURL('image/png')
img_b64 = img_b64.replace("data:image/png;base64,","")
img_bytes = base64.b64decode(img_b64)
            img_np = np.frombuffer(img_bytes, np.uint8)
#!/usr/bin/python3
import math
import os
import numpy
import numpy as np
import regex as re
from nltk import WordPunctTokenizer
from tensorflow.keras.utils import Sequence
from tensorflow.python.lib.io.file_io import FileIO
from tqdm import tqdm
from unidecode import unidecode
FLOATX = "float32"
def load_vocab(filename, maxwords=0):
"""
Load newline-separated words from file to dict mapping them to unique ids.
:param maxwords: Max number of words to load. Load all by default.
Returns (list of words, word->id map)
"""
pad = "·" # "<#PAD#>"
vocab = dict()
words = []
counter = 1 # start off with 1 so that embedding matrix's first vector is zero and second is for unknown
words.append(pad)
with FileIO(filename, "r") as f:
for i, line in enumerate(f):
if 0 < maxwords < i + 1:
break
word = line.strip()
words.append(word)
vocab[word] = counter
counter += 1
return words, vocab
def load_embeddings(vocab, dim, filename):
"""
Load a subset of embedding vectors from file corresponding to vocabulary provided.
Args:
vocab: string->int map from words to their ids (id corresponds to vector's row in the resulting embedding
matrix). All ids > 0.
dim: embedding vector dimension
filename: file where each line is a word followed by `dim` floats, all space-separated
Returns:
MxN = (len(vocab)+1) x dim numpy embedding matrix.
The +1 for M is because 0th vector is a zero vector for padding.
"""
em = np.zeros((len(vocab) + 1, dim), dtype="float32")
# with FileIO(filename, "r", encoding="utf-8") as f:
with FileIO(filename, "r") as f:
for linenum, line in enumerate(f):
line = unidecode(line)
idx = line.find(' ')
if idx < 0:
print("malformed line, no space found: line", linenum)
continue
word = line[:idx]
if word not in vocab:
continue
i = vocab[word]
em[i, :] = np.array(line.strip().split()[1:], dtype="float32")
return em
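# Illustrative usage sketch (not from the original module): writes a tiny
# two-word vocabulary and a matching 3-d embedding file to a temp directory,
# then loads them back. File names and vector values are made up.
def _demo_load_vocab_and_embeddings():
    import tempfile
    tmpdir = tempfile.mkdtemp()
    vocab_path = os.path.join(tmpdir, "vocab.txt")
    emb_path = os.path.join(tmpdir, "emb.txt")
    with open(vocab_path, "w") as f:
        f.write("hello\nworld\n")
    with open(emb_path, "w") as f:
        f.write("hello 0.1 0.2 0.3\nworld 0.4 0.5 0.6\n")
    words, vocab = load_vocab(vocab_path)
    em = load_embeddings(vocab, 3, emb_path)
    print(words, vocab, em.shape)  # ['·', 'hello', 'world'] {'hello': 1, 'world': 2} (3, 3)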
class CustomTokenizer:
def __init__(self, unicode_to_ascii=True, punct_one_token_per_char=True):
self.unicode_to_ascii = unicode_to_ascii
self.punct_one_token_per_char = punct_one_token_per_char
self._re_punct = re.compile("(\p{P})")
self._tokenizer = WordPunctTokenizer()
def tokenize(self, text):
if self.unicode_to_ascii:
text = unidecode(text)
if self.punct_one_token_per_char:
text = re.sub(self._re_punct, "\\1 ", text)
return self._tokenizer.tokenize(text)
class Text2Seq:
def __init__(self, vocab, vocab_is_lowercase=False):
"""
Use toseq() method to convert a string to a sequence of token ids
:param vocab: word->int map
"""
self.vocab = vocab
self.vocab_is_lowercase = vocab_is_lowercase
self.tokenizer = CustomTokenizer(unicode_to_ascii=False)
def toseq(self, text, notfound=0):
"""
Converts a string to a sequence of token ids.
Args:
text:
notfound:
Returns:
            seq, aux_bits. Seq is a list of integers, word indices in the vocab (0 if the word is unknown).
            aux_bits has one [is_unknown, was_uppercase] pair per token: is_unknown is 1 if the word is not
            in the vocab (0 otherwise), and was_uppercase is 1 if the word was lowercased to match the vocab.
"""
seq = []
aux_bits = []
for word in self.tokenizer.tokenize(text):
id, aux_unknown, aux_uppercase = 0, 1, 0
if self.vocab_is_lowercase:
lower_word = word.lower()
if lower_word != word:
aux_uppercase = 1
word = lower_word
id = self.vocab.get(word, 0)
if id != 0: aux_unknown = 0
seq.append(id)
aux_bits.append([aux_unknown, aux_uppercase])
return seq, aux_bits
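# Illustrative usage sketch (not from the original module): a made-up toy
# vocabulary just to show the (seq, aux_bits) output format, where unknown
# words map to id 0 and each token gets an [is_unknown, was_uppercase] pair.
def _demo_text2seq():
    toy_vocab = {"the": 1, "cat": 2, "sat": 3}
    t2s = Text2Seq(toy_vocab, vocab_is_lowercase=True)
    seq, aux = t2s.toseq("The cat purred")
    print(seq)  # [1, 2, 0]   ("purred" is unknown)
    print(aux)  # [[0, 1], [0, 0], [1, 0]]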
def seqwindows(seq, seqlen=256, stride=128, dtype="int32"):
# nseq = int(math.ceil(len(seq) / stride))
nseq = int(math.ceil(max(0, len(seq) - seqlen) / stride)) + 1
X = np.zeros((nseq, seqlen), dtype=dtype)
Y = np.copy(X)
seqa = np.array(seq, dtype=dtype)
for i in range(nseq):
startX = i * stride
endX = min(len(seq), startX + seqlen)
startY = min(len(seq), startX + 1)
endY = min(len(seq), endX + 1)
X[i, 0:endX - startX] = seqa[startX:endX]
Y[i, 0:endY - startY] = seqa[startY:endY]
return X, Y
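# Illustrative usage sketch (not from the original module): seqwindows slices a
# token sequence into overlapping windows, with Y shifted one step ahead of X.
def _demo_seqwindows():
    X, Y = seqwindows(list(range(10)), seqlen=4, stride=2)
    print(X.shape)      # (4, 4)
    print(X[0], Y[0])   # [0 1 2 3] [1 2 3 4]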
def recursively_list_files(path, ignore=None):
"""Recursively list files under a directory, excluding filenames containing strings in the `ignore` list."""
if ignore is None:
ignore = ['/.hg', '/.git']
results = []
for root, dirs, files in os.walk(path):
for filename in files:
should_append = True
for ig in ignore:
if root.find(ig) > -1 or filename.find(ig) > -1:
should_append = False
if should_append:
results.append(os.path.join(root, filename))
return results
def load_data_sequences(path, vocab, seqlen, stride, numfiles=0):
XX, YY, XXu, YYu = [], [], [], []
t2s = Text2Seq(vocab)
files = recursively_list_files(path)
for i, fname in enumerate(tqdm(files, ascii=True)):
if 0 < numfiles < (i + 1):
break # Process at most `numfiles` files
with FileIO(fname, "r") as f:
seq, unk = t2s.toseq(f.read())
Xi, Yi = seqwindows(seq, seqlen, stride)
Xui, Yui = seqwindows(unk, seqlen, stride, dtype="float32")
XX.append(Xi)
YY.append(Yi)
XXu.append(Xui)
YYu.append(Yui)
X = np.concatenate(XX)
Y = np.concatenate(YY)
Xu = np.concatenate(XXu)
Yu = np.concatenate(YYu)
return X, Y, Xu, Yu
# def load_data_async(path, vocab, pad=32, numfiles=0, lowercase=False):
# loop = asyncio.get_event_loop()
#
# queue = asyncio.Queue(maxsize=10, loop=loop)
#
# async def reader(queue):
# files = recursively_list_files(path)
# files = tqdm(files, ascii=True, miniters=50)
# for i, fname in enumerate(files):
# if numfiles > 0 and (i + 1) > numfiles:
# break
# async with aiofiles.FileIO(fname, "r", encoding="utf-8") as f:
# text = await f.read()
# await queue.put(text)
# await queue.put(None)
#
# X, Xu = [], []
# t2s = Text2Seq(vocab, vocab_is_lowercase=lowercase)
#
# async def processor(queue):
# while True:
# text = await queue.get()
# if text is None: break
# seq, aux = t2s.toseq(text)
# X.extend(seq)
# Xu.extend(aux)
# X.extend([0] * pad)
# Xu.extend([[0, 0]] * pad)
#
# loop.run_until_complete(asyncio.gather(reader(queue), processor(queue)))
#
# X = np.array(X, dtype="int32")
# Xu = np.array(Xu, dtype="float32")
# return X, Xu
def load_data(path, vocab, pad=32, numfiles=0, lowercase=False):
X, Xu = [], []
t2s = Text2Seq(vocab, vocab_is_lowercase=lowercase)
files = recursively_list_files(path)
for i, fname in enumerate(tqdm(files, ascii=True, mininterval=0.5)):
if 0 < numfiles < (i + 1):
break # Process at most `numfiles` files
# with FileIO(fname, "r", encoding="utf-8") as f:
with FileIO(fname, "r") as f:
text = f.read()
seq, aux = t2s.toseq(text)
X.extend(seq)
Xu.extend(aux)
X.extend([0] * pad)
Xu.extend([[0, 0]] * pad)
X = np.array(X, dtype="int32")
Xu = np.array(Xu, dtype="float32")
return X, Xu
eltypemap = {
int: "int32",
float: "float32"
}
def eltype(a):
t = type(a)
if t != list:
return eltypemap.get(t, None)
if len(a) == 0:
return None
return eltype(a[0])
def pad(a, final_length, left=True):
if type(a) == list:
dtype = eltype(a)
if dtype:
a = np.array(a, dtype=dtype)
else:
raise Exception("a should have int32 or float32 elements")
elif type(a) == numpy.ndarray:
dtype = a.dtype
else:
raise Exception("a should be a list or a numpy.ndarray")
if final_length <= len(a): return a
s = list(a.shape)
s[0] = final_length - len(a)
z = np.zeros(tuple(s), dtype=dtype)
if left:
return np.concatenate([z, a])
else:
return np.concatenate([a, z])
def padleft(a, final_length):
return pad(a, final_length, left=True)
def padright(a, final_length):
return pad(a, final_length, left=False)
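# Illustrative usage sketch (not from the original module): pad() zero-pads a
# list or array up to a target length, either on the left or on the right.
def _demo_pad():
    print(padleft([1, 2, 3], 5))    # [0 0 1 2 3]
    print(padright([1, 2, 3], 5))   # [1 2 3 0 0]
    print(pad(np.array([[1.0, 2.0]]), 3, left=False).shape)  # (3, 2)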
class ShiftByOneSequence(Sequence):
def __init__(self, data, seqlen, batch_size):
self.data = data # just an N-sized array of ints
self.seqlen = seqlen
self.batch_size = batch_size
self.len = len(data) - seqlen * batch_size
def __getitem__(self, index):
seq = self.data[index: index + self.seqlen * self.batch_size + 1]
X = np.reshape(seq[:-1], (self.batch_size, self.seqlen))
Y = np.expand_dims(seq[[(i + 1) * self.seqlen for i in range(self.batch_size)]], -1)
return X, Y
def __len__(self):
return self.len
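# Illustrative usage sketch (not from the original module): with a plain integer
# sequence, each item yields a (batch_size, seqlen) input block and, for every
# row, the single next token as the target.
def _demo_shift_by_one_sequence():
    gen = ShiftByOneSequence(np.arange(20, dtype="int32"), seqlen=3, batch_size=2)
    X, Y = gen[0]
    print(X)  # [[0 1 2] [3 4 5]]
    print(Y)  # [[3] [6]]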
class ShiftByOnePermutedSequence(Sequence):
def __init__(self, data, seqlen, batch_size, permutation_map, dtype="int32"):
"""
Args:
data:
seqlen:
batch_size:
permutation_map: `len(data) - seqlen`-sized list of ints
"""
self.data = data # N x `dim` list/array
self.dim = None
eltype = type(data[0])
if eltype == list:
self.dim = [len(data[0])]
elif eltype == np.ndarray:
self.dim = list(data[0].shape)
self.seqlen = seqlen
self.batch_size = batch_size
self.len = len(data) - seqlen * batch_size
self.permutation_map = permutation_map
self.dtype = dtype
def __getitem__(self, index):
shape_x = [self.batch_size, self.seqlen]
shape_y = [self.batch_size, 1]
if self.dim:
shape_x.extend(self.dim)
shape_y.extend(self.dim)
X = np.zeros(tuple(shape_x), dtype=self.dtype)
Y = np.zeros(tuple(shape_y), dtype=self.dtype)
for i in range(self.batch_size):
j = index + i * self.seqlen
if j > len(self.permutation_map):
print("index", index)
print("i", i)
print("j", j)
print("len pm", len(self.permutation_map))
mapped_index = self.permutation_map[j]
X[i, :] = self.data[mapped_index: mapped_index + self.seqlen]
Y[i, 0] = self.data[mapped_index + self.seqlen]
return X, Y
def __len__(self):
return self.len
class SpecialSequence(Sequence):
def __init__(self, data_x, data_xu, seqlen, batch_size):
assert len(data_x) == len(data_xu)
self.datalen = len(data_x)
self.seqlen = seqlen
self.batch_size = batch_size
self.new_permutation_map()
self.seqX = ShiftByOnePermutedSequence(data_x, seqlen, batch_size, self.permutation_map)
self.seqXu = ShiftByOnePermutedSequence(data_xu, seqlen, batch_size, self.permutation_map)
self.batch_size = batch_size
def new_permutation_map(self):
self.permutation_map = np.random.permutation(self.datalen - self.seqlen)
def __getitem__(self, index):
X, Y = self.seqX[index]
Xu, Yu = self.seqXu[index]
Yfake = np.zeros(Yu.shape, dtype=FLOATX)
return [X, Xu, Y, Yu], [Yfake]
def __len__(self):
return len(self.seqX)
def on_epoch_end(self):
super().on_epoch_end()
self.new_permutation_map()
self.seqX.permutation_map = self.permutation_map
self.seqXu.permutation_map = self.permutation_map
class NegativeSamplingPermutedSequence(Sequence):
"""
Takes a sequence of ints.
Produces batches of (i.e add a batch axis at the start):
X = [seq, is_unknown, sample_indices]
Y = [[1] + [0]*sample_size]
where
sample_size: size of sample including one positive example and `sample_size-1` negative examples.
seq: subsequence of `data` of size `seqlen`
is_unknown: same size as seq, 0/1 values for whether the i-th word in seq is unknown/known. If 1, then
corresponding value in seq should be 0
sample_indices: array of ints of size `sample_size`, indices of words in the sample. First index corresponds to the
positive word in ground truth, the rest to negative. Corresponding values in `Y` will always be e.g
[1,0,0,0,0] for `sample_size==5`.
"""
def __init__(self, data_x, data_xu, seqlen, batch_size, sample_size, vocab_size, permutation_map=None,
new_permutation_map_on_epoch_end=True):
"""
Args:
data_x:
data_xu:
seqlen:
batch_size:
sample_size:
vocab_size: Important: output sample_indices will contain index `vocab_size`, standing for <UNKNOWN>
permutation_map: `(len(data_x) - seqlen)`-sized list of ints
new_permutation_map_on_epoch_end:
"""
self.dataX = data_x
self.dataXu = data_xu
self.seqlen = seqlen
self.batch_size = batch_size
self.sample_size = sample_size
self.vocab_size = vocab_size
self.new_permutation_map_on_epoch_end = new_permutation_map_on_epoch_end
if permutation_map:
self.permutation_map = permutation_map
else:
self.permutation_map = self.gen_permutation_map()
self.seqX = ShiftByOnePermutedSequence(self.dataX, self.seqlen, self.batch_size, self.permutation_map,
dtype="float32")
self.seqXu = ShiftByOnePermutedSequence(self.dataXu, self.seqlen, self.batch_size, self.permutation_map,
dtype="int32")
def gen_permutation_map(self):
return np.random.permutation(len(self.dataX) - self.seqlen)
def on_epoch_end(self):
super().on_epoch_end()
if self.new_permutation_map_on_epoch_end:
print("Making new permutation map!")
self.permutation_map = self.gen_permutation_map()
self.seqX.permutation_map = self.permutation_map
self.seqXu.permutation_map = self.permutation_map
def make_sample_indices2(self, aY, aYu):
sample_indices = np.zeros((self.batch_size, self.sample_size), dtype="int32")
for i in range(aY.shape[0]):
correct_word = aY[i][0]
if aYu[i][0] == 1:
correct_word = self.vocab_size # this artificial index stands for "unknown"
while True:
wrong_words = np.random.randint(self.vocab_size + 1, size=self.sample_size - 1)
if correct_word not in wrong_words:
break
sample_indices[i][0] = correct_word
sample_indices[i][1:] = wrong_words
return sample_indices
def make_sample_indices(self, aY, aYu):
        sample_indices = np.zeros((self.batch_size, self.sample_size, 2), dtype="int32")
"""
Helper functions for altimetry algorithms for captoolkit
"""
import xarray as xr
import h5py
import numpy as np
import pyproj
import xarray as xr
import pandas as pd
from scipy.spatial import cKDTree
from scipy.spatial.distance import cdist
from scipy import stats
from scipy.ndimage import map_coordinates
from scipy import signal
from scipy.linalg import solve
from affine import Affine
from scipy.interpolate import interp1d
from numba import jit
################################################################################
# Function for iterative weighted least squares #
################################################################################
def lstsq(A, y, w=None, n_iter=None, n_sigma=None):
"""
Iterative weighted least-squares
:param A: design matrix (NxM)
:param y: observations (N)
:param w: weights (N)
:param n_iter: number of iterations
:param n_sigma: outlier threshold (i.e. 3-sigma)
:return: model coefficients
:return: index of outliers
"""
i = 0
if n_sigma is None:
n_iter = 1
if w is not None:
W = np.diag(w)
A = np.dot(W,A)
y = np.dot(y,W)
bad = np.ones(y.shape, dtype=bool)
x = np.ones(len(A.T)) * np.nan
while i <= n_iter:
good = ~np.isnan(y)
        x = np.linalg.lstsq(A[good,:], y[good], rcond=None)
"""Contains compressive learning algorithms."""
# Main imports
import numpy as np
import matplotlib.pyplot as plt
import scipy
from scipy.optimize import nnls, minimize
# We rely on the sketching functions
from .sketching import SimpleFeatureMap
##########################
### 1: CL-OMPR for GMM ###
##########################
## The CLOMPR algo itself
def CLOMPR_GMM(sketch,featureMap,K,bounds = None,nIterations = None,bestOfRuns=1,GMMoutputFormat=True,verbose=0):
"""Learns a Gaussian Mixture Model (GMM) from the complex exponential sketch of a dataset ("compressively").
The sketch given in argument is asumed to be of the following form (x_i are examples in R^d):
z = (1/n) * sum_{i = 1}^n exp(j*[Omega*x_i + xi]),
and the Gaussian Mixture to estimate will have a density given by (N is the usual Gaussian density):
P(x) = sum_{k=1}^K alpha_k * N(x;mu_k,Sigma_k) s.t. sum_k alpha_k = 1.
Arguments:
- sketch: (m,)-numpy array of complex reals, the sketch z of the dataset to learn from
    - featureMap: the sketch feature map (Phi), provided as either:
-- a SimpleFeatureMap object (i.e., complex exponential or universal quantization periodic map)
-- (Omega,xi): tuple with the (d,m) Fourier projection matrix and the (m,) dither (see above)
- K: int > 0, the number of Gaussians in the mixture to estimate
    - bounds: (lowb,uppb), tuple of two (d,)-np arrays, lower and upper bounds for the centers of the Gaussians.
By default (if bounds is None), the data is assumed to be normalized in the box [-1,1]^d.
- nIterations: int >= K, maximal number of iterations in CL-OMPR (default = 2*K).
- bestOfRuns: int (default 1). If >1 returns the solution with the smallest residual among that many independent runs.
    - GMMoutputFormat: bool (default True), if False the output is not as described below but a list of atoms (for debug)
- verbose: 0,1 or 2, amount of information to print (default: 0, no info printed). Useful for debugging.
    Returns: a tuple (alpha,mus,Sigmas) of three numpy arrays:
- alpha: (K,) -numpy array containing the weigths ('mixing coefficients') of the Gaussians
- mus: (K,d) -numpy array containing the means of the Gaussians
- Sigmas: (K,d,d)-numpy array containing the covariance matrices of the Gaussians
"""
## 0) Defining all the tools we need
####################################
## 0.1) Handle input
## 0.1.1) sketch feature function
if isinstance(featureMap,SimpleFeatureMap):
Omega = featureMap.Omega
xi = featureMap.xi
d = featureMap.d
m = featureMap.m
scst = featureMap.c_norm # Sketch normalization constant, e.g. 1/sqrt(m)
elif isinstance(featureMap,tuple):
(Omega,xi) = featureMap
(d,m) = Omega.shape
scst = 1. # This type of argument passing does't support different normalizations
else:
raise ValueError('The featureMap argument does not match one of the supported formats.')
## 0.1.2) nb of iterations
if nIterations is None:
nIterations = 2*K # By default: CLOMP-*R* (repeat twice)
## 0.1.3) Bounds of the optimization problems
if bounds is None:
lowb = -np.ones(d) # by default data is assumed normalized
uppb = +np.ones(d)
if verbose > 0: print("WARNING: data is assumed to be normalized in [-1,+1]^d")
else:
(lowb,uppb) = bounds # Bounds for one Gaussian center
# Format the bounds for the optimization solver
boundstheta = np.array([lowb,uppb]).T.tolist() # bounds for the means
varianceLowerBound = 1e-8
for i in range(d): boundstheta.append([varianceLowerBound,None]) # bounds for the variance
## 0.2) util functions to store the atoms easily
def stacktheta(mu,sigma):
'''Stacks all the elements of one atom (mean and diagonal variance of a Gaussian) into one atom vector'''
return np.append(mu,sigma)
def destacktheta(th):
'''Splits one atom (mean and diagonal variance of a Gaussian) into mean and variance separately'''
mu = th[:d]
sigma = th[-d:]
return (mu,sigma)
def stackTheta(Theta,alpha):
'''Stacks *all* the atoms and their weights into one vector'''
(nbthetas,thetadim) = Theta.shape
p = np.empty((thetadim+1)*nbthetas)
for i in range(nbthetas):
theta_i = Theta[i]
p[i*thetadim:(i+1)*thetadim] = theta_i
p[-nbthetas:] = alpha
return p
def destackTheta(p):
thetadim = 2*d # method-dependent
nbthetas = int(p.shape[0]/(thetadim+1))
Theta = p[:thetadim*nbthetas].reshape(nbthetas,thetadim)
alpha = p[-nbthetas:]
return (Theta,alpha)
## 0.3) sketch of a Gaussian A(P_theta) and its gradient wrt theta
def sketchOfGaussian(mu,Sigma,Omega):
'''returns a m-dimensional complex vector'''
return scst*np.exp(1j*(mu@Omega) -np.einsum('ij,ij->i', np.dot(Omega.T, Sigma), Omega.T)/2.)*np.exp(1j*xi) # black voodoo magic to evaluate om_j^T*Sig*om_j forall j
def gradMuSketchOfGaussian(mu,Sigma,Omega):
'''returns a d-by-m-dimensional complex vector'''
return scst*1j*Omega*sketchOfGaussian(mu,Sigma,Omega)
def gradSigmaSketchOfGaussian(mu,Sigma,Omega):
'''returns a d-by-m-dimensional complex vector'''
return -scst*0.5*(Omega**2)*sketchOfGaussian(mu,Sigma,Omega)
def Apth(th): # computes sketh from one atom th
mu,sig = destacktheta(th)
return sketchOfGaussian(mu,np.diag(sig),Omega)
## 0.4) functions that compute the cost and gradient of the optimization sub-problems
def step1funGrad(th,r):
mu,sig = destacktheta(th)
Sig = np.diag(sig)
Apth = sketchOfGaussian(mu,Sig,Omega)
ApthNrm = np.linalg.norm(Apth)
jacobMu = gradMuSketchOfGaussian(mu,Sig,Omega)
jacobSi = gradSigmaSketchOfGaussian(mu,Sig,Omega)
# To avoid division by zero, trick (doesn't change anything because everything will be zero)
if np.isclose(ApthNrm,0):
if verbose > 1: print('ApthNrm is too small ({}), change it to 1e-5.'.format(ApthNrm))
ApthNrm = 1e-5
fun = -np.real(np.vdot(Apth,r))/ApthNrm # - to have a min problem
#gradMu = -np.real(jacobMu@(np.eye(m) - np.outer(Apth,Apth)/(ApthNrm**2))@np.conj(r))/(ApthNrm)
gradMu = -np.real([email protected](r))/(ApthNrm) + np.real(np.real([email protected]())*([email protected](r))/(ApthNrm**3) )
#gradSi = -np.real(jacobSi@(np.eye(m) - np.outer(Apth,Apth)/(ApthNrm**2))@np.conj(r))/(ApthNrm)
gradSi = -np.real([email protected](r))/(ApthNrm) + np.real(np.real([email protected]())*([email protected](r))/(ApthNrm**3) )
grad = np.append(gradMu,gradSi)
return (fun,grad)
def step5funGrad(p,z):
(Theta,alpha) = destackTheta(p)
(nbthetas,thetadim) = Theta.shape
# Compute atoms
A = np.empty([m,0])
for theta_i in Theta:
Apthi = Apth(theta_i)
A = np.c_[A,Apthi]
r = (z - A@alpha) # to avoid re-computing
# Function
fun = np.linalg.norm(r)**2
# Gradient
grad = np.empty((thetadim+1)*nbthetas)
for i in range(nbthetas):
theta_i = Theta[i]
mu,sig = destacktheta(theta_i)
Sig = np.diag(sig)
jacobMu = gradMuSketchOfGaussian(mu,Sig,Omega)
jacobSi = gradSigmaSketchOfGaussian(mu,Sig,Omega)
grad[i*thetadim:i*thetadim+d] = -2*alpha[i]*np.real([email protected](r)) # for mu
grad[i*thetadim+d:(i+1)*thetadim] = -2*alpha[i]*np.real([email protected](r)) # for sigma
grad[-nbthetas:] = -2*np.real((z - A@alpha)@np.conj(A)) # Gradient of the weights
return (fun,grad)
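    # step5funGrad returns the global least-squares cost ||z - A(Theta) @ alpha||^2 and its gradient
    # wrt all the means, variances and weights; it is used to jointly re-optimize every atom and its
    # weight against the empirical sketch z (Step 5 of CLOMP).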
## THE ACTUAL ALGORITHM
####################################
bestResidualNorm = np.inf
bestTheta = None
bestalpha = None
for iRun in range(bestOfRuns):
## 1) Initialization
r = sketch # residual
Theta = np.empty([0,2*d]) # Theta is a nbAtoms-by-atomDimension (= 2*d) array
## 2) Main optimization
for i in range(nIterations):
## 2.1] Step 1 : find new atom theta most correlated with residual
# Initialize the new atom
mu0 = np.random.uniform(lowb,uppb) # initial mean at random
            sig0 = np.ones(d)
import torch.utils.data as data
from PIL import Image
import torch
import numpy as np
import h5py
import json
import pdb
import random
from misc.utils import repackage_hidden, clip_gradient, adjust_learning_rate, decode_txt
from misc.readers import ImageFeaturesHdfReader
from torch.nn.functional import normalize
class train(data.Dataset): # torch wrapper
def __init__(self, input_img_h5, input_imgid, input_ques_h5, input_json, negative_sample, num_val, data_split):
print(('DataLoader loading: %s' % data_split))
print(('Loading image feature from %s' % input_img_h5))
if data_split == 'test':
split = 'val'
else:
            split = 'train' # both the 'train' and 'val' data splits are read from the 'train' split
f = json.load(open(input_json, 'r'))
self.itow = f['itow']
self.wtoi = f['wtoi']
self.img_info = f['img_' + split]
# get the data split.
total_num = len(self.img_info)
if data_split == 'train':
s = 0
# e = int((total_num) * 1)
e = int((total_num - num_val) * 1)
# e = 1000
elif data_split == 'val':
s = total_num - num_val
e = total_num
else:
s = 0
e = total_num
self.img_info = self.img_info[s:e]
print(('%s number of data: %d' % (data_split, e - s)))
self.hdf_reader = ImageFeaturesHdfReader(
input_img_h5, False)
self.imgid = json.load(open(input_imgid, 'r'))['imgid'][s:e]
print(('Loading txt from %s' % input_ques_h5))
f = h5py.File(input_ques_h5, 'r')
self.ques = f['ques_' + split][s:e]
self.ans = f['ans_' + split][s:e]
self.cap = f['cap_' + split][s:e]
self.ques_len = f['ques_len_' + split][s:e]
self.ans_len = f['ans_len_' + split][s:e]
self.cap_len = f['cap_len_' + split][s:e]
self.ans_ids = f['ans_index_' + split][s:e]
self.opt_ids = f['opt_' + split][s:e]
self.opt_list = f['opt_list_' + split][:]
self.opt_len = f['opt_len_' + split][:]
f.close()
self.ques_length = self.ques.shape[2]
self.ans_length = self.ans.shape[2]
self.his_length = self.ques_length + self.ans_length
self.vocab_size = len(self.itow)
print(('Vocab Size: %d' % self.vocab_size))
self.split = split
self.rnd = 10
self.negative_sample = negative_sample
def __getitem__(self, index):
# get the image
img_id = self.img_info[index]['imgId']
img = self.hdf_reader[img_id]
img = torch.from_numpy(img)
img = normalize(img, dim=0, p=2)
# get the history
his = np.zeros((self.rnd, self.his_length))
his[0, self.his_length - self.cap_len[index]:] = self.cap[index, :self.cap_len[index]]
ques = np.zeros((self.rnd, self.ques_length))
ans = np.zeros((self.rnd, self.ans_length + 1))
ans_target = np.zeros((self.rnd, self.ans_length + 1))
ques_ori = np.zeros((self.rnd, self.ques_length))
        opt_ans = np.zeros((self.rnd, self.negative_sample, self.ans_length + 1))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 27 08:44:52 2021
@author: gianni
"""
from scipy import constants,optimize
import numpy as np
import matplotlib.pyplot as plt
import os
from astropy.io import fits
import h5py
this_folder = os.path.dirname(os.path.abspath(__file__))
R_Sun = 6.955e8
L_Sun = 3.828e26
Rydberg_J = constants.physical_constants['Rydberg constant times hc in J'][0] #J
ionisation_potential = {'C':11.26030*constants.eV, 'O':13.61806*constants.eV} #J
class RadiationSpectrum():
def flux(self,wavelength,**kwargs):
#W/m2/m
raise NotImplementedError
class DraineISF(RadiationSpectrum):
#interstellar radiation field, original from Draine (1978),
#here in the form of Lee (1984)
#(https://ui.adsabs.harvard.edu/abs/1984ApJ...282..172L/abstract)
lambda_min = 91.2*constants.nano
lambda_max = 200*constants.nano
lambda_grid = np.linspace(lambda_min,lambda_max,1000)
def __init__(self,scaling=(lambda wavelength: 1)):
self.scaling = scaling
def flux(self,wavelength):
        #for the power law, the wavelength has to be in nm
#photons/m2/s/m:
photon_flux= 3.2e13*((wavelength/constants.nano)**-3\
- 1.61e2*(wavelength/constants.nano)**-4\
+ 6.41e3*(wavelength/constants.nano)**-5)\
* constants.centi**-2*constants.nano**-1
photon_energy = constants.h*constants.c/wavelength
flux = photon_flux*photon_energy
valid_region = (wavelength>=self.lambda_min) & (wavelength<=self.lambda_max)
flux = np.where(valid_region,flux,0)
return flux*self.scaling(wavelength=wavelength)
class HabingField(RadiationSpectrum):
def __init__(self,scaling=(lambda wavelength: 1)):
self.scaling = scaling
data_filepath = os.path.join(this_folder,'habing_field.txt')
data = np.loadtxt(data_filepath)
self.lambda_grid = data[:,0]*constants.nano
photon_energy = constants.h*constants.c/self.lambda_grid
self.flux_grid = data[:,1]/constants.centi**2/constants.nano * photon_energy #W/m2/m
def flux(self,wavelength):
return np.interp(x=wavelength,xp=self.lambda_grid,fp=self.flux_grid,
left=0,right=0) * self.scaling(wavelength=wavelength)
class StellarAtmosphere(RadiationSpectrum):
def plot_model(self,label=None):
fig,ax = plt.subplots()
ax.plot(self.lambda_grid/constants.nano,self.modelflux,'.-',label=label)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel('lambda [nm]')
ax.set_ylabel('flux at {:g} au [W/m2/m]'.format(self.ref_distance/constants.au))
if label is not None:
ax.legend(loc='best')
return ax
def flux(self,wavelength,distance):
return np.interp(wavelength,self.lambda_grid,self.modelflux,left=0,right=0)\
* (self.ref_distance/distance)**2
def luminosity(self):
flux_at_ref_distance = self.flux(wavelength=self.lambda_grid,
distance=self.ref_distance)
return np.trapz(flux_at_ref_distance,self.lambda_grid)\
* 4*np.pi*self.ref_distance**2
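    # (luminosity above evaluates L = 4*pi*ref_distance**2 * trapz(F(lambda), lambda): the flux at the
    #  reference distance, integrated over wavelength and multiplied by the area of a sphere of that radius)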
def _scale_spectrum(self,scaling):
self.modelflux *= scaling(wavelength=self.lambda_grid)
def write_modelflux_to_file(self,filepath,distance):
flux = self.flux(wavelength=self.lambda_grid,distance=distance)
np.savez(filepath,wavelength=self.lambda_grid,flux=flux)
class ATLASModelAtmosphere(StellarAtmosphere):
Teff_low_grid = np.arange(3000,12999,250)
Teff_high_grid = np.arange(13000,50001,1000)
Teff_grid = np.concatenate((Teff_low_grid,Teff_high_grid))
    metallicity_grid = np.array((-2.5,-2.0,-1.5,-1.0,-0.5,0.0,0.2,0.5))
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Tuple, Type
import numpy as np
import pandas as pd
import scipy.sparse
import lale.type_checking
from lale.helpers import _is_spark_df
from lale.type_checking import JSON_TYPE
try:
import torch
torch_installed = True
except ImportError:
torch_installed = False
try:
import py4j.protocol
import pyspark.sql
spark_installed = True
except ImportError:
spark_installed = False
# See instructions for subclassing numpy ndarray:
# https://docs.scipy.org/doc/numpy/user/basics.subclassing.html
class NDArrayWithSchema(np.ndarray):
def __new__(
cls,
shape,
dtype=float,
buffer=None,
offset=0,
strides=None,
order=None,
json_schema=None,
table_name=None,
):
result = super(NDArrayWithSchema, cls).__new__(
cls, shape, dtype, buffer, offset, strides, order # type: ignore
)
result.json_schema = json_schema
result.table_name = table_name
return result
def __array_finalize__(self, obj):
if obj is None:
return
self.json_schema = getattr(obj, "json_schema", None)
self.table_name = getattr(obj, "table_name", None)
# See instructions for subclassing pandas DataFrame:
# https://pandas.pydata.org/pandas-docs/stable/development/extending.html#extending-subclassing-pandas
class DataFrameWithSchema(pd.DataFrame):
_internal_names = pd.DataFrame._internal_names + ["json_schema", "table_name"]
_internal_names_set = set(_internal_names)
@property
def _constructor(self):
return DataFrameWithSchema
class SeriesWithSchema(pd.Series):
_internal_names = pd.Series._internal_names + ["json_schema", "table_name"]
_internal_names_set = set(_internal_names)
@property
def _constructor(self):
return SeriesWithSchema
def add_schema(obj, schema=None, raise_on_failure=False, recalc=False) -> Any:
from lale.settings import disable_data_schema_validation
if disable_data_schema_validation:
return obj
if obj is None:
return None
if isinstance(obj, NDArrayWithSchema):
result = obj
elif isinstance(obj, np.ndarray):
result = obj.view(NDArrayWithSchema)
elif isinstance(obj, SeriesWithSchema):
result = obj
elif isinstance(obj, pd.Series):
result = SeriesWithSchema(obj)
elif isinstance(obj, DataFrameWithSchema):
result = obj
elif isinstance(obj, pd.DataFrame):
result = DataFrameWithSchema(obj)
elif is_list_tensor(obj):
obj = np.array(obj)
result = obj.view(NDArrayWithSchema)
elif raise_on_failure:
raise ValueError(f"unexpected type(obj) {type(obj)}")
else:
return obj
if recalc:
setattr(result, "json_schema", None)
if getattr(result, "json_schema", None) is None:
if schema is None:
setattr(result, "json_schema", to_schema(obj))
else:
lale.type_checking.validate_is_schema(schema)
setattr(result, "json_schema", schema)
return result
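# Usage sketch (hedged, illustrative call): wrapping a plain container attaches a schema-carrying
# subclass without changing its contents, e.g.
#   arr = add_schema(np.array([[1, 2], [3, 4]]))
# makes arr an NDArrayWithSchema whose arr.json_schema holds the inferred schema, unless schema
# validation is disabled in lale.settings, in which case the input is returned as-is.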
def add_schema_adjusting_n_rows(obj, schema):
assert isinstance(obj, (np.ndarray, pd.DataFrame, pd.Series)), type(obj)
assert schema.get("type", None) == "array", schema
n_rows = obj.shape[0]
mod_schema = {**schema, "minItems": n_rows, "maxItems": n_rows}
result = add_schema(obj, mod_schema)
return result
def add_table_name(obj, name) -> Any:
if obj is None:
return None
if name is None:
return obj
if spark_installed and isinstance(obj, pyspark.sql.DataFrame):
# alias method documentation: https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.sql.DataFrame.alias.html
# Python class DataFrame with method alias(self, alias): https://github.com/apache/spark/blob/master/python/pyspark/sql/dataframe.py
# Scala type DataFrame: https://github.com/apache/spark/blob/master/sql/core/src/main/scala/org/apache/spark/sql/package.scala
# Scala class DataSet with method as(alias: String): https://github.com/apache/spark/blob/master/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
return obj.alias(name)
if isinstance(obj, NDArrayWithSchema):
result = obj
elif isinstance(obj, np.ndarray):
result = obj.view(NDArrayWithSchema)
elif isinstance(obj, SeriesWithSchema):
result = obj
elif isinstance(obj, pd.Series):
result = SeriesWithSchema(obj)
elif isinstance(obj, DataFrameWithSchema):
result = obj
elif isinstance(obj, pd.DataFrame):
result = DataFrameWithSchema(obj)
elif is_list_tensor(obj):
obj = np.array(obj)
result = obj.view(NDArrayWithSchema)
elif isinstance(
obj, (pd.core.groupby.DataFrameGroupBy, pd.core.groupby.SeriesGroupBy)
):
result = obj
elif spark_installed and isinstance(obj, pyspark.sql.GroupedData):
result = obj
else:
raise ValueError(f"unexpected type(obj) {type(obj)}")
setattr(result, "table_name", name)
return result
def get_table_name(obj):
if spark_installed and isinstance(obj, pyspark.sql.DataFrame):
# Python class DataFrame with field self._jdf: https://github.com/apache/spark/blob/master/python/pyspark/sql/dataframe.py
# Scala type DataFrame: https://github.com/apache/spark/blob/master/sql/core/src/main/scala/org/apache/spark/sql/package.scala
# Scala class DataSet with field queryExecution: https://github.com/apache/spark/blob/master/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
# Scala fields turn into Java nullary methods
# Py4J exposes Java methods as Python methods
# Scala class QueryExecution with field analyzed: LogicalPlan: https://github.com/apache/spark/blob/master/sql/core/src/main/scala/org/apache/spark/sql/execution/QueryExecution.scala
spark_query = obj._jdf.queryExecution().analyzed() # type: ignore
try:
# calling spark_df.explain("extended") shows the analyzed contents
# after spark_df.alias("foo"), analyzed contents should be SubqueryAlias
# Scala class SuqueryAlias with field identifier: https://github.com/apache/spark/blob/master/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala
# str(..) converts the Java string into a Python string
result = str(spark_query.identifier())
except py4j.protocol.Py4JError:
result = None
return result
if (
isinstance(
obj,
(
NDArrayWithSchema,
SeriesWithSchema,
DataFrameWithSchema,
pd.core.groupby.DataFrameGroupBy,
pd.core.groupby.SeriesGroupBy,
),
)
or (spark_installed and isinstance(obj, pyspark.sql.GroupedData))
):
return getattr(obj, "table_name", None)
return None
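# Usage sketch (hedged): for numpy/pandas containers the name is simply stored as an attribute,
# e.g. get_table_name(add_table_name(pd.DataFrame({"a": [1]}), "t1")) returns "t1";
# for Spark DataFrames the alias is recovered from the analyzed logical plan as documented above.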
def strip_schema(obj):
if isinstance(obj, NDArrayWithSchema):
        result = np.array(obj)
# Phase harmonic sythesis script using pywph package
import numpy as np
import time
import torch
import scipy.optimize as opt
import pywph as pw
import multiprocessing
print('CPUs:', multiprocessing.cpu_count())
print('GPU count: ' + str(torch.cuda.device_count()) + '\n')
class SoftHistogram(torch.nn.Module):
"""
Motivated by https://discuss.pytorch.org/t/differentiable-torch-histc/25865/3
"""
def __init__(self, bins, min_bin_edge, max_bin_edge, sigma):
super(SoftHistogram, self).__init__()
self.sigma = sigma
self.delta = float(max_bin_edge - min_bin_edge) / float(bins)
self.centers = float(min_bin_edge) + self.delta * (torch.arange(bins).float() + 0.5)
self.centers = torch.nn.Parameter(self.centers, requires_grad=False).to(device)
def forward(self, x):
x = torch.unsqueeze(x, 0) - torch.unsqueeze(self.centers, 1)
x = torch.sigmoid(self.sigma * (x + self.delta/2)) - torch.sigmoid(self.sigma * (x - self.delta/2))
x = x.sum(dim=1)
return x
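# Usage sketch (hedged; the bin parameters below are illustrative and `device` is set further down):
#   soft_hist = SoftHistogram(bins=64, min_bin_edge=-1.0, max_bin_edge=1.0, sigma=100.0)
#   counts = soft_hist(x.flatten())
# gives a differentiable histogram of the 1D tensor x, so a histogram mismatch can be used in a loss term.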
#######
# INPUT PARAMETERS
#######
M, N = 512, 512
J = 8
L = 8
dn = 2
dj=None # default J - 1
dl=None # default L/2
norm = "auto" # Normalization
pbc = False # Periodic boundary conditions
device = 1
print(torch.cuda.device(device))
optim_params = {"maxiter": 2000, "gtol": 1e-20, "ftol": 1e-20, "maxcor": 20}
p_list = [0, 1, 2, 3, 4, 5, 6]
data = np.load('./data/sim_QiU_stdr_3.npy')
"""
A test that validates that the automatically optimized approximation to the analytical solution
for the real coverage doesn't deviate from the precise analytical solution to any significant degree.
See:
• "efficacy of an arbitrary CI method for proportions - analytical solution.jpg"
• "CI_efficacy_proportion.py".
CImethodForProportion_efficacyToolkit.calculate_coverage_analytically()
• "CI_efficacy_diff_betw_two_proportions.py".
CImethodForDiffBetwTwoProportions_efficacyToolkit.calculate_coverage_analytically()
"""
import time
import numpy as np
from CI_methods_analyser.data_functions import float_to_str
from CI_methods_analyser.CI_efficacy_proportion import CImethodForProportion_efficacyToolkit
from CI_methods_analyser.methods_for_CI_for_proportion import wald_interval, wilson_score_interval
from Tests.plot_difference import plot_relative_difference
"""
Comparing to z_precision=9 as the maximum precision. Why 9?
https://www.wolframalpha.com/input/?i=9+sigmas
The two-tailed p-value at 9 sigmas is about 2e-19, which is only about twice the
resolution of the 63-bit mantissa of np.float128: roughly 1 part in 1e19,
i.e. a sensitivity of about 1e-19 per unit.
This means that all the values outside 9 sigmas add up to only about 2e-19.
Therefore the individual `y` values of a given binomial distribution outside 9 sigmas
don't exceed 2e-19, and only contributions of at least ~1e-19 can change a np.float128 value
lying roughly between 0.5 and 0.9999999...
Thus, a z_precision of 9 behaves here just like maximum precision.
"""
print("")
print("===== CI test1 ======")
print("")
start_time = time.time()
proportions = ('0.001', '0.999', '0.003')
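# Naming convention used throughout these tests (a reading aid, inferred from the calls below):
# *_auto lets calculate_coverage_analytically pick z_precision automatically, *_max forces z_precision=9;
# the "b" variants use the Wilson score interval, the others the Wald interval.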
CI_test_1_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test_1_auto.calculate_coverage_analytically(
sample_size=100, proportions=proportions, confidence=0.90)
CI_test_1b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test_1b_auto.calculate_coverage_analytically(
sample_size=100, proportions=proportions, confidence=0.90)
CI_test_1_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test_1_max.calculate_coverage_analytically(
sample_size=100, proportions=proportions, confidence=0.90, z_precision=9)
CI_test_1b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test_1b_max.calculate_coverage_analytically(
sample_size=100, proportions=proportions, confidence=0.90, z_precision=9)
CI_test_2_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test_2_auto.calculate_coverage_analytically(
sample_size=100, proportions=proportions, confidence=0.95)
CI_test_2b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test_2b_auto.calculate_coverage_analytically(
sample_size=100, proportions=proportions, confidence=0.95)
CI_test_2_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test_2_max.calculate_coverage_analytically(
sample_size=100, proportions=proportions, confidence=0.95, z_precision=9)
CI_test_2b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test_2b_max.calculate_coverage_analytically(
sample_size=100, proportions=proportions, confidence=0.95, z_precision=9)
CI_test_3_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test_3_auto.calculate_coverage_analytically(
sample_size=100, proportions=proportions, confidence=0.99)
CI_test_3b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test_3b_auto.calculate_coverage_analytically(
sample_size=100, proportions=proportions, confidence=0.99)
CI_test_3_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test_3_max.calculate_coverage_analytically(
sample_size=100, proportions=proportions, confidence=0.99, z_precision=9)
CI_test_3b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test_3b_max.calculate_coverage_analytically(
sample_size=100, proportions=proportions, confidence=0.99, z_precision=9)
CI_test_4_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test_4_auto.calculate_coverage_analytically(
sample_size=1000, proportions=proportions, confidence=0.90)
CI_test_4b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test_4b_auto.calculate_coverage_analytically(
sample_size=1000, proportions=proportions, confidence=0.90)
CI_test_4_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test_4_max.calculate_coverage_analytically(
sample_size=1000, proportions=proportions, confidence=0.90, z_precision=9)
CI_test_4b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test_4b_max.calculate_coverage_analytically(
sample_size=1000, proportions=proportions, confidence=0.90, z_precision=9)
CI_test_5_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test_5_auto.calculate_coverage_analytically(
sample_size=1000, proportions=proportions, confidence=0.95)
CI_test_5b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test_5b_auto.calculate_coverage_analytically(
sample_size=1000, proportions=proportions, confidence=0.95)
CI_test_5_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test_5_max.calculate_coverage_analytically(
sample_size=1000, proportions=proportions, confidence=0.95, z_precision=9)
CI_test_5b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test_5b_max.calculate_coverage_analytically(
sample_size=1000, proportions=proportions, confidence=0.95, z_precision=9)
CI_test_6_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test_6_auto.calculate_coverage_analytically(
sample_size=1000, proportions=proportions, confidence=0.99)
CI_test_6b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test_6b_auto.calculate_coverage_analytically(
sample_size=1000, proportions=proportions, confidence=0.99)
CI_test_6_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test_6_max.calculate_coverage_analytically(
sample_size=1000, proportions=proportions, confidence=0.99, z_precision=9)
CI_test_6b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test_6b_max.calculate_coverage_analytically(
sample_size=1000, proportions=proportions, confidence=0.99, z_precision=9)
print("CI test1 finished")
print("--- %s seconds ---" % float_to_str((time.time() - start_time), 5))
print("")
print("===== CI test2 ======")
print("")
start_time = time.time()
proportions = ('0.00001', '0.00999', '0.00004')
CI_test2_1_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test2_1_auto.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.90)
CI_test2_1b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test2_1b_auto.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.90)
CI_test2_1_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test2_1_max.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.90, z_precision=9)
CI_test2_1b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test2_1b_max.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.90, z_precision=9)
CI_test2_2_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test2_2_auto.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.95)
CI_test2_2b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test2_2b_auto.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.95)
CI_test2_2_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test2_2_max.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.95, z_precision=9)
CI_test2_2b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test2_2b_max.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.95, z_precision=9)
CI_test2_3_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test2_3_auto.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.99)
CI_test2_3b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test2_3b_auto.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.99)
CI_test2_3_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test2_3_max.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.99, z_precision=9)
CI_test2_3b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test2_3b_max.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.99, z_precision=9)
CI_test2_4_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test2_4_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.90)
CI_test2_4b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test2_4b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.90)
CI_test2_4_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test2_4_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.90, z_precision=9)
CI_test2_4b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test2_4b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.90, z_precision=9)
CI_test2_5_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test2_5_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.95)
CI_test2_5b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test2_5b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.95)
CI_test2_5_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test2_5_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.95, z_precision=9)
CI_test2_5b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test2_5b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.95, z_precision=9)
CI_test2_6_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test2_6_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99)
CI_test2_6b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test2_6b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99)
CI_test2_6_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test2_6_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99, z_precision=9)
CI_test2_6b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test2_6b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99, z_precision=9)
print("CI test2 finished")
print("--- %s seconds ---" % float_to_str((time.time() - start_time), 5))
print("")
print("===== CI test3 ======")
print("")
start_time = time.time()
proportions = ('0.000001', '0.000999', '0.000004')
CI_test3_1_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test3_1_auto.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.99)
CI_test3_1b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test3_1b_auto.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.99)
CI_test3_1_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test3_1_max.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.99, z_precision=9)
CI_test3_1b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test3_1b_max.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.99, z_precision=9)
CI_test3_2_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test3_2_auto.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.995)
CI_test3_2b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test3_2b_auto.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.995)
CI_test3_2_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test3_2_max.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.995, z_precision=9)
CI_test3_2b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test3_2b_max.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.995, z_precision=9)
CI_test3_3_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test3_3_auto.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.999)
CI_test3_3b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test3_3b_auto.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.999)
CI_test3_3_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test3_3_max.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.999, z_precision=9)
CI_test3_3b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test3_3b_max.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.999, z_precision=9)
CI_test3_4_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test3_4_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99)
CI_test3_4b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test3_4b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99)
CI_test3_4_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test3_4_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99, z_precision=9)
CI_test3_4b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test3_4b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99, z_precision=9)
CI_test3_5_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test3_5_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.995)
CI_test3_5b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test3_5b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.995)
CI_test3_5_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test3_5_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.995, z_precision=9)
CI_test3_5b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test3_5b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.995, z_precision=9)
CI_test3_6_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test3_6_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999)
CI_test3_6b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test3_6b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999)
CI_test3_6_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test3_6_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999, z_precision=9)
CI_test3_6b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test3_6b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999, z_precision=9)
print("CI test3 finished")
print("--- %s seconds ---" % float_to_str((time.time() - start_time), 5))
print("")
print("===== CI test4 ======")
print("")
start_time = time.time()
proportions = ('0.000001', '0.000999', '0.000007')
CI_test4_1_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test4_1_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999)
CI_test4_1b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test4_1b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999)
CI_test4_1_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test4_1_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999, z_precision=9)
CI_test4_1b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test4_1b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999, z_precision=9)
CI_test4_2_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test4_2_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.9995)
CI_test4_2b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test4_2b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.9995)
CI_test4_2_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test4_2_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.9995, z_precision=9)
CI_test4_2b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test4_2b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.9995, z_precision=9)
CI_test4_3_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test4_3_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.9999)
CI_test4_3b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test4_3b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.9999)
CI_test4_3_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test4_3_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.9999, z_precision=9)
CI_test4_3b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test4_3b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.9999, z_precision=9)
CI_test4_4_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test4_4_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999)
CI_test4_4b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test4_4b_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999)
CI_test4_4_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test4_4_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999, z_precision=9)
CI_test4_4b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test4_4b_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999, z_precision=9)
CI_test4_5_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test4_5_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.9995)
CI_test4_5b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test4_5b_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.9995)
CI_test4_5_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test4_5_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.9995, z_precision=9)
CI_test4_5b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test4_5b_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.9995, z_precision=9)
CI_test4_6_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test4_6_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.9999)
CI_test4_6b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test4_6b_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.9999)
CI_test4_6_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test4_6_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.9999, z_precision=9)
CI_test4_6b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test4_6b_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.9999, z_precision=9)
print("CI test4 finished")
print("--- %s seconds ---" % float_to_str((time.time() - start_time), 5))
print("")
print("===== CI test5 ======")
print("")
start_time = time.time()
proportions = ('0.0000001', '0.0000199', '0.0000002')
CI_test5_1_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test5_1_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99999)
CI_test5_1b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test5_1b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99999)
CI_test5_1_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test5_1_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99999, z_precision=9)
CI_test5_1b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test5_1b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99999, z_precision=9)
CI_test5_2_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test5_2_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999995)
CI_test5_2b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test5_2b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999995)
CI_test5_2_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test5_2_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999995, z_precision=9)
CI_test5_2b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test5_2b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999995, z_precision=9)
CI_test5_3_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test5_3_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999999)
CI_test5_3b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test5_3b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999999)
CI_test5_3_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test5_3_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999999, z_precision=9)
CI_test5_3b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test5_3b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999999, z_precision=9)
CI_test5_4_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test5_4_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.99999)
CI_test5_4b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test5_4b_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.99999)
CI_test5_4_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test5_4_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.99999, z_precision=9)
CI_test5_4b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test5_4b_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.99999, z_precision=9)
CI_test5_5_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test5_5_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999995)
CI_test5_5b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test5_5b_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999995)
CI_test5_5_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test5_5_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999995, z_precision=9)
CI_test5_5b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test5_5b_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999995, z_precision=9)
CI_test5_6_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test5_6_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999999)
CI_test5_6b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test5_6b_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999999)
CI_test5_6_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test5_6_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999999, z_precision=9)
CI_test5_6b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test5_6b_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999999, z_precision=9)
print("CI test5 finished")
print("--- %s seconds ---" % float_to_str((time.time() - start_time), 5))
print("")
print("===== CI test6 ======")
print("")
start_time = time.time()
proportions = ('0.0000001', '0.0001999', '0.0000011')
CI_test6_1_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test6_1_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99999)
CI_test6_1b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test6_1b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99999)
CI_test6_1_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test6_1_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99999, z_precision=9)
CI_test6_1b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test6_1b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99999, z_precision=9)
CI_test6_2_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test6_2_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999995)
CI_test6_2b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test6_2b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999995)
CI_test6_2_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test6_2_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999995, z_precision=9)
CI_test6_2b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test6_2b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999995, z_precision=9)
CI_test6_3_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test6_3_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999999)
CI_test6_3b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test6_3b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999999)
CI_test6_3_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test6_3_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999999, z_precision=9)
CI_test6_3b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test6_3b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999999, z_precision=9)
CI_test6_4_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test6_4_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.99999)
CI_test6_4b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test6_4b_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.99999)
CI_test6_4_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test6_4_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.99999, z_precision=9)
CI_test6_4b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test6_4b_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.99999, z_precision=9)
CI_test6_5_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test6_5_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999995)
CI_test6_5b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test6_5b_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999995)
CI_test6_5_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test6_5_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999995, z_precision=9)
CI_test6_5b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test6_5b_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999995, z_precision=9)
CI_test6_6_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test6_6_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999999)
CI_test6_6b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test6_6b_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999999)
CI_test6_6_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test6_6_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999999, z_precision=9)
CI_test6_6b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test6_6b_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999999, z_precision=9)
print("CI test6 finished")
print("--- %s seconds ---" % float_to_str((time.time() - start_time), 5))
print("")
print("===== CI test7 ======")
print("")
start_time = time.time()
proportions = ('0.0001', '0.1999', '0.0019')
CI_test7_1_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test7_1_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99999)
CI_test7_1b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test7_1b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99999)
CI_test7_1_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test7_1_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99999, z_precision=9)
CI_test7_1b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test7_1b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99999, z_precision=9)
CI_test7_2_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test7_2_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999995)
CI_test7_2b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test7_2b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999995)
CI_test7_2_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test7_2_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999995, z_precision=9)
CI_test7_2b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test7_2b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999995, z_precision=9)
CI_test7_3_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test7_3_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999999)
CI_test7_3b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test7_3b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999999)
CI_test7_3_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test7_3_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999999, z_precision=9)
CI_test7_3b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test7_3b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999999, z_precision=9)
CI_test7_4_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test7_4_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.99999)
CI_test7_4b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test7_4b_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.99999)
CI_test7_4_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test7_4_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.99999, z_precision=9)
CI_test7_4b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test7_4b_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.99999, z_precision=9)
CI_test7_5_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test7_5_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999995)
CI_test7_5b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test7_5b_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999995)
CI_test7_5_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test7_5_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999995, z_precision=9)
CI_test7_5b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test7_5b_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999995, z_precision=9)
CI_test7_6_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test7_6_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999999)
CI_test7_6b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test7_6b_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999999)
CI_test7_6_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test7_6_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999999, z_precision=9)
CI_test7_6b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test7_6b_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999999, z_precision=9)
print("CI test7 finished")
print("--- %s seconds ---" % float_to_str((time.time() - start_time), 5))
print("")
print("===== CI test8 ======")
print("")
start_time = time.time()
proportions = ('0.0001', '0.1999', '0.0019')
CI_test8_1_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test8_1_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.95)
CI_test8_1b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test8_1b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.95)
CI_test8_1_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test8_1_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.95, z_precision=9)
CI_test8_1b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test8_1b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.95, z_precision=9)
CI_test8_2_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test8_2_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99)
CI_test8_2b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test8_2b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99)
CI_test8_2_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test8_2_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99, z_precision=9)
CI_test8_2b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test8_2b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99, z_precision=9)
CI_test8_3_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test8_3_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999)
CI_test8_3b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test8_3b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999)
CI_test8_3_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test8_3_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999, z_precision=9)
CI_test8_3b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test8_3b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999, z_precision=9)
CI_test8_4_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test8_4_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.95)
CI_test8_4b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test8_4b_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.95)
CI_test8_4_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test8_4_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.95, z_precision=9)
CI_test8_4b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test8_4b_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.95, z_precision=9)
CI_test8_5_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test8_5_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.99)
CI_test8_5b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test8_5b_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.99)
CI_test8_5_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test8_5_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.99, z_precision=9)
CI_test8_5b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test8_5b_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.99, z_precision=9)
CI_test8_6_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test8_6_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999)
CI_test8_6b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test8_6b_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999)
CI_test8_6_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test8_6_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999, z_precision=9)
CI_test8_6b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test8_6b_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999, z_precision=9)
print("CI test8 finished")
print("--- %s seconds ---" % float_to_str((time.time() - start_time), 5))
test_fig1 = plot_relative_difference(
np.array(CI_test_1_auto.coverage), np.array(CI_test_1_max.coverage),
plt_figure_num="CI test 1", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test_1_auto.f.calculation_inputs()}")
test_fig1b = plot_relative_difference(
np.array(CI_test_1b_auto.coverage), np.array(CI_test_1b_max.coverage),
plt_figure_num="CI test 1b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test_1b_auto.f.calculation_inputs()}")
test_fig2 = plot_relative_difference(
np.array(CI_test_2_auto.coverage), np.array(CI_test_2_max.coverage),
plt_figure_num="CI test 2", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test_2_auto.f.calculation_inputs()}")
test_fig2b = plot_relative_difference(
np.array(CI_test_2b_auto.coverage), np.array(CI_test_2b_max.coverage),
plt_figure_num="CI test 2b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test_2b_auto.f.calculation_inputs()}")
test_fig3 = plot_relative_difference(
np.array(CI_test_3_auto.coverage), np.array(CI_test_3_max.coverage),
plt_figure_num="CI test 3", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test_3_auto.f.calculation_inputs()}")
test_fig3b = plot_relative_difference(
np.array(CI_test_3b_auto.coverage), np.array(CI_test_3b_max.coverage),
plt_figure_num="CI test 3b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test_3b_auto.f.calculation_inputs()}")
test_fig4 = plot_relative_difference(
np.array(CI_test_4_auto.coverage), np.array(CI_test_4_max.coverage),
plt_figure_num="CI test 4", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test_4_auto.f.calculation_inputs()}")
test_fig4b = plot_relative_difference(
np.array(CI_test_4b_auto.coverage), np.array(CI_test_4b_max.coverage),
plt_figure_num="CI test 4b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test_4b_auto.f.calculation_inputs()}")
test_fig5 = plot_relative_difference(
np.array(CI_test_5_auto.coverage), np.array(CI_test_5_max.coverage),
plt_figure_num="CI test 5", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test_5_auto.f.calculation_inputs()}")
test_fig5b = plot_relative_difference(
np.array(CI_test_5b_auto.coverage), np.array(CI_test_5b_max.coverage),
plt_figure_num="CI test 5b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test_5b_auto.f.calculation_inputs()}")
test_fig6 = plot_relative_difference(
np.array(CI_test_6_auto.coverage), np.array(CI_test_6_max.coverage),
plt_figure_num="CI test 6", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test_6_auto.f.calculation_inputs()}")
test_fig6b = plot_relative_difference(
np.array(CI_test_6b_auto.coverage), np.array(CI_test_6b_max.coverage),
plt_figure_num="CI test 6b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test_6b_auto.f.calculation_inputs()}")
test2_fig1 = plot_relative_difference(
np.array(CI_test2_1_auto.coverage), np.array(CI_test2_1_max.coverage),
plt_figure_num="CI test2 1", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test2_1_auto.f.calculation_inputs()}")
test2_fig1b = plot_relative_difference(
np.array(CI_test2_1b_auto.coverage), np.array(CI_test2_1b_max.coverage),
plt_figure_num="CI test2 1b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test2_1b_auto.f.calculation_inputs()}")
test2_fig2 = plot_relative_difference(
np.array(CI_test2_2_auto.coverage), np.array(CI_test2_2_max.coverage),
plt_figure_num="CI test2 2", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test2_2_auto.f.calculation_inputs()}")
test2_fig2b = plot_relative_difference(
np.array(CI_test2_2b_auto.coverage), np.array(CI_test2_2b_max.coverage),
plt_figure_num="CI test2 2b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test2_2b_auto.f.calculation_inputs()}")
test2_fig3 = plot_relative_difference(
np.array(CI_test2_3_auto.coverage), np.array(CI_test2_3_max.coverage),
plt_figure_num="CI test2 3", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test2_3_auto.f.calculation_inputs()}")
test2_fig3b = plot_relative_difference(
np.array(CI_test2_3b_auto.coverage), np.array(CI_test2_3b_max.coverage),
plt_figure_num="CI test2 3b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test2_3b_auto.f.calculation_inputs()}")
test2_fig4 = plot_relative_difference(
np.array(CI_test2_4_auto.coverage), np.array(CI_test2_4_max.coverage),
plt_figure_num="CI test2 4", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test2_4_auto.f.calculation_inputs()}")
test2_fig4b = plot_relative_difference(
np.array(CI_test2_4b_auto.coverage), np.array(CI_test2_4b_max.coverage),
plt_figure_num="CI test2 4b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test2_4b_auto.f.calculation_inputs()}")
test2_fig5 = plot_relative_difference(
np.array(CI_test2_5_auto.coverage), np.array(CI_test2_5_max.coverage),
plt_figure_num="CI test2 5", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test2_5_auto.f.calculation_inputs()}")
test2_fig5b = plot_relative_difference(
np.array(CI_test2_5b_auto.coverage), np.array(CI_test2_5b_max.coverage),
plt_figure_num="CI test2 5b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test2_5b_auto.f.calculation_inputs()}")
test2_fig6 = plot_relative_difference(
np.array(CI_test2_6_auto.coverage), np.array(CI_test2_6_max.coverage),
plt_figure_num="CI test2 6", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test2_6_auto.f.calculation_inputs()}")
test2_fig6b = plot_relative_difference(
np.array(CI_test2_6b_auto.coverage), np.array(CI_test2_6b_max.coverage),
plt_figure_num="CI test2 6b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test2_6b_auto.f.calculation_inputs()}")
test3_fig1 = plot_relative_difference(
np.array(CI_test3_1_auto.coverage), np.array(CI_test3_1_max.coverage),
plt_figure_num="CI test3 1", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test3_1_auto.f.calculation_inputs()}")
test3_fig1b = plot_relative_difference(
np.array(CI_test3_1b_auto.coverage), np.array(CI_test3_1b_max.coverage),
plt_figure_num="CI test3 1b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test3_1b_auto.f.calculation_inputs()}")
test3_fig2 = plot_relative_difference(
np.array(CI_test3_2_auto.coverage), np.array(CI_test3_2_max.coverage),
plt_figure_num="CI test3 2", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test3_2_auto.f.calculation_inputs()}")
test3_fig2b = plot_relative_difference(
np.array(CI_test3_2b_auto.coverage), np.array(CI_test3_2b_max.coverage),
plt_figure_num="CI test3 2b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test3_2b_auto.f.calculation_inputs()}")
test3_fig3 = plot_relative_difference(
np.array(CI_test3_3_auto.coverage), np.array(CI_test3_3_max.coverage),
plt_figure_num="CI test3 3", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test3_3_auto.f.calculation_inputs()}")
test3_fig3b = plot_relative_difference(
np.array(CI_test3_3b_auto.coverage), np.array(CI_test3_3b_max.coverage),
plt_figure_num="CI test3 3b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test3_3b_auto.f.calculation_inputs()}")
test3_fig4 = plot_relative_difference(
np.array(CI_test3_4_auto.coverage), np.array(CI_test3_4_max.coverage),
plt_figure_num="CI test3 4", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test3_4_auto.f.calculation_inputs()}")
test3_fig4b = plot_relative_difference(
np.array(CI_test3_4b_auto.coverage), np.array(CI_test3_4b_max.coverage),
plt_figure_num="CI test3 4b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test3_4b_auto.f.calculation_inputs()}")
test3_fig5 = plot_relative_difference(
np.array(CI_test3_5_auto.coverage), np.array(CI_test3_5_max.coverage),
plt_figure_num="CI test3 5", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test3_5_auto.f.calculation_inputs()}")
test3_fig5b = plot_relative_difference(
np.array(CI_test3_5b_auto.coverage), np.array(CI_test3_5b_max.coverage),
plt_figure_num="CI test3 5b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test3_5b_auto.f.calculation_inputs()}")
test3_fig6 = plot_relative_difference(
np.array(CI_test3_6_auto.coverage), np.array(CI_test3_6_max.coverage),
plt_figure_num="CI test3 6", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test3_6_auto.f.calculation_inputs()}")
test3_fig6b = plot_relative_difference(
np.array(CI_test3_6b_auto.coverage), np.array(CI_test3_6b_max.coverage),
plt_figure_num="CI test3 6b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test3_6b_auto.f.calculation_inputs()}")
test4_fig1 = plot_relative_difference(
np.array(CI_test4_1_auto.coverage), np.array(CI_test4_1_max.coverage),
plt_figure_num="CI test4 1", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test4_1_auto.f.calculation_inputs()}")
test4_fig1b = plot_relative_difference(
np.array(CI_test4_1b_auto.coverage), np.array(CI_test4_1b_max.coverage),
plt_figure_num="CI test4 1b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test4_1b_auto.f.calculation_inputs()}")
test4_fig2 = plot_relative_difference(
np.array(CI_test4_2_auto.coverage), np.array(CI_test4_2_max.coverage),
plt_figure_num="CI test4 2", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test4_2_auto.f.calculation_inputs()}")
test4_fig2b = plot_relative_difference(
np.array(CI_test4_2b_auto.coverage), np.array(CI_test4_2b_max.coverage),
plt_figure_num="CI test4 2b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test4_2b_auto.f.calculation_inputs()}")
test4_fig3 = plot_relative_difference(
np.array(CI_test4_3_auto.coverage), np.array(CI_test4_3_max.coverage),
plt_figure_num="CI test4 3", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test4_3_auto.f.calculation_inputs()}")
test4_fig3b = plot_relative_difference(
np.array(CI_test4_3b_auto.coverage), np.array(CI_test4_3b_max.coverage),
plt_figure_num="CI test4 3b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test4_3b_auto.f.calculation_inputs()}")
from .service_region import ServiceRegion
import numpy as np
from ..utils.dim2_distance import dim2_distance
from ..utils.dfs_dict_by_distance import DFSDictByDistance
from ..channel.small_fade_channel import SmallFadeChannel
from ..channel.large_fade_channel import LargeFadeChannel
from sklearn.cluster import KMeans
class CompServiceRegion(ServiceRegion, SmallFadeChannel, LargeFadeChannel):
def __init__(self, x_min, x_max, y_min, y_max, bs_number, ue_number,
layer=1, power=1.0, bs_distribution="uniform",
if_fix_bs=True,
ue_distribution="gaussian", ue_sigma=0,
path_loss_factor=4.0,
small_fade='Rayleigh'):
ServiceRegion.__init__(self, x_min, x_max, y_min, y_max,
bs_number, ue_number,
layer, power,
bs_distribution, ue_distribution, ue_sigma,
if_fix_bs)
LargeFadeChannel.__init__(self, path_loss_factor)
SmallFadeChannel.__init__(self,
self.bs_number_,
self.ue_number_,
small_fade)
self.cluster_set_ = {}
self.cluster_bs_position_ = {}
self.cluster_ue_position_ = {}
self.cluster_ue_set_ = {}
self.sir_array = np.array([])
self.sir_db_array = np.array([])
def cluster_by_kmeans(self, means_num):
self.cluster_set_ = {}
self.cluster_bs_position_ = {}
kmeans = KMeans(n_clusters=means_num, random_state=0).fit(self.bs_position_)
distance = dim2_distance(self.bs_position_, kmeans.cluster_centers_.T)
means_index = np.argmin(distance, axis=1)
# print(means_index)
# TODO: get self.cluster_set_ and self.cluster_bs_position_ by means_index
# Done
for i in range(means_num):
self.cluster_set_[i] = np.array([], dtype=np.int)
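# Minimal sketch of the TODO above (assumption: distance has shape (bs_number, means_num),
# so means_index[i] is the cluster id of base station i):
for bs_idx, cluster_id in enumerate(means_index):
    self.cluster_set_[cluster_id] = np.append(self.cluster_set_[cluster_id], bs_idx)
# cluster_bs_position_ could then be filled by slicing self.bs_position_ with each index set.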
#%%
import sys
import os
os.chdir(os.path.dirname(os.getcwd())) # change the working directory to one level up
sys.path.append('/Users/mwinding/repos/maggot_models')
from pymaid_creds import url, name, password, token
import pymaid
rm = pymaid.CatmaidInstance(url, token, name, password)
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
# allows text to be editable in Illustrator
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
# font settings
plt.rcParams['font.size'] = 5
plt.rcParams['font.family'] = 'arial'
from src.data import load_metagraph
from src.visualization import CLASS_COLOR_DICT, adjplot
from src.traverse import Cascade, to_transmission_matrix
from src.traverse import TraverseDispatcher
from src.visualization import matrixplot
import connectome_tools.cascade_analysis as casc
import connectome_tools.celltype as ct
import connectome_tools.process_matrix as pm
adj_ad = pm.Promat.pull_adj(type_adj='ad', subgraph='brain')
#%%
# pull sensory annotations and then pull associated skids
order = ['olfactory', 'gustatory-external', 'gustatory-pharyngeal', 'enteric', 'thermo-warm', 'thermo-cold', 'visual', 'noci', 'mechano-Ch', 'mechano-II/III', 'proprio', 'respiratory']
sens = [ct.Celltype(name, ct.Celltype_Analyzer.get_skids_from_meta_annotation(f'mw {name}')) for name in order]
input_skids_list = [x.get_skids() for x in sens]
input_skids = ct.Celltype_Analyzer.get_skids_from_meta_meta_annotation('mw brain sensory modalities')
output_names = pymaid.get_annotated('mw brain outputs').name
output_skids_list = list(map(pymaid.get_skids_by_annotation, pymaid.get_annotated('mw brain outputs').name))
output_skids = [val for sublist in output_skids_list for val in sublist]
#%%
# cascades from each sensory modality
import pickle
p = 0.05
max_hops = 10
n_init = 1000
simultaneous = True
adj=adj_ad
'''
input_hit_hist_list = casc.Cascade_Analyzer.run_cascades_parallel(source_skids_list=input_skids_list, source_names = order, stop_skids=output_skids,
adj=adj_ad, p=p, max_hops=max_hops, n_init=n_init, simultaneous=simultaneous)
pickle.dump(input_hit_hist_list, open('data/cascades/sensory-modality-cascades_1000-n_init.p', 'wb'))
'''
input_hit_hist_list = pickle.load(open('data/cascades/sensory-modality-cascades_1000-n_init.p', 'rb'))
# %%
# plot sensory cascades raw
fig, axs = plt.subplots(len(input_hit_hist_list), 1, figsize=(10, 20))
fig.tight_layout(pad=2.0)
for i, hit_hist in enumerate(input_hit_hist_list):
ax = axs[i]
sns.heatmap(hit_hist.skid_hit_hist, ax=ax)
ax.set_xlabel(hit_hist.get_name())
plt.savefig('cascades/plots/sensory_modality_signals.pdf', format='pdf', bbox_inches='tight')
os.system('say "code executed"')
# %%
# how close are descending neurons to sensory?
# load output types
dVNC = pymaid.get_skids_by_annotation('mw dVNC')
dSEZ = pymaid.get_skids_by_annotation('mw dSEZ')
RGN = pymaid.get_skids_by_annotation('mw RGN')
# generate Cascade_Analyzer objects containing name of pathway and the hit_hist to each output type
dVNC_hits = [casc.Cascade_Analyzer(f'{hit_hist.get_name()}-dVNC', hit_hist.skid_hit_hist.loc[dVNC, :]) for hit_hist in input_hit_hist_list]
dSEZ_hits = [casc.Cascade_Analyzer(f'{hit_hist.get_name()}-dSEZ', hit_hist.skid_hit_hist.loc[dSEZ, :]) for hit_hist in input_hit_hist_list]
RGN_hits = [casc.Cascade_Analyzer(f'{hit_hist.get_name()}-RGN', hit_hist.skid_hit_hist.loc[RGN, :]) for hit_hist in input_hit_hist_list]
dVNC_hits = [casc.Cascade_Analyzer([hit_hist.get_name(), 'dVNC'], hit_hist.skid_hit_hist.loc[dVNC, :]) for hit_hist in input_hit_hist_list]
dSEZ_hits = [casc.Cascade_Analyzer([hit_hist.get_name(), 'dSEZ'], hit_hist.skid_hit_hist.loc[dSEZ, :]) for hit_hist in input_hit_hist_list]
RGN_hits = [casc.Cascade_Analyzer([hit_hist.get_name(), 'RGN'], hit_hist.skid_hit_hist.loc[RGN, :]) for hit_hist in input_hit_hist_list]
# max possible hits that all output neuron types could receive
max_dVNC_hits = len(dVNC_hits[0].skid_hit_hist.index)*n_init
max_dSEZ_hits = len(dSEZ_hits[0].skid_hit_hist.index)*n_init
max_RGN_hits = len(RGN_hits[0].skid_hit_hist.index)*n_init
# organize data so that each sens -> dVNC, dSEZ, RGN is intercalated
sens_output_data = list(zip(dVNC_hits, dSEZ_hits, RGN_hits))
sens_output_data = [x for sublist in sens_output_data for x in sublist]
sens_output_df = pd.DataFrame([x.skid_hit_hist.sum(axis=0) for x in sens_output_data])
# set up multiindex
sens_output_df['source']=[x.get_name()[0] for x in sens_output_data]
sens_output_df['target']=[x.get_name()[1] for x in sens_output_data]
sens_output_df = sens_output_df.set_index(['source', 'target'])
# normalize by max possible input to each output type (num neurons * n_init)
sens_output_df_plot = sens_output_df.copy()
sens_output_df_plot.loc[(slice(None), 'dVNC'), :] = sens_output_df_plot.loc[(slice(None), 'dVNC'), :]/max_dVNC_hits
sens_output_df_plot.loc[(slice(None), 'dSEZ'), :] = sens_output_df_plot.loc[(slice(None), 'dSEZ'), :]/max_dSEZ_hits
sens_output_df_plot.loc[(slice(None), 'RGN'), :] = sens_output_df_plot.loc[(slice(None), 'RGN'), :]/max_RGN_hits
import cmasher as cmr
fig, ax = plt.subplots(1, 1, figsize=(1.5, 2))
fig.tight_layout(pad=3.0)
vmax = 0.35
cmap = cmr.torch
sns.heatmap(sens_output_df_plot, ax = ax, cmap = cmap, vmax=vmax)
ax.set_title('Signal to brain outputs')
ax.set(xlim = (0, 11))
plt.savefig('cascades/plots/sensory_modality_signals_to_output.pdf', format='pdf', bbox_inches='tight')
# determine mean/median hop distance from sens -> output
def counts_to_list(count_list):
expanded_counts = []
for i, count in enumerate(count_list):
expanded = np.repeat(i, count)
expanded_counts.append(expanded)
return([x for sublist in expanded_counts for x in sublist])
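# e.g. counts_to_list([2, 0, 3]) -> [0, 0, 2, 2, 2]; the mean/median computed below are
# therefore hop depths weighted by the number of cascade visits at each hop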
all_sens_output_dist = []
for row in sens_output_df.iterrows():
list_hits = counts_to_list(row[1])
all_sens_output_dist.append([row[0][0], row[0][1], np.mean(list_hits), np.median(list_hits)])
all_sens_output_dist = pd.DataFrame(all_sens_output_dist, columns = ['source', 'target', 'mean_hop', 'median_hop'])
# %%
# plotting visits by modality to each descending neuron pair (dVNC, dSEZ, RGN)
# supplemental figure
dVNC_hits_summed = [pd.DataFrame(x.skid_hit_hist.iloc[:, 0:8].sum(axis=1), columns=[x.get_name()[0]]) for x in dVNC_hits]
dVNC_hits_summed = pd.concat(dVNC_hits_summed, axis=1)
dVNC_hits_pairwise = pm.Promat.convert_df_to_pairwise(dVNC_hits_summed)
dSEZ_hits_summed = [pd.DataFrame(x.skid_hit_hist.iloc[:, 0:8].sum(axis=1), columns=[x.get_name()[0]]) for x in dSEZ_hits]
dSEZ_hits_summed = pd.concat(dSEZ_hits_summed, axis=1)
dSEZ_hits_pairwise = pm.Promat.convert_df_to_pairwise(dSEZ_hits_summed)
RGN_hits_summed = [pd.DataFrame(x.skid_hit_hist.iloc[:, 0:8].sum(axis=1), columns=[x.get_name()[0]]) for x in RGN_hits]
RGN_hits_summed = pd.concat(RGN_hits_summed, axis=1)
RGN_hits_pairwise = pm.Promat.convert_df_to_pairwise(RGN_hits_summed)
fig, axs = plt.subplots(
3, 1, figsize=(8, 8)
)
fig.tight_layout(pad=3.0)
ax = axs[0]
ax.get_xaxis().set_visible(False)
ax.set_title('Signal to Individual VNC Descending Neurons')
sns.heatmap(dVNC_hits_pairwise.T, ax = ax)
ax = axs[1]
ax.get_xaxis().set_visible(False)
ax.set_title('Signal to Individual SEZ Descending Neurons')
sns.heatmap(dSEZ_hits_pairwise.T, ax = ax)
ax = axs[2]
ax.set_xlabel('Individual Ring Gland Neurons')
ax.get_xaxis().set_visible(False)
ax.set_title('Signal to Individual Ring Gland Neurons')
sns.heatmap(RGN_hits_pairwise.T, ax = ax)
plt.savefig('cascades/plots/signal_to_individual_outputs.pdf', format='pdf', bbox_inches='tight')
#%%
# alternative clustermap plot of descending neurons
# supplemental figure plot
vmax = n_init
fig = sns.clustermap(dVNC_hits_pairwise.T, row_cluster = False, figsize = (8, 4), vmax=vmax)
ax = fig.ax_heatmap
ax.set_xlabel('Individual dVNCs')
ax.set_xticks([])
fig.savefig('cascades/plots/signal_to_individual_dVNCs.pdf', format='pdf', bbox_inches='tight')
fig = sns.clustermap(dSEZ_hits_pairwise.T, row_cluster = False, figsize = (8, 4), vmax=vmax)
ax = fig.ax_heatmap
ax.set_xlabel('Individual dSEZs')
ax.set_xticks([])
fig.savefig('cascades/plots/signal_to_individual_dSEZs.pdf', format='pdf', bbox_inches='tight')
fig = sns.clustermap(RGN_hits_pairwise.T, row_cluster = False, figsize = (8, 4), vmax=vmax)
ax = fig.ax_heatmap
ax.set_xlabel('Individual RG neurons')
ax.set_xticks([])
fig.savefig('cascades/plots/signal_to_individual_RGs.pdf', format='pdf', bbox_inches='tight')
# %%
# distribution summary of signal to output neurons
dVNC_dist = (dVNC_hits_pairwise.groupby('pair_id').sum()>=n_init).sum(axis=1)
dSEZ_dist = (dSEZ_hits_pairwise.groupby('pair_id').sum()>=n_init).sum(axis=1)
RGN_dist = (RGN_hits_pairwise.groupby('pair_id').sum()>=n_init).sum(axis=1)
dist_data = pd.DataFrame(list(zip(dVNC_dist.values, ['dVNC']*len(dVNC_dist))) + list(zip(dSEZ_dist.values, ['dSEZ']*len(dSEZ_dist))) + list(zip(RGN_dist.values, ['RGN']*len(RGN_dist))),
columns = ['combinations', 'type'])
fig, ax = plt.subplots(1,1, figsize=(4,4))
sns.stripplot(data = dist_data, y = 'combinations', x='type', s=1, ax=ax)
fig.savefig('cascades/plots/signal_to_outputs_dist.pdf', format='pdf', bbox_inches='tight')
fig, ax = plt.subplots(1,1, figsize=(4,4))
sns.histplot(data = dVNC_dist-0.5, ax=ax, bins=len(sens))
fig.savefig('cascades/plots/signal_to_dVNC_dist.pdf', format='pdf', bbox_inches='tight')
fig, ax = plt.subplots(1,1, figsize=(4,4))
sns.histplot(data = dSEZ_dist-0.5, ax=ax, bins=len(sens))
fig.savefig('cascades/plots/signal_to_dSEZ_dist.pdf', format='pdf', bbox_inches='tight')
fig, ax = plt.subplots(1,1, figsize=(4,4))
sns.histplot(data = RGN_dist-0.5, ax=ax, bins=len(sens))
fig.savefig('cascades/plots/signal_to_RGN_dist.pdf', format='pdf', bbox_inches='tight')
# %%
# parallel coordinates plots
from pandas.plotting import parallel_coordinates
linewidth = 0.75
alpha = 0.8
very_low_color = '#D7DF23'
low_color = '#C2DD26'
med_color = '#8DC63F'
high_color = '#00A651'
data = dVNC_hits_pairwise.groupby('pair_id').sum()
very_low = (dVNC_dist<=1)
low = (dVNC_dist>1) & (dVNC_dist<4)
med = (dVNC_dist>=4) & (dVNC_dist<8)
high = dVNC_dist>=8
data['type'] = [0]*len(data.index)
data.loc[high, 'type'] = ['high']*len(data.loc[high, 'type'])
data.loc[med, 'type'] = ['med']*len(data.loc[med, 'type'])
data.loc[low, 'type'] = ['low']*len(data.loc[low, 'type'])
data.loc[very_low, 'type'] = ['very_low']*len(data.loc[very_low, 'type'])
data = data.sort_values(by='type')
fig, ax = plt.subplots(1,1, figsize=(4,4))
parallel_coordinates(data, class_column='type', color = [high_color, med_color, low_color, very_low_color], alpha=alpha, linewidth=linewidth)
fig.savefig('cascades/plots/signal-to-dVNC_parallel-coordinates.pdf', format='pdf', bbox_inches='tight')
data = dSEZ_hits_pairwise.groupby('pair_id').sum()
very_low = (dSEZ_dist<=1)
low = (dSEZ_dist>1) & (dSEZ_dist<4)
med = (dSEZ_dist>=4) & (dSEZ_dist<8)
high = dSEZ_dist>=8
data['type'] = [0]*len(data.index)
data.loc[high, 'type'] = ['high']*len(data.loc[high, 'type'])
data.loc[med, 'type'] = ['med']*len(data.loc[med, 'type'])
data.loc[low, 'type'] = ['low']*len(data.loc[low, 'type'])
data.loc[very_low, 'type'] = ['very_low']*len(data.loc[very_low, 'type'])
data = data.sort_values(by='type')
fig, ax = plt.subplots(1,1, figsize=(4,4))
parallel_coordinates(data, class_column='type', color = [high_color, low_color, med_color, very_low_color], alpha=alpha, linewidth=linewidth)
fig.savefig('cascades/plots/signal-to-dSEZ_parallel-coordinates.pdf', format='pdf', bbox_inches='tight')
data = RGN_hits_pairwise.groupby('pair_id').sum()
very_low = (RGN_dist<=1)
low = (RGN_dist>1) & (RGN_dist<4)
med = (RGN_dist>=4) & (RGN_dist<8)
high = RGN_dist>=8
data['type'] = [0]*len(data.index)
data.loc[high, 'type'] = ['high']*len(data.loc[high, 'type'])
data.loc[med, 'type'] = ['med']*len(data.loc[med, 'type'])
data.loc[low, 'type'] = ['low']*len(data.loc[low, 'type'])
data.loc[very_low, 'type'] = ['very_low']*len(data.loc[very_low, 'type'])
data = data.sort_values(by='type')
fig, ax = plt.subplots(1,1, figsize=(4,4))
parallel_coordinates(data, class_column='type', color = [high_color, low_color, very_low_color, med_color], alpha=alpha, linewidth=linewidth)
fig.savefig('cascades/plots/signal-to-RGN_parallel-coordinates.pdf', format='pdf', bbox_inches='tight')
# %%
# PCA of descending input
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
data = dVNC_hits_pairwise.groupby('pair_id').sum()
data['type'] = ['dVNC']*len(data)
data2 = dSEZ_hits_pairwise.groupby('pair_id').sum()
data2['type'] = ['dSEZ']*len(data2)
data3 = RGN_hits_pairwise.groupby('pair_id').sum()
data3['type'] = ['RGN']*len(data3)
data = pd.concat([data, data2, data3])
x = data.drop(columns='type').values
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data = principalComponents
, columns = ['pc1', 'pc2'], index=data.index)
principalDf['type'] = data['type']
ylim = (-2.25, 2.25)
xlim = (-5, 6)
size = 3
alpha = 0.75
# plot dVNC PCA
plot_data = principalDf[principalDf.type=='dVNC']
low = (dVNC_dist<4)
med = (dVNC_dist>=4) & (dVNC_dist<10)
high = dVNC_dist>=10
plot_data.loc[high, 'type'] = ['high']*len(plot_data.loc[high, 'type'])
plot_data.loc[med, 'type'] = ['med']*len(plot_data.loc[med, 'type'])
plot_data.loc[low, 'type'] = ['low']*len(plot_data.loc[low, 'type'])
fig, ax = plt.subplots(1,1,figsize=(2,2))
sns.scatterplot(data = plot_data, x='pc1', y='pc2', hue='type', hue_order = ['high', 'med', 'low'], s=size, linewidth=0, alpha=alpha, ax=ax)
ax.set(xlim=xlim, ylim=ylim)
fig.savefig('cascades/plots/signal-to-dVNC_PCA.pdf', format='pdf', bbox_inches='tight')
# plot dSEZ PCA
plot_data = principalDf[principalDf.type=='dSEZ']
low = (dSEZ_dist<4)
med = (dSEZ_dist>=4) & (dSEZ_dist<10)
high = dSEZ_dist>=10
plot_data.loc[high, 'type'] = ['high']*len(plot_data.loc[high, 'type'])
plot_data.loc[med, 'type'] = ['med']*len(plot_data.loc[med, 'type'])
plot_data.loc[low, 'type'] = ['low']*len(plot_data.loc[low, 'type'])
fig, ax = plt.subplots(1,1,figsize=(2,2))
sns.scatterplot(data = plot_data, x='pc1', y='pc2', hue='type', hue_order = ['high', 'med', 'low'], s=size, linewidth=0, alpha=alpha, ax=ax)
ax.set(xlim=xlim, ylim=ylim)
fig.savefig('cascades/plots/signal-to-dSEZ_PCA.pdf', format='pdf', bbox_inches='tight')
# plot RGN PCA
plot_data = principalDf[principalDf.type=='RGN']
low = (RGN_dist<4)
med = (RGN_dist>=4) & (RGN_dist<10)
high = RGN_dist>=10
plot_data.loc[high, 'type'] = ['high']*len(plot_data.loc[high, 'type'])
plot_data.loc[med, 'type'] = ['med']*len(plot_data.loc[med, 'type'])
plot_data.loc[low, 'type'] = ['low']*len(plot_data.loc[low, 'type'])
fig, ax = plt.subplots(1,1,figsize=(2,2))
sns.scatterplot(data = plot_data, x='pc1', y='pc2', hue='type', hue_order = ['high', 'med', 'low'], s=size, linewidth=0, alpha=alpha, ax=ax)
ax.set(xlim=xlim, ylim=ylim)
fig.savefig('cascades/plots/signal-to-RGN_PCA.pdf', format='pdf', bbox_inches='tight')
# %%
# bar plot of high, med, low categories for each type of output
integration_data = [['dVNC', 'high', sum(dVNC_dist>=10)],
['dVNC', 'med', sum((dVNC_dist>=4) & (dVNC_dist<10))],
['dVNC', 'low', sum(dVNC_dist<4)],
['dSEZ', 'high', sum(dSEZ_dist>=10)],
['dSEZ', 'med', sum((dSEZ_dist>=4) & (dSEZ_dist<10))],
['dSEZ', 'low', sum(dSEZ_dist<4)],
['RGN', 'high', sum(RGN_dist>=10)],
['RGN', 'med', sum((RGN_dist>=4) & (RGN_dist<10))],
['RGN', 'low', sum(RGN_dist<4)]]
integration_data = pd.DataFrame(integration_data, columns = ['class', 'type', 'count'])
fig, ax = plt.subplots(1,1,figsize=(2,2))
sns.barplot(data = integration_data, x='class', y='count', hue='type', hue_order = ['high', 'med', 'low'], ax=ax)
fig.savefig('cascades/plots/signal-integration-counts_dVNCs.pdf', format='pdf', bbox_inches='tight')
# %%
##########
# **** Note Well: REALLY old code below, deprecated or never used in paper ****
##########
# %%
# num of descendings at each level
# this assumes that thresholding per node is useful; it might not be
threshold = 50
num_dVNC_dsSens = pd.DataFrame(([np.array(dVNC_ORN_hit>threshold).sum(axis = 0),
np.array(dVNC_AN_hit>threshold).sum(axis = 0),
np.array(dVNC_MN_hit>threshold).sum(axis = 0),
np.array(dVNC_A00c_hit>threshold).sum(axis = 0),
np.array(dVNC_vtd_hit>threshold).sum(axis = 0),
np.array(dVNC_thermo_hit>threshold).sum(axis = 0),
np.array(dVNC_photo_hit>threshold).sum(axis = 0)]),
index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
num_dSEZ_dsSens = pd.DataFrame(([np.array(dSEZ_ORN_hit>threshold).sum(axis = 0),
np.array(dSEZ_AN_hit>threshold).sum(axis = 0),
np.array(dSEZ_MN_hit>threshold).sum(axis = 0),
np.array(dSEZ_A00c_hit>threshold).sum(axis = 0),
np.array(dSEZ_vtd_hit>threshold).sum(axis = 0),
np.array(dSEZ_thermo_hit>threshold).sum(axis = 0),
np.array(dSEZ_photo_hit>threshold).sum(axis = 0)]),
index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
num_RG_dsSens = pd.DataFrame(([np.array(RG_ORN_hit>threshold).sum(axis = 0),
np.array(RG_AN_hit>threshold).sum(axis = 0),
np.array(RG_MN_hit>threshold).sum(axis = 0),
np.array(RG_A00c_hit>threshold).sum(axis = 0),
np.array(RG_vtd_hit>threshold).sum(axis = 0),
np.array(RG_thermo_hit>threshold).sum(axis = 0),
np.array(RG_photo_hit>threshold).sum(axis = 0)]),
index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
fig, axs = plt.subplots(
3, 1, figsize=(8, 8)
)
fig.tight_layout(pad=3.0)
vmax = 50
cmap = cmr.heat
ax = axs[0]
ax.set_title('Number of VNC Descending Neurons downstream of Sensory Signal')
sns.heatmap(num_dVNC_dsSens, ax = ax, vmax = vmax, rasterized=True, cmap = cmap)
ax.set(xlim = (0, 13))
ax = axs[1]
ax.set_title('Number of SEZ Descending Neurons downstream of Sensory Signal')
sns.heatmap(num_dSEZ_dsSens, ax = ax, vmax = vmax, rasterized=True, cmap = cmap)
ax.set(xlim = (0, 13))
ax = axs[2]
ax.set_title('Number of Ring Gland Neurons downstream of Sensory Signal')
sns.heatmap(num_RG_dsSens, ax = ax, vmax = vmax, rasterized=True, cmap = cmap)
ax.set_xlabel('Hops from sensory')
ax.set(xlim = (0, 13))
plt.savefig('cascades/plots/number_outputs_ds_each_sensory_modality.pdf', format='pdf', bbox_inches='tight')
# %%
# Which modalities is each output associated with?
dVNC_hits = pd.DataFrame(([ dVNC_skids,
dVNC_ORN_hit.sum(axis = 1),
dVNC_AN_hit.sum(axis = 1),
dVNC_MN_hit.sum(axis = 1),
dVNC_thermo_hit.sum(axis = 1),
dVNC_photo_hit.sum(axis = 1),
dVNC_A00c_hit.sum(axis = 1),
dVNC_vtd_hit.sum(axis = 1)]),
index = ['dVNC_skid', 'ORN', 'AN', 'MN', 'thermo', 'photo', 'A00c', 'vtd'])
dVNC_hits = dVNC_hits.T
dSEZ_hits = pd.DataFrame(([ dSEZ_skids,
dSEZ_ORN_hit.sum(axis = 1),
dSEZ_AN_hit.sum(axis = 1),
dSEZ_MN_hit.sum(axis = 1),
dSEZ_thermo_hit.sum(axis = 1),
dSEZ_photo_hit.sum(axis = 1),
dSEZ_A00c_hit.sum(axis = 1),
dSEZ_vtd_hit.sum(axis = 1)]),
index = ['dSEZ_skid', 'ORN', 'AN', 'MN', 'thermo', 'photo', 'A00c', 'vtd'])
dSEZ_hits = dSEZ_hits.T
RG_hits = pd.DataFrame(([ RG_skids,
RG_ORN_hit.sum(axis = 1),
RG_AN_hit.sum(axis = 1),
RG_MN_hit.sum(axis = 1),
RG_thermo_hit.sum(axis = 1),
RG_photo_hit.sum(axis = 1),
RG_A00c_hit.sum(axis = 1),
RG_vtd_hit.sum(axis = 1)]),
index = ['RG_skid', 'ORN', 'AN', 'MN', 'thermo', 'photo', 'A00c', 'vtd'])
RG_hits = RG_hits.T
# %%
# sensory characterization of each layer of each sensory modality
import plotly.express as px
from pandas.plotting import parallel_coordinates
# replacement if I want to use this later
#sensory_profiles = [hit_hist.skid_hit_hist.sum(axis=1).values for hit_hist in input_hit_hist_list]
#sensory_profiles = pd.DataFrame(sensory_profiles, index=[hit_hist.get_name() for hit_hist in input_hit_hist_list], columns = input_hit_hist_list[0].skid_hit_hist.index)
sensory_profile = pd.DataFrame(([ORN_hit_hist.sum(axis = 1),
AN_hit_hist.sum(axis = 1),
MN_hit_hist.sum(axis = 1),
A00c_hit_hist.sum(axis = 1),
vtd_hit_hist.sum(axis = 1),
thermo_hit_hist.sum(axis = 1),
photo_hit_hist.sum(axis = 1)]),
index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
sensory_profile0 = pd.DataFrame(([ORN_hit_hist[:, 0],
AN_hit_hist[:, 0],
MN_hit_hist[:, 0],
A00c_hit_hist[:, 0],
vtd_hit_hist[:, 0],
thermo_hit_hist[:, 0],
photo_hit_hist[:, 0]]),
index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
sensory_profile1 = pd.DataFrame(([ORN_hit_hist[:, 1],
AN_hit_hist[:, 1],
MN_hit_hist[:, 1],
A00c_hit_hist[:, 1],
vtd_hit_hist[:, 1],
thermo_hit_hist[:, 1],
photo_hit_hist[:, 1]]),
index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
sensory_profile2 = pd.DataFrame(([ORN_hit_hist[:, 2],
AN_hit_hist[:, 2],
MN_hit_hist[:, 2],
A00c_hit_hist[:, 2],
vtd_hit_hist[:, 2],
thermo_hit_hist[:, 2],
photo_hit_hist[:, 2]]),
index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
sensory_profile3 = pd.DataFrame(([ORN_hit_hist[:, 3],
AN_hit_hist[:, 3],
MN_hit_hist[:, 3],
A00c_hit_hist[:, 3],
vtd_hit_hist[:, 3],
thermo_hit_hist[:, 3],
photo_hit_hist[:, 3]]),
index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
sensory_profile4 = pd.DataFrame(([ORN_hit_hist[:, 4],
AN_hit_hist[:, 4],
MN_hit_hist[:, 4],
A00c_hit_hist[:, 4],
vtd_hit_hist[:, 4],
thermo_hit_hist[:, 4],
photo_hit_hist[:, 4]]),
index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
sensory_profile5 = pd.DataFrame(([ORN_hit_hist[:, 5],
AN_hit_hist[:, 5],
MN_hit_hist[:, 5],
A00c_hit_hist[:, 5],
vtd_hit_hist[:, 5],
thermo_hit_hist[:, 5],
photo_hit_hist[:, 5]]),
index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
sensory_profile6 = pd.DataFrame(([ORN_hit_hist[:, 6],
AN_hit_hist[:, 6],
MN_hit_hist[:, 6],
A00c_hit_hist[:, 6],
vtd_hit_hist[:, 6],
thermo_hit_hist[:, 6],
photo_hit_hist[:, 6]]),
index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
sensory_profile7 = pd.DataFrame(([ORN_hit_hist[:, 7],
AN_hit_hist[:, 7],
MN_hit_hist[:, 7],
A00c_hit_hist[:, 7],
vtd_hit_hist[:, 7],
thermo_hit_hist[:, 7],
photo_hit_hist[:, 7]]),
index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
sensory_profile8 = pd.DataFrame(([ORN_hit_hist[:, 8],
AN_hit_hist[:, 8],
MN_hit_hist[:, 8],
A00c_hit_hist[:, 8],
vtd_hit_hist[:, 8],
thermo_hit_hist[:, 8],
photo_hit_hist[:, 8]]),
index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
sensory_profile = sensory_profile.T
sensory_profile0 = sensory_profile0.T
sensory_profile1 = sensory_profile1.T
sensory_profile2 = sensory_profile2.T
sensory_profile3 = sensory_profile3.T
sensory_profile4 = sensory_profile4.T
sensory_profile5 = sensory_profile5.T
sensory_profile6 = sensory_profile6.T
sensory_profile7 = sensory_profile7.T
sensory_profile8 = sensory_profile8.T
#%%
# multisensory elements per layer (apples to apples)
threshold = 25
ORN0_indices = np.where(ORN_hit_hist[:, 0]>threshold)[0]
ORN1_indices = np.where(ORN_hit_hist[:, 1]>threshold)[0]
ORN2_indices = np.where(ORN_hit_hist[:, 2]>threshold)[0]
ORN3_indices = np.where(ORN_hit_hist[:, 3]>threshold)[0]
ORN4_indices = np.where(ORN_hit_hist[:, 4]>threshold)[0]
ORN5_indices = np.where(ORN_hit_hist[:, 5]>threshold)[0]
ORN6_indices = np.where(ORN_hit_hist[:, 6]>threshold)[0]
ORN7_indices = np.where(ORN_hit_hist[:, 7]>threshold)[0]
ORN8_indices = np.where(ORN_hit_hist[:, 8]>threshold)[0]
AN0_indices = np.where(AN_hit_hist[:, 0]>threshold)[0]
AN1_indices = np.where(AN_hit_hist[:, 1]>threshold)[0]
AN2_indices = np.where(AN_hit_hist[:, 2]>threshold)[0]
AN3_indices = np.where(AN_hit_hist[:, 3]>threshold)[0]
AN4_indices = np.where(AN_hit_hist[:, 4]>threshold)[0]
AN5_indices = np.where(AN_hit_hist[:, 5]>threshold)[0]
AN6_indices = np.where(AN_hit_hist[:, 6]>threshold)[0]
AN7_indices = np.where(AN_hit_hist[:, 7]>threshold)[0]
AN8_indices = np.where(AN_hit_hist[:, 8]>threshold)[0]
MN0_indices = np.where(MN_hit_hist[:, 0]>threshold)[0]
MN1_indices = np.where(MN_hit_hist[:, 1]>threshold)[0]
MN2_indices = np.where(MN_hit_hist[:, 2]>threshold)[0]
MN3_indices = np.where(MN_hit_hist[:, 3]>threshold)[0]
MN4_indices = np.where(MN_hit_hist[:, 4]>threshold)[0]
MN5_indices = np.where(MN_hit_hist[:, 5]>threshold)[0]
MN6_indices = np.where(MN_hit_hist[:, 6]>threshold)[0]
MN7_indices = np.where(MN_hit_hist[:, 7]>threshold)[0]
MN8_indices = np.where(MN_hit_hist[:, 8]>threshold)[0]
A00c0_indices = np.where(A00c_hit_hist[:, 0]>threshold)[0]
A00c1_indices = np.where(A00c_hit_hist[:, 1]>threshold)[0]
A00c2_indices = np.where(A00c_hit_hist[:, 2]>threshold)[0]
A00c3_indices = np.where(A00c_hit_hist[:, 3]>threshold)[0]
A00c4_indices = np.where(A00c_hit_hist[:, 4]>threshold)[0]
A00c5_indices = np.where(A00c_hit_hist[:, 5]>threshold)[0]
A00c6_indices = np.where(A00c_hit_hist[:, 6]>threshold)[0]
A00c7_indices = np.where(A00c_hit_hist[:, 7]>threshold)[0]
A00c8_indices = np.where(A00c_hit_hist[:, 8]>threshold)[0]
vtd0_indices = np.where(vtd_hit_hist[:, 0]>threshold)[0]
vtd1_indices = np.where(vtd_hit_hist[:, 1]>threshold)[0]
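# The per-modality, per-hop index extraction above can be generated compactly.
# Minimal sketch (assumption: the *_hit_hist arrays are (n_neurons x >=9) numpy arrays,
# as they are used throughout this deprecated section):
hit_hists = {'ORN': ORN_hit_hist, 'AN': AN_hit_hist, 'MN': MN_hit_hist,
             'A00c': A00c_hit_hist, 'vtd': vtd_hit_hist,
             'thermo': thermo_hit_hist, 'photo': photo_hit_hist}
layer_indices = {(name, hop): np.where(hh[:, hop] > threshold)[0]
                 for name, hh in hit_hists.items() for hop in range(9)}
# e.g. layer_indices[('ORN', 0)] reproduces ORN0_indices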
import os
import pandas as pd
import numpy as np
# dataset_name = 'realword2016_dataset'
def parseDataset(dataset_name, num_tasks, time_slot, skip_slot, train_split_ratio, single_sensor):
if 'realworld2016' in dataset_name:
root_path = 'data/{}'.format(dataset_name)
# sensors = ['acc', 'gps', 'gyr', 'lig', 'mag', 'mic']
if single_sensor:
sensors = ['acc']
dim = 3
else:
sensors = ['acc', 'gyr', 'mag'] # use the acc, gyr and mag sensors in the multi-sensor case
dim = 9
actions = ['climbingdown', 'climbingup', 'jumping', 'lying', 'running', 'sitting', 'standing', 'walking']
views = ['chest', 'forearm', 'head', 'shin', 'thigh', 'upperarm', 'waist']
skip_window_size = int(skip_slot * 50.)
window_size = int(time_slot * 50.) # Frequency of sensor in Hz
# Determine the max_len for each action
len_map = {'climbingdown': 20000, 'climbingup': 20000, 'jumping': 4000, 'lying': 30000, 'running': 30000, 'sitting': 30000,
'standing': 30000, 'walking': 30000}
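# Minimal sketch of how window_size and skip_window_size are typically used for
# sliding-window segmentation (assumption: the actual segmentation code sits outside
# this excerpt and may differ):
def sliding_windows(seq, window, skip):
    # split a (time x channels) array into overlapping fixed-length windows
    return [seq[s:s + window] for s in range(0, len(seq) - window + 1, skip)]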
# Save paths
if not os.path.exists(os.path.join(root_path, 'multi_sensors_acc_gyr_mag')):
os.mkdir(os.path.join(root_path, 'multi_sensors_acc_gyr_mag'))
for folder in ['train-sup', 'train-unsup', 'val', 'test']:
os.mkdir(os.path.join(root_path, 'multi_sensors_acc_gyr_mag', folder))
if not os.path.exists(os.path.join(root_path, 'single_sensor_acc')):
os.mkdir(os.path.join(root_path, 'single_sensor_acc'))
for folder in ['train-sup', 'train-unsup', 'val', 'test']:
os.mkdir(os.path.join(root_path, 'single_sensor_acc', folder))
if not single_sensor:
task_save_path_data = os.path.join(root_path, 'multi_sensors_acc_gyr_mag')
task_save_path_label = os.path.join(root_path, 'multi_sensors_acc_gyr_mag')
else:
task_save_path_data = os.path.join(root_path, 'single_sensor_acc')
task_save_path_label = os.path.join(root_path, 'single_sensor_acc')
for taskid in range(1, num_tasks+1):
task_path = os.path.join(root_path, 'proband{}/data'.format(taskid))
train_data, unsup_data, val_data, test_data = [], [], [], []
train_labels, unsup_labels, val_labels, test_labels = [], [], [], []
print(taskid)
try:
for i, action in enumerate(actions):
print(action)
view_data = []
for view in views:
print(view)
max_len = len_map[action]
sensor_data = []
for sensor in sensors:
if sensor == 'gyr':
new_sensor_name = 'Gyroscope'
elif sensor == 'lig':
new_sensor_name = 'Light'
elif sensor == 'mag':
new_sensor_name = 'MagneticField'
elif sensor == 'mic':
new_sensor_name = 'Microphone'
elif sensor == 'gps':
new_sensor_name = 'GPS'
else:
new_sensor_name = 'acc'
temp_path = os.path.join(task_path, '{}_{}_csv'.format(sensor, action))
temp_data_path = os.path.join(temp_path, '{}_{}_{}.csv'.format(new_sensor_name, action, view))
if not os.path.exists(temp_data_path):
temp_data_path = os.path.join(temp_path, '{}_{}_2_{}.csv'.format(new_sensor_name, action, view))
temp_data = pd.read_csv(temp_data_path, header=0)
temp_data = temp_data.iloc[:max_len, 2:].values
sensor_data.append(temp_data)
sensor_data = np.concatenate(sensor_data, axis=1)
#!/usr/bin/env python
u"""
read_cryosat_L1b.py
Written by <NAME> (02/2020)
Reads CryoSat Level-1b data products from baselines A, B and C
Reads CryoSat Level-1b netCDF4 data products from baseline D
Supported CryoSat Modes: LRM, SAR, SARin, FDM, SID, GDR
INPUTS:
full_filename: full path of CryoSat .DBL or .nc file
OUTPUTS:
Location: Time and Orbit Group
Data: Measurements Group
Geometry: External Corrections Group
Waveform_1Hz: Average Waveforms Group
Waveform_20Hz: Waveforms Group (with SAR/SARIN Beam Behavior Parameters)
METADATA: MPH, SPH and DSD Header data
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
netCDF4: Python interface to the netCDF C library
https://unidata.github.io/netcdf4-python/netCDF4/index.html
UPDATE HISTORY:
Updated 02/2020: tilde-expansion of cryosat-2 files before opening
add scale factors function for converting packed units in binary files
convert from hard to soft tabulation
Updated 11/2019: empty placeholder dictionary for baseline D DSD headers
Updated 09/2019: added netCDF4 read function for baseline D
will output with same variable names as the binary read functions
Updated 04/2019: USO correction signed 32 bit int
Updated 10/2018: updated header read functions for python3
Updated 05/2016: using __future__ print and division functions
Written 03/2016
"""
from __future__ import print_function
from __future__ import division
import os
import re
import netCDF4
import numpy as np
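#-- Illustrative helper (a sketch, not part of the original module): most fields below are
#-- packed integers with a documented scale, e.g. Lat/Lon in 1e-7 degrees and Alt in mm,
#-- so converting them after reading is a single multiplication.
def unpack_scaled(field, scale):
    #-- convert a packed integer array to floating point using its scale factor
    return np.asarray(field, dtype=np.float64)*scale
#-- e.g. latitude in degrees: unpack_scaled(Location['Lat'], 1e-7)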
#-- PURPOSE: Initiate L1b MDS variables for CryoSat Baselines A and B
def cryosat_baseline_AB(fid, n_records, MODE):
n_SARIN_RW = 512
n_SAR_RW = 128
n_LRM_RW = 128
n_blocks = 20
n_BeamBehaviourParams = 50
#-- CryoSat-2 Time and Orbit Group
Location = {}
#-- Time: day part
Location['Day'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Time: second part
Location['Second'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- Time: microsecond part
Location['Micsec'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- USO correction factor
Location['USO_Corr'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Mode ID
Location['Mode_ID'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Source sequence counter
Location['SSC'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Instrument configuration
Location['Inst_config'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- Record Counter
Location['Rec_Count'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Location['Lat'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Location['Lon'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Location['Alt'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instantaneous altitude rate derived from orbit: packed units (mm/s, 1e-3 m/s)
Location['Alt_rate'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Satellite velocity vector. In ITRF: packed units (mm/s, 1e-3 m/s)
#-- ITRF= International Terrestrial Reference Frame
Location['Sat_velocity'] = np.zeros((n_records,n_blocks,3),dtype=np.int32)
#-- Real beam direction vector. In CRF: packed units (micro-m, 1e-6 m)
#-- CRF= CryoSat Reference Frame.
Location['Real_beam'] = np.zeros((n_records,n_blocks,3),dtype=np.int32)
#-- Interferometric baseline vector. In CRF: packed units (micro-m, 1e-6 m)
Location['Baseline'] = np.zeros((n_records,n_blocks,3),dtype=np.int32)
#-- Measurement Confidence Data Flags
#-- Generally the MCD flags indicate problems when set
#-- If MCD is 0 then no problems or non-nominal conditions were detected
#-- Serious errors are indicated by setting bit 31
Location['MCD'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- CryoSat-2 Measurement Group
#-- Derived from instrument measurement parameters
Data_20Hz = {}
#-- Window Delay reference (two-way) corrected for instrument delays
Data_20Hz['TD'] = np.zeros((n_records,n_blocks),dtype=np.int64)
#-- H0 Initial Height Word from telemetry
Data_20Hz['H_0'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- COR2 Height Rate: on-board tracker height rate over the radar cycle
Data_20Hz['COR2'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Coarse Range Word (LAI) derived from telemetry
Data_20Hz['LAI'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Fine Range Word (FAI) derived from telemetry
Data_20Hz['FAI'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Automatic Gain Control Channel 1: AGC gain applied on Rx channel 1.
#-- Gain calibration corrections are applied (Sum of AGC stages 1 and 2
#-- plus the corresponding corrections) (dB/100)
Data_20Hz['AGC_CH1'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Automatic Gain Control Channel 2: AGC gain applied on Rx channel 2.
#-- Gain calibration corrections are applied (dB/100)
Data_20Hz['AGC_CH2'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Total Fixed Gain On Channel 1: gain applied by the RF unit. (dB/100)
Data_20Hz['TR_gain_CH1'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Total Fixed Gain On Channel 2: gain applied by the RF unit. (dB/100)
Data_20Hz['TR_gain_CH2'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Transmit Power in microWatts
Data_20Hz['TX_Power'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Doppler range correction: Radial component (mm)
#-- computed for the component of satellite velocity in the nadir direction
Data_20Hz['Doppler_range'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Range Correction: transmit-receive antenna (mm)
#-- Calibration correction to range on channel 1 computed from CAL1.
Data_20Hz['TR_inst_range'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Range Correction: receive-only antenna (mm)
#-- Calibration correction to range on channel 2 computed from CAL1.
Data_20Hz['R_inst_range'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Gain Correction: transmit-receive antenna (dB/100)
#-- Calibration correction to gain on channel 1 computed from CAL1
Data_20Hz['TR_inst_gain'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Gain Correction: receive-only (dB/100)
#-- Calibration correction to gain on channel 2 computed from CAL1
Data_20Hz['R_inst_gain'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Internal Phase Correction (microradians)
Data_20Hz['Internal_phase'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- External Phase Correction (microradians)
Data_20Hz['External_phase'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Noise Power measurement (dB/100): converted from telemetry units to be
#-- the noise floor of FBR measurement echoes.
#-- Set to -9999.99 when the telemetry contains zero.
Data_20Hz['Noise_power'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Phase slope correction (microradians)
#-- Computed from the CAL-4 packets during the azimuth impulse response
#-- amplitude (SARIN only). Set from the latest available CAL-4 packet.
Data_20Hz['Phase_slope'] = np.zeros((n_records,n_blocks),dtype=np.int32)
Data_20Hz['Spares1'] = np.zeros((n_records,n_blocks,4),dtype=np.int8)
#-- CryoSat-2 External Corrections Group
Geometry = {}
#-- Dry Tropospheric Correction packed units (mm, 1e-3 m)
Geometry['dryTrop'] = np.zeros((n_records),dtype=np.int32)
#-- Wet Tropospheric Correction packed units (mm, 1e-3 m)
Geometry['wetTrop'] = np.zeros((n_records),dtype=np.int32)
#-- Inverse Barometric Correction packed units (mm, 1e-3 m)
Geometry['InvBar'] = np.zeros((n_records),dtype=np.int32)
#-- Delta Inverse Barometric Correction packed units (mm, 1e-3 m)
Geometry['DAC'] = np.zeros((n_records),dtype=np.int32)
#-- GIM Ionospheric Correction packed units (mm, 1e-3 m)
Geometry['Iono_GIM'] = np.zeros((n_records),dtype=np.int32)
#-- Model Ionospheric Correction packed units (mm, 1e-3 m)
Geometry['Iono_model'] = np.zeros((n_records),dtype=np.int32)
#-- Ocean tide Correction packed units (mm, 1e-3 m)
Geometry['ocTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Long period equilibrium ocean tide Correction packed units (mm, 1e-3 m)
Geometry['lpeTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Ocean loading tide Correction packed units (mm, 1e-3 m)
Geometry['olTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Solid Earth tide Correction packed units (mm, 1e-3 m)
Geometry['seTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Geocentric Polar tide Correction packed units (mm, 1e-3 m)
Geometry['gpTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Surface Type: enumerated key to classify surface at nadir
#-- 0 = Open Ocean
#-- 1 = Closed Sea
#-- 2 = Continental Ice
#-- 3 = Land
Geometry['Surf_type'] = np.zeros((n_records),dtype=np.uint32)
Geometry['Spare1'] = np.zeros((n_records,4),dtype=np.int8)
#-- Corrections Status Flag
Geometry['Corr_status'] = np.zeros((n_records),dtype=np.uint32)
#-- Correction Error Flag
Geometry['Corr_error'] = np.zeros((n_records),dtype=np.uint32)
Geometry['Spare2'] = np.zeros((n_records,4),dtype=np.int8)
#-- CryoSat-2 Average Waveforms Groups
Waveform_1Hz = {}
if (MODE == 'LRM'):
#-- Low-Resolution Mode
#-- Data Record Time (MDSR Time Stamp)
Waveform_1Hz['Day'] = np.zeros((n_records),dtype=np.int32)
Waveform_1Hz['Second'] = np.zeros((n_records),dtype=np.uint32)
Waveform_1Hz['Micsec'] = np.zeros((n_records),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lat'] = np.zeros((n_records),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lon'] = np.zeros((n_records),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Waveform_1Hz['Alt'] = np.zeros((n_records),dtype=np.int32)
#-- Window Delay (two-way) corrected for instrument delays
Waveform_1Hz['TD'] = np.zeros((n_records),dtype=np.int64)
#-- 1 Hz Averaged Power Echo Waveform
Waveform_1Hz['Waveform'] = np.zeros((n_records,n_LRM_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_1Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Echo Scale Power (a power of 2)
Waveform_1Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Number of echoes averaged
Waveform_1Hz['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
Waveform_1Hz['Flags'] = np.zeros((n_records),dtype=np.uint16)
elif (MODE == 'SAR'):
#-- SAR Mode
#-- Data Record Time (MDSR Time Stamp)
Waveform_1Hz['Day'] = np.zeros((n_records),dtype=np.int32)
Waveform_1Hz['Second'] = np.zeros((n_records),dtype=np.uint32)
Waveform_1Hz['Micsec'] = np.zeros((n_records),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lat'] = np.zeros((n_records),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lon'] = np.zeros((n_records),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Waveform_1Hz['Alt'] = np.zeros((n_records),dtype=np.int32)
#-- Window Delay (two-way) corrected for instrument delays
Waveform_1Hz['TD'] = np.zeros((n_records),dtype=np.int64)
#-- 1 Hz Averaged Power Echo Waveform
Waveform_1Hz['Waveform'] = np.zeros((n_records,n_SAR_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_1Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Echo Scale Power (a power of 2)
Waveform_1Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Number of echoes averaged
Waveform_1Hz['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
Waveform_1Hz['Flags'] = np.zeros((n_records),dtype=np.uint16)
elif (MODE == 'SIN'):
#-- SARIN Mode
#-- Same as the LRM/SAR groups but the waveform array is 512 bins instead of
#-- 128 and the number of echoes averaged is different.
#-- Data Record Time (MDSR Time Stamp)
Waveform_1Hz['Day'] = np.zeros((n_records),dtype=np.int32)
Waveform_1Hz['Second'] = np.zeros((n_records),dtype=np.uint32)
Waveform_1Hz['Micsec'] = np.zeros((n_records),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lat'] = np.zeros((n_records),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lon'] = np.zeros((n_records),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Waveform_1Hz['Alt'] = np.zeros((n_records),dtype=np.int32)
#-- Window Delay (two-way) corrected for instrument delays
Waveform_1Hz['TD'] = np.zeros((n_records),dtype=np.int64)
#-- 1 Hz Averaged Power Echo Waveform
Waveform_1Hz['Waveform'] = np.zeros((n_records,n_SARIN_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_1Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Echo Scale Power (a power of 2)
Waveform_1Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Number of echoes averaged
Waveform_1Hz['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
Waveform_1Hz['Flags'] = np.zeros((n_records),dtype=np.uint16)
#-- CryoSat-2 Waveforms Groups
#-- Beam Behavior Parameters
Beam_Behavior = {}
#-- Standard Deviation of Gaussian fit to range integrated stack power.
Beam_Behavior['SD'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Stack Center: Mean of Gaussian fit to range integrated stack power.
Beam_Behavior['Center'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Stack amplitude parameter scaled in dB/100.
Beam_Behavior['Amplitude'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- 3rd moment: providing the degree of asymmetry of the range integrated
#-- stack power distribution.
Beam_Behavior['Skewness'] = np.zeros((n_records,n_blocks),dtype=np.int16)
#-- 4th moment: Measure of peakiness of range integrated stack power distribution.
Beam_Behavior['Kurtosis'] = np.zeros((n_records,n_blocks),dtype=np.int16)
Beam_Behavior['Spare'] = np.zeros((n_records,n_blocks,n_BeamBehaviourParams-5),dtype=np.int16)
#-- CryoSat-2 mode specific waveforms
Waveform_20Hz = {}
if (MODE == 'LRM'):
#-- Low-Resolution Mode
#-- Averaged Power Echo Waveform [128]
Waveform_20Hz['Waveform'] = np.zeros((n_records,n_blocks,n_LRM_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_20Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
Waveform_20Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Number of echoes averaged
Waveform_20Hz['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
Waveform_20Hz['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
elif (MODE == 'SAR'):
#-- SAR Mode
#-- Averaged Power Echo Waveform [128]
Waveform_20Hz['Waveform'] = np.zeros((n_records,n_blocks,n_SAR_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_20Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
Waveform_20Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Number of echoes averaged
Waveform_20Hz['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
Waveform_20Hz['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Beam behaviour parameters
Waveform_20Hz['Beam'] = Beam_Behavior
elif (MODE == 'SIN'):
#-- SARIN Mode
#-- Averaged Power Echo Waveform [512]
Waveform_20Hz['Waveform'] = np.zeros((n_records,n_blocks,n_SARIN_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_20Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
Waveform_20Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Number of echoes averaged
Waveform_20Hz['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
Waveform_20Hz['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Beam behaviour parameters
Waveform_20Hz['Beam'] = Beam_Behavior
#-- Coherence [512]: packed units (1/1000)
Waveform_20Hz['Coherence'] = np.zeros((n_records,n_blocks,n_SARIN_RW),dtype=np.int16)
#-- Phase Difference [512]: packed units (microradians)
Waveform_20Hz['Phase_diff'] = np.zeros((n_records,n_blocks,n_SARIN_RW),dtype=np.int32)
#-- for each record in the CryoSat file
for r in range(n_records):
#-- CryoSat-2 Time and Orbit Group
for b in range(n_blocks):
Location['Day'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Second'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Micsec'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['USO_Corr'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Mode_ID'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Location['SSC'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Location['Inst_config'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Rec_Count'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Lat'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Lon'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Alt'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Alt_rate'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Sat_velocity'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
Location['Real_beam'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
Location['Baseline'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
Location['MCD'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
#-- CryoSat-2 Measurement Group
#-- Derived from instrument measurement parameters
for b in range(n_blocks):
Data_20Hz['TD'][r,b] = np.fromfile(fid,dtype='>i8',count=1)
Data_20Hz['H_0'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['COR2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['LAI'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['FAI'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['AGC_CH1'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['AGC_CH2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_gain_CH1'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_gain_CH2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TX_Power'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Doppler_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_inst_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['R_inst_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_inst_gain'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['R_inst_gain'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Internal_phase'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['External_phase'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Noise_power'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Phase_slope'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Spares1'][r,b,:] = np.fromfile(fid,dtype='>i1',count=4)
#-- CryoSat-2 External Corrections Group
Geometry['dryTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['wetTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['InvBar'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['DAC'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['Iono_GIM'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['Iono_model'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['ocTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['lpeTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['olTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['seTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['gpTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['Surf_type'][r] = np.fromfile(fid,dtype='>u4',count=1)
Geometry['Spare1'][r,:] = np.fromfile(fid,dtype='>i1',count=4)
Geometry['Corr_status'][r] = np.fromfile(fid,dtype='>u4',count=1)
Geometry['Corr_error'][r] = np.fromfile(fid,dtype='>u4',count=1)
Geometry['Spare2'][r,:] = np.fromfile(fid,dtype='>i1',count=4)
#-- CryoSat-2 Average Waveforms Groups
if (MODE == 'LRM'):
#-- Low-Resolution Mode
Waveform_1Hz['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
Waveform_1Hz['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_LRM_RW)
Waveform_1Hz['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_1Hz['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
elif (MODE == 'SAR'):
#-- SAR Mode
Waveform_1Hz['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
Waveform_1Hz['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_SAR_RW)
Waveform_1Hz['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_1Hz['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
elif (MODE == 'SIN'):
#-- SARIN Mode
Waveform_1Hz['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
Waveform_1Hz['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_SARIN_RW)
Waveform_1Hz['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_1Hz['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
#-- CryoSat-2 Waveforms Groups
if (MODE == 'LRM'):
#-- Low-Resolution Mode
for b in range(n_blocks):
Waveform_20Hz['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_LRM_RW)
Waveform_20Hz['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
elif (MODE == 'SAR'):
#-- SAR Mode
for b in range(n_blocks):
Waveform_20Hz['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_SAR_RW)
Waveform_20Hz['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['SD'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Center'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Amplitude'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Skewness'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Kurtosis'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Spare'][r,b,:] = np.fromfile(fid,dtype='>i2',count=(n_BeamBehaviourParams-5))
elif (MODE == 'SIN'):
#-- SARIN Mode
for b in range(n_blocks):
Waveform_20Hz['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_SARIN_RW)
Waveform_20Hz['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['SD'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Center'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Amplitude'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Skewness'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Kurtosis'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Spare'][r,b,:] = np.fromfile(fid,dtype='>i2',count=(n_BeamBehaviourParams-5))
Waveform_20Hz['Coherence'][r,b,:] = np.fromfile(fid,dtype='>i2',count=n_SARIN_RW)
Waveform_20Hz['Phase_diff'][r,b,:] = np.fromfile(fid,dtype='>i4',count=n_SARIN_RW)
#-- Bind all the bits of the l1b_mds together into a single dictionary
CS_l1b_mds = {}
CS_l1b_mds['Location'] = Location
CS_l1b_mds['Data'] = Data_20Hz
CS_l1b_mds['Geometry'] = Geometry
CS_l1b_mds['Waveform_1Hz'] = Waveform_1Hz
CS_l1b_mds['Waveform_20Hz'] = Waveform_20Hz
#-- return the output dictionary
return CS_l1b_mds
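
#-- NOTE: illustrative helper, not part of the original MDS reader. A minimal
#-- sketch of how the packed units documented in the comments above could be
#-- unscaled for a single 20 Hz block, assuming Lat/Lon are stored in 1e-7
#-- degrees, Alt in mm, and the echo is scaled to watts as
#-- waveform*Linear_Wfm_Multiplier*2**Power2_Wfm_Multiplier
def example_unscale_block(CS_l1b_mds, r, b):
    Location = CS_l1b_mds['Location']
    Waveform_20Hz = CS_l1b_mds['Waveform_20Hz']
    #-- geolocation converted from packed units to degrees and meters
    lat = Location['Lat'][r,b]*1e-7
    lon = Location['Lon'][r,b]*1e-7
    alt = Location['Alt'][r,b]*1e-3
    #-- scale the averaged power echo to watts (assumed scaling, see note above)
    echo = Waveform_20Hz['Waveform'][r,b,:].astype(np.float64)
    echo *= Waveform_20Hz['Linear_Wfm_Multiplier'][r,b]
    echo *= 2.0**Waveform_20Hz['Power2_Wfm_Multiplier'][r,b]
    return (lat, lon, alt, echo)
#-- e.g. lat,lon,alt,echo = example_unscale_block(CS_l1b_mds, r=0, b=0)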
#-- PURPOSE: Initiate L1b MDS variables for CryoSat Baseline C
def cryosat_baseline_C(fid, n_records, MODE):
n_SARIN_BC_RW = 1024
n_SARIN_RW = 512
n_SAR_BC_RW = 256
n_SAR_RW = 128
n_LRM_RW = 128
n_blocks = 20
n_BeamBehaviourParams = 50
#-- CryoSat-2 Time and Orbit Group
Location = {}
#-- Time: day part
Location['Day'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Time: second part
Location['Second'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- Time: microsecond part
Location['Micsec'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- USO correction factor
Location['USO_Corr'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Mode ID
Location['Mode_ID'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Source sequence counter
Location['SSC'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Instrument configuration
Location['Inst_config'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- Record Counter
Location['Rec_Count'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Location['Lat'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Location['Lon'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Location['Alt'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instantaneous altitude rate derived from orbit: packed units (mm/s, 1e-3 m/s)
Location['Alt_rate'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Satellite velocity vector. In ITRF: packed units (mm/s, 1e-3 m/s)
#-- ITRF= International Terrestrial Reference Frame
Location['Sat_velocity'] = np.zeros((n_records,n_blocks,3),dtype=np.int32)
	#-- Real beam direction vector. In CRF: packed units (micro-m, 1e-6 m)
#-- CRF= CryoSat Reference Frame.
Location['Real_beam'] = np.zeros((n_records,n_blocks,3),dtype=np.int32)
	#-- Interferometric baseline vector. In CRF: packed units (micro-m, 1e-6 m)
Location['Baseline'] = np.zeros((n_records,n_blocks,3),dtype=np.int32)
#-- Star Tracker ID
Location['ST_ID'] = np.zeros((n_records,n_blocks),dtype=np.int16)
#-- Antenna Bench Roll Angle (Derived from star trackers)
#-- packed units (0.1 micro-degree, 1e-7 degrees)
Location['Roll'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Antenna Bench Pitch Angle (Derived from star trackers)
#-- packed units (0.1 micro-degree, 1e-7 degrees)
Location['Pitch'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Antenna Bench Yaw Angle (Derived from star trackers)
#-- packed units (0.1 micro-degree, 1e-7 degrees)
Location['Yaw'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Measurement Confidence Data Flags
#-- Generally the MCD flags indicate problems when set
#-- If MCD is 0 then no problems or non-nominal conditions were detected
#-- Serious errors are indicated by setting bit 31
Location['MCD'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
Location['Spares'] = np.zeros((n_records,n_blocks,2),dtype=np.int16)
#-- CryoSat-2 Measurement Group
#-- Derived from instrument measurement parameters
Data_20Hz = {}
#-- Window Delay reference (two-way) corrected for instrument delays
Data_20Hz['TD'] = np.zeros((n_records,n_blocks),dtype=np.int64)
#-- H0 Initial Height Word from telemetry
Data_20Hz['H_0'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- COR2 Height Rate: on-board tracker height rate over the radar cycle
Data_20Hz['COR2'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Coarse Range Word (LAI) derived from telemetry
Data_20Hz['LAI'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Fine Range Word (FAI) derived from telemetry
Data_20Hz['FAI'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Automatic Gain Control Channel 1: AGC gain applied on Rx channel 1.
#-- Gain calibration corrections are applied (Sum of AGC stages 1 and 2
#-- plus the corresponding corrections) (dB/100)
Data_20Hz['AGC_CH1'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Automatic Gain Control Channel 2: AGC gain applied on Rx channel 2.
#-- Gain calibration corrections are applied (dB/100)
Data_20Hz['AGC_CH2'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Total Fixed Gain On Channel 1: gain applied by the RF unit. (dB/100)
Data_20Hz['TR_gain_CH1'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Total Fixed Gain On Channel 2: gain applied by the RF unit. (dB/100)
Data_20Hz['TR_gain_CH2'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Transmit Power in microWatts
Data_20Hz['TX_Power'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Doppler range correction: Radial component (mm)
#-- computed for the component of satellite velocity in the nadir direction
Data_20Hz['Doppler_range'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Range Correction: transmit-receive antenna (mm)
#-- Calibration correction to range on channel 1 computed from CAL1.
Data_20Hz['TR_inst_range'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Range Correction: receive-only antenna (mm)
#-- Calibration correction to range on channel 2 computed from CAL1.
Data_20Hz['R_inst_range'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Gain Correction: transmit-receive antenna (dB/100)
#-- Calibration correction to gain on channel 1 computed from CAL1
Data_20Hz['TR_inst_gain'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Gain Correction: receive-only (dB/100)
#-- Calibration correction to gain on channel 2 computed from CAL1
Data_20Hz['R_inst_gain'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Internal Phase Correction (microradians)
Data_20Hz['Internal_phase'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- External Phase Correction (microradians)
Data_20Hz['External_phase'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Noise Power measurement (dB/100)
Data_20Hz['Noise_power'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Phase slope correction (microradians)
#-- Computed from the CAL-4 packets during the azimuth impulse response
#-- amplitude (SARIN only). Set from the latest available CAL-4 packet.
Data_20Hz['Phase_slope'] = np.zeros((n_records,n_blocks),dtype=np.int32)
Data_20Hz['Spares1'] = np.zeros((n_records,n_blocks,4),dtype=np.int8)
#-- CryoSat-2 External Corrections Group
Geometry = {}
#-- Dry Tropospheric Correction packed units (mm, 1e-3 m)
Geometry['dryTrop'] = np.zeros((n_records),dtype=np.int32)
#-- Wet Tropospheric Correction packed units (mm, 1e-3 m)
Geometry['wetTrop'] = np.zeros((n_records),dtype=np.int32)
#-- Inverse Barometric Correction packed units (mm, 1e-3 m)
Geometry['InvBar'] = np.zeros((n_records),dtype=np.int32)
#-- Delta Inverse Barometric Correction packed units (mm, 1e-3 m)
Geometry['DAC'] = np.zeros((n_records),dtype=np.int32)
#-- GIM Ionospheric Correction packed units (mm, 1e-3 m)
Geometry['Iono_GIM'] = np.zeros((n_records),dtype=np.int32)
#-- Model Ionospheric Correction packed units (mm, 1e-3 m)
Geometry['Iono_model'] = np.zeros((n_records),dtype=np.int32)
#-- Ocean tide Correction packed units (mm, 1e-3 m)
Geometry['ocTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Long period equilibrium ocean tide Correction packed units (mm, 1e-3 m)
Geometry['lpeTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Ocean loading tide Correction packed units (mm, 1e-3 m)
Geometry['olTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Solid Earth tide Correction packed units (mm, 1e-3 m)
Geometry['seTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Geocentric Polar tide Correction packed units (mm, 1e-3 m)
Geometry['gpTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Surface Type: enumerated key to classify surface at nadir
#-- 0 = Open Ocean
#-- 1 = Closed Sea
#-- 2 = Continental Ice
#-- 3 = Land
Geometry['Surf_type'] = np.zeros((n_records),dtype=np.uint32)
Geometry['Spare1'] = np.zeros((n_records,4),dtype=np.int8)
#-- Corrections Status Flag
Geometry['Corr_status'] = np.zeros((n_records),dtype=np.uint32)
#-- Correction Error Flag
Geometry['Corr_error'] = np.zeros((n_records),dtype=np.uint32)
Geometry['Spare2'] = np.zeros((n_records,4),dtype=np.int8)
#-- CryoSat-2 Average Waveforms Groups
Waveform_1Hz = {}
if (MODE == 'LRM'):
#-- Low-Resolution Mode
#-- Data Record Time (MDSR Time Stamp)
Waveform_1Hz['Day'] = np.zeros((n_records),dtype=np.int32)
Waveform_1Hz['Second'] = np.zeros((n_records),dtype=np.uint32)
Waveform_1Hz['Micsec'] = np.zeros((n_records),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lat'] = np.zeros((n_records),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lon'] = np.zeros((n_records),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Waveform_1Hz['Alt'] = np.zeros((n_records),dtype=np.int32)
#-- Window Delay (two-way) corrected for instrument delays
Waveform_1Hz['TD'] = np.zeros((n_records),dtype=np.int64)
#-- 1 Hz Averaged Power Echo Waveform
Waveform_1Hz['Waveform'] = np.zeros((n_records,n_LRM_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_1Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
Waveform_1Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Number of echoes averaged
Waveform_1Hz['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
Waveform_1Hz['Flags'] = np.zeros((n_records),dtype=np.uint16)
elif (MODE == 'SAR'):
#-- SAR Mode
#-- Data Record Time (MDSR Time Stamp)
Waveform_1Hz['Day'] = np.zeros((n_records),dtype=np.int32)
Waveform_1Hz['Second'] = np.zeros((n_records),dtype=np.uint32)
Waveform_1Hz['Micsec'] = np.zeros((n_records),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lat'] = np.zeros((n_records),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lon'] = np.zeros((n_records),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Waveform_1Hz['Alt'] = np.zeros((n_records),dtype=np.int32)
#-- Window Delay (two-way) corrected for instrument delays
Waveform_1Hz['TD'] = np.zeros((n_records),dtype=np.int64)
#-- 1 Hz Averaged Power Echo Waveform
Waveform_1Hz['Waveform'] = np.zeros((n_records,n_SAR_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_1Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
Waveform_1Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Number of echoes averaged
Waveform_1Hz['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
Waveform_1Hz['Flags'] = np.zeros((n_records),dtype=np.uint16)
elif (MODE == 'SIN'):
#-- SARIN Mode
#-- Same as the LRM/SAR groups but the waveform array is 512 bins instead of
#-- 128 and the number of echoes averaged is different.
#-- Data Record Time (MDSR Time Stamp)
Waveform_1Hz['Day'] = np.zeros((n_records),dtype=np.int32)
Waveform_1Hz['Second'] = np.zeros((n_records),dtype=np.uint32)
Waveform_1Hz['Micsec'] = np.zeros((n_records),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lat'] = np.zeros((n_records),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lon'] = np.zeros((n_records),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Waveform_1Hz['Alt'] = np.zeros((n_records),dtype=np.int32)
#-- Window Delay (two-way) corrected for instrument delays
Waveform_1Hz['TD'] = np.zeros((n_records),dtype=np.int64)
#-- 1 Hz Averaged Power Echo Waveform
Waveform_1Hz['Waveform'] = np.zeros((n_records,n_SARIN_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_1Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
Waveform_1Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Number of echoes averaged
Waveform_1Hz['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
Waveform_1Hz['Flags'] = np.zeros((n_records),dtype=np.uint16)
#-- CryoSat-2 Waveforms Groups
#-- Beam Behavior Parameters
Beam_Behavior = {}
#-- Standard Deviation of Gaussian fit to range integrated stack power.
Beam_Behavior['SD'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Stack Center: Mean of Gaussian fit to range integrated stack power.
Beam_Behavior['Center'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Stack amplitude parameter scaled in dB/100.
Beam_Behavior['Amplitude'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- 3rd moment: providing the degree of asymmetry of the range integrated
#-- stack power distribution.
Beam_Behavior['Skewness'] = np.zeros((n_records,n_blocks),dtype=np.int16)
#-- 4th moment: Measure of peakiness of range integrated stack power distribution.
Beam_Behavior['Kurtosis'] = np.zeros((n_records,n_blocks),dtype=np.int16)
#-- Standard deviation as a function of boresight angle (microradians)
Beam_Behavior['SD_boresight_angle'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Stack Center angle as a function of boresight angle (microradians)
Beam_Behavior['Center_boresight_angle'] = np.zeros((n_records,n_blocks),dtype=np.int16)
Beam_Behavior['Spare'] = np.zeros((n_records,n_blocks,n_BeamBehaviourParams-7),dtype=np.int16)
#-- CryoSat-2 mode specific waveform variables
Waveform_20Hz = {}
if (MODE == 'LRM'):
#-- Low-Resolution Mode
#-- Averaged Power Echo Waveform [128]
Waveform_20Hz['Waveform'] = np.zeros((n_records,n_blocks,n_LRM_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_20Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Echo Scale Power (a power of 2)
Waveform_20Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Number of echoes averaged
Waveform_20Hz['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
Waveform_20Hz['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
elif (MODE == 'SAR'):
#-- SAR Mode
#-- Averaged Power Echo Waveform [256]
Waveform_20Hz['Waveform'] = np.zeros((n_records,n_blocks,n_SAR_BC_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_20Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Echo Scale Power (a power of 2)
Waveform_20Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Number of echoes averaged
Waveform_20Hz['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
Waveform_20Hz['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Beam behaviour parameters
Waveform_20Hz['Beam'] = Beam_Behavior
elif (MODE == 'SIN'):
#-- SARIN Mode
#-- Averaged Power Echo Waveform [1024]
Waveform_20Hz['Waveform'] = np.zeros((n_records,n_blocks,n_SARIN_BC_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_20Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Echo Scale Power (a power of 2)
Waveform_20Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Number of echoes averaged
Waveform_20Hz['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
Waveform_20Hz['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Beam behaviour parameters
Waveform_20Hz['Beam'] = Beam_Behavior
#-- Coherence [1024]: packed units (1/1000)
Waveform_20Hz['Coherence'] = np.zeros((n_records,n_blocks,n_SARIN_BC_RW),dtype=np.int16)
#-- Phase Difference [1024]: packed units (microradians)
Waveform_20Hz['Phase_diff'] = np.zeros((n_records,n_blocks,n_SARIN_BC_RW),dtype=np.int32)
#-- for each record in the CryoSat file
for r in range(n_records):
#-- CryoSat-2 Time and Orbit Group
for b in range(n_blocks):
Location['Day'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Second'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Micsec'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['USO_Corr'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Mode_ID'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Location['SSC'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Location['Inst_config'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Rec_Count'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Lat'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Lon'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Alt'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Alt_rate'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Sat_velocity'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
Location['Real_beam'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
Location['Baseline'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
Location['ST_ID'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Location['Roll'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Pitch'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Yaw'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['MCD'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Spares'][r,b,:] = np.fromfile(fid,dtype='>i2',count=2)
#-- CryoSat-2 Measurement Group
#-- Derived from instrument measurement parameters
for b in range(n_blocks):
Data_20Hz['TD'][r,b] = np.fromfile(fid,dtype='>i8',count=1)
Data_20Hz['H_0'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['COR2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['LAI'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['FAI'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['AGC_CH1'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['AGC_CH2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_gain_CH1'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_gain_CH2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TX_Power'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Doppler_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_inst_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['R_inst_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_inst_gain'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['R_inst_gain'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Internal_phase'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['External_phase'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Noise_power'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Phase_slope'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Spares1'][r,b,:] = np.fromfile(fid,dtype='>i1',count=4)
#-- CryoSat-2 External Corrections Group
Geometry['dryTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['wetTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['InvBar'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['DAC'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['Iono_GIM'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['Iono_model'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['ocTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['lpeTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['olTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['seTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['gpTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['Surf_type'][r] = np.fromfile(fid,dtype='>u4',count=1)
Geometry['Spare1'][r,:] = np.fromfile(fid,dtype='>i1',count=4)
Geometry['Corr_status'][r] = np.fromfile(fid,dtype='>u4',count=1)
Geometry['Corr_error'][r] = np.fromfile(fid,dtype='>u4',count=1)
Geometry['Spare2'][r,:] = np.fromfile(fid,dtype='>i1',count=4)
#-- CryoSat-2 Average Waveforms Groups
if (MODE == 'LRM'):
#-- Low-Resolution Mode
Waveform_1Hz['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
Waveform_1Hz['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_LRM_RW)
Waveform_1Hz['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_1Hz['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
elif (MODE == 'SAR'):
#-- SAR Mode
Waveform_1Hz['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
Waveform_1Hz['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_SAR_RW)
Waveform_1Hz['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_1Hz['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
elif (MODE == 'SIN'):
#-- SARIN Mode
Waveform_1Hz['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
Waveform_1Hz['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_SARIN_RW)
Waveform_1Hz['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_1Hz['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
#-- CryoSat-2 Waveforms Groups
if (MODE == 'LRM'):
#-- Low-Resolution Mode
for b in range(n_blocks):
Waveform_20Hz['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_LRM_RW)
Waveform_20Hz['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
elif (MODE == 'SAR'):
#-- SAR Mode
for b in range(n_blocks):
Waveform_20Hz['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_SAR_BC_RW)
Waveform_20Hz['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['SD'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Center'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Amplitude'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Skewness'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Kurtosis'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['SD_boresight_angle'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Center_boresight_angle'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Spare'][r,b,:] = np.fromfile(fid,dtype='>i2',count=(n_BeamBehaviourParams-7))
elif (MODE == 'SIN'):
#-- SARIN Mode
for b in range(n_blocks):
Waveform_20Hz['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_SARIN_BC_RW)
Waveform_20Hz['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['SD'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Center'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Amplitude'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Skewness'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Kurtosis'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['SD_boresight_angle'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Center_boresight_angle'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Spare'][r,b,:] = np.fromfile(fid,dtype='>i2',count=(n_BeamBehaviourParams-7))
Waveform_20Hz['Coherence'][r,b,:] = np.fromfile(fid,dtype='>i2',count=n_SARIN_BC_RW)
Waveform_20Hz['Phase_diff'][r,b,:] = np.fromfile(fid,dtype='>i4',count=n_SARIN_BC_RW)
#-- Bind all the bits of the l1b_mds together into a single dictionary
CS_l1b_mds = {}
CS_l1b_mds['Location'] = Location
CS_l1b_mds['Data'] = Data_20Hz
CS_l1b_mds['Geometry'] = Geometry
CS_l1b_mds['Waveform_1Hz'] = Waveform_1Hz
CS_l1b_mds['Waveform_20Hz'] = Waveform_20Hz
#-- return the output dictionary
return CS_l1b_mds
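
#-- NOTE: illustrative helper, not part of the original MDS reader. A minimal
#-- sketch of how the flag words documented above might be interpreted,
#-- assuming "bit 31" of the 32-bit MCD word refers to its most significant bit
#-- and using the surface-type enumeration listed in the comments
#-- (0=Open Ocean, 1=Closed Sea, 2=Continental Ice, 3=Land)
def example_screen_record(CS_l1b_mds, r, b):
    #-- MCD of 0 means no problems or non-nominal conditions were detected
    mcd = int(CS_l1b_mds['Location']['MCD'][r,b])
    nominal = (mcd == 0)
    #-- serious errors are indicated by setting bit 31
    serious_error = bool(mcd & (1 << 31))
    #-- enumerated surface type at nadir from the 1 Hz external corrections group
    surf_types = {0:'Open Ocean', 1:'Closed Sea', 2:'Continental Ice', 3:'Land'}
    surface = surf_types.get(int(CS_l1b_mds['Geometry']['Surf_type'][r]), 'Unknown')
    return (nominal, serious_error, surface)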
#-- PURPOSE: Initiate L1b MDS variables for CryoSat Baseline D (netCDF4)
def cryosat_baseline_D(full_filename, MODE, UNPACK=False):
#-- open netCDF4 file for reading
fid = netCDF4.Dataset(os.path.expanduser(full_filename),'r')
#-- use original unscaled units unless UNPACK=True
fid.set_auto_scale(UNPACK)
#-- get dimensions
ind_first_meas_20hz_01 = fid.variables['ind_first_meas_20hz_01'][:].copy()
ind_meas_1hz_20_ku = fid.variables['ind_meas_1hz_20_ku'][:].copy()
n_records = len(ind_first_meas_20hz_01)
n_SARIN_D_RW = 1024
n_SARIN_RW = 512
n_SAR_D_RW = 256
n_SAR_RW = 128
n_LRM_RW = 128
n_blocks = 20
#-- CryoSat-2 Time and Orbit Group
Location = {}
#-- MDS Time
Location['Time'] = np.ma.zeros((n_records,n_blocks))
Location['Time'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
time_20_ku = fid.variables['time_20_ku'][:].copy()
#-- Time: day part
Location['Day'] = np.ma.zeros((n_records,n_blocks))
Location['Day'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
#-- Time: second part
Location['Second'] = np.ma.zeros((n_records,n_blocks))
Location['Second'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
#-- Time: microsecond part
Location['Micsec'] = np.ma.zeros((n_records,n_blocks))
Location['Micsec'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
#-- USO correction factor
Location['USO_Corr'] = np.ma.zeros((n_records,n_blocks))
Location['USO_Corr'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
uso_cor_20_ku = fid.variables['uso_cor_20_ku'][:].copy()
#-- Mode ID
Location['Mode_ID'] = np.ma.zeros((n_records,n_blocks))
Location['Mode_ID'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
	flag_instr_mode_op_20_ku = fid.variables['flag_instr_mode_op_20_ku'][:].copy()
#-- Mode Flags
Location['Mode_flags'] = np.ma.zeros((n_records,n_blocks))
Location['Mode_flags'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
	flag_instr_mode_flags_20_ku = fid.variables['flag_instr_mode_flags_20_ku'][:].copy()
#-- Platform attitude control mode
Location['Att_control'] = np.ma.zeros((n_records,n_blocks))
Location['Att_control'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
	flag_instr_mode_att_ctrl_20_ku = fid.variables['flag_instr_mode_att_ctrl_20_ku'][:].copy()
#-- Instrument configuration
Location['Inst_config'] = np.ma.zeros((n_records,n_blocks))
Location['Inst_config'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_instr_conf_rx_flags_20_ku = fid.variables['flag_instr_conf_rx_flags_20_ku'][:].copy()
#-- acquisition band
Location['Inst_band'] = np.ma.zeros((n_records,n_blocks))
Location['Inst_band'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_instr_conf_rx_bwdt_20_ku = fid.variables['flag_instr_conf_rx_bwdt_20_ku'][:].copy()
#-- instrument channel
Location['Inst_channel'] = np.ma.zeros((n_records,n_blocks))
Location['Inst_channel'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_instr_conf_rx_in_use_20_ku = fid.variables['flag_instr_conf_rx_in_use_20_ku'][:].copy()
#-- tracking mode
Location['Tracking_mode'] = np.ma.zeros((n_records,n_blocks))
Location['Tracking_mode'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_instr_conf_rx_trk_mode_20_ku = fid.variables['flag_instr_conf_rx_trk_mode_20_ku'][:].copy()
#-- Source sequence counter
Location['SSC'] = np.ma.zeros((n_records,n_blocks))
Location['SSC'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
seq_count_20_ku = fid.variables['seq_count_20_ku'][:].copy()
#-- Record Counter
Location['Rec_Count'] = np.ma.zeros((n_records,n_blocks))
Location['Rec_Count'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
rec_count_20_ku = fid.variables['rec_count_20_ku'][:].copy()
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Location['Lat'] = np.ma.zeros((n_records,n_blocks))
Location['Lat'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
lat_20_ku = fid.variables['lat_20_ku'][:].copy()
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Location['Lon'] = np.ma.zeros((n_records,n_blocks))
Location['Lon'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
lon_20_ku = fid.variables['lon_20_ku'][:].copy()
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Location['Alt'] = np.ma.zeros((n_records,n_blocks))
Location['Alt'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
alt_20_ku = fid.variables['alt_20_ku'][:].copy()
#-- Instantaneous altitude rate derived from orbit: packed units (mm/s, 1e-3 m/s)
Location['Alt_rate'] = np.ma.zeros((n_records,n_blocks))
Location['Alt_rate'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
orb_alt_rate_20_ku = fid.variables['orb_alt_rate_20_ku'][:].copy()
#-- Satellite velocity vector. In ITRF: packed units (mm/s, 1e-3 m/s)
#-- ITRF= International Terrestrial Reference Frame
Location['Sat_velocity'] = np.ma.zeros((n_records,n_blocks,3))
	Location['Sat_velocity'].mask = np.zeros((n_records,n_blocks,3),dtype=np.bool)
sat_vel_vec_20_ku = fid.variables['sat_vel_vec_20_ku'][:].copy()
	#-- Real beam direction vector. In CRF: packed units (micro-m, 1e-6 m)
#-- CRF= CryoSat Reference Frame.
Location['Real_beam'] = np.ma.zeros((n_records,n_blocks,3))
	Location['Real_beam'].mask = np.zeros((n_records,n_blocks,3),dtype=np.bool)
beam_dir_vec_20_ku = fid.variables['beam_dir_vec_20_ku'][:].copy()
	#-- Interferometric baseline vector. In CRF: packed units (micro-m, 1e-6 m)
Location['Baseline'] = np.ma.zeros((n_records,n_blocks,3))
	Location['Baseline'].mask = np.zeros((n_records,n_blocks,3),dtype=np.bool)
inter_base_vec_20_ku = fid.variables['inter_base_vec_20_ku'][:].copy()
#-- Star Tracker ID
Location['ST_ID'] = np.ma.zeros((n_records,n_blocks))
Location['ST_ID'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_instr_conf_rx_str_in_use_20_ku = fid.variables['flag_instr_conf_rx_str_in_use_20_ku'][:].copy()
#-- Antenna Bench Roll Angle (Derived from star trackers)
#-- packed units (0.1 micro-degree, 1e-7 degrees)
Location['Roll'] = np.ma.zeros((n_records,n_blocks))
Location['Roll'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
off_nadir_roll_angle_str_20_ku = fid.variables['off_nadir_roll_angle_str_20_ku'][:].copy()
#-- Antenna Bench Pitch Angle (Derived from star trackers)
#-- packed units (0.1 micro-degree, 1e-7 degrees)
Location['Pitch'] = np.ma.zeros((n_records,n_blocks))
Location['Pitch'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
off_nadir_pitch_angle_str_20_ku = fid.variables['off_nadir_pitch_angle_str_20_ku'][:].copy()
#-- Antenna Bench Yaw Angle (Derived from star trackers)
#-- packed units (0.1 micro-degree, 1e-7 degrees)
Location['Yaw'] = np.ma.zeros((n_records,n_blocks))
Location['Yaw'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
off_nadir_yaw_angle_str_20_ku = fid.variables['off_nadir_yaw_angle_str_20_ku'][:].copy()
#-- Measurement Confidence Data Flags
#-- Generally the MCD flags indicate problems when set
#-- If MCD is 0 then no problems or non-nominal conditions were detected
#-- Serious errors are indicated by setting bit 31
Location['MCD'] = np.ma.zeros((n_records,n_blocks))
Location['MCD'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_mcd_20_ku = fid.variables['flag_mcd_20_ku'][:].copy()
#-- CryoSat-2 Measurement Group
#-- Derived from instrument measurement parameters
Data_20Hz = {}
#-- Window Delay reference (two-way) corrected for instrument delays
Data_20Hz['TD'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['TD'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
window_del_20_ku = fid.variables['window_del_20_ku'][:].copy()
#-- H0 Initial Height Word from telemetry
Data_20Hz['H_0'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['H_0'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
h0_applied_20_ku = fid.variables['h0_applied_20_ku'][:].copy()
#-- COR2 Height Rate: on-board tracker height rate over the radar cycle
Data_20Hz['COR2'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['COR2'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
cor2_applied_20_ku = fid.variables['cor2_applied_20_ku'][:].copy()
#-- Coarse Range Word (LAI) derived from telemetry
Data_20Hz['LAI'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['LAI'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
h0_lai_word_20_ku = fid.variables['h0_lai_word_20_ku'][:].copy()
#-- Fine Range Word (FAI) derived from telemetry
Data_20Hz['FAI'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['FAI'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
h0_fai_word_20_ku = fid.variables['h0_fai_word_20_ku'][:].copy()
#-- Automatic Gain Control Channel 1: AGC gain applied on Rx channel 1.
#-- Gain calibration corrections are applied (Sum of AGC stages 1 and 2
#-- plus the corresponding corrections) (dB/100)
Data_20Hz['AGC_CH1'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['AGC_CH1'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
agc_ch1_20_ku = fid.variables['agc_ch1_20_ku'][:].copy()
#-- Automatic Gain Control Channel 2: AGC gain applied on Rx channel 2.
#-- Gain calibration corrections are applied (dB/100)
Data_20Hz['AGC_CH2'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['AGC_CH2'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
agc_ch2_20_ku = fid.variables['agc_ch2_20_ku'][:].copy()
#-- Total Fixed Gain On Channel 1: gain applied by the RF unit. (dB/100)
Data_20Hz['TR_gain_CH1'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['TR_gain_CH1'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
tot_gain_ch1_20_ku = fid.variables['tot_gain_ch1_20_ku'][:].copy()
#-- Total Fixed Gain On Channel 2: gain applied by the RF unit. (dB/100)
Data_20Hz['TR_gain_CH2'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['TR_gain_CH2'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
tot_gain_ch2_20_ku = fid.variables['tot_gain_ch2_20_ku'][:].copy()
#-- Transmit Power in microWatts
Data_20Hz['TX_Power'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['TX_Power'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
transmit_pwr_20_ku = fid.variables['transmit_pwr_20_ku'][:].copy()
#-- Doppler range correction: Radial component (mm)
#-- computed for the component of satellite velocity in the nadir direction
Data_20Hz['Doppler_range'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['Doppler_range'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
dop_cor_20_ku = fid.variables['dop_cor_20_ku'][:].copy()
#-- Value of Doppler Angle for the first single look echo (1e-7 radians)
Data_20Hz['Doppler_angle_start'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['Doppler_angle_start'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
dop_angle_start_20_ku = fid.variables['dop_angle_start_20_ku'][:].copy()
#-- Value of Doppler Angle for the last single look echo (1e-7 radians)
Data_20Hz['Doppler_angle_stop'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['Doppler_angle_stop'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
dop_angle_stop_20_ku = fid.variables['dop_angle_stop_20_ku'][:].copy()
#-- Instrument Range Correction: transmit-receive antenna (mm)
#-- Calibration correction to range on channel 1 computed from CAL1.
Data_20Hz['TR_inst_range'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['TR_inst_range'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
instr_cor_range_tx_rx_20_ku = fid.variables['instr_cor_range_tx_rx_20_ku'][:].copy()
#-- Instrument Range Correction: receive-only antenna (mm)
#-- Calibration correction to range on channel 2 computed from CAL1.
Data_20Hz['R_inst_range'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['R_inst_range'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
instr_cor_range_rx_20_ku = fid.variables['instr_cor_range_rx_20_ku'][:].copy()
#-- Instrument Gain Correction: transmit-receive antenna (dB/100)
#-- Calibration correction to gain on channel 1 computed from CAL1
Data_20Hz['TR_inst_gain'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['TR_inst_gain'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
instr_cor_gain_tx_rx_20_ku = fid.variables['instr_cor_gain_tx_rx_20_ku'][:].copy()
#-- Instrument Gain Correction: receive-only (dB/100)
#-- Calibration correction to gain on channel 2 computed from CAL1
Data_20Hz['R_inst_gain'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['R_inst_gain'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
instr_cor_gain_rx_20_ku = fid.variables['instr_cor_gain_rx_20_ku'][:].copy()
#-- Internal Phase Correction (microradians)
Data_20Hz['Internal_phase'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['Internal_phase'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
instr_int_ph_cor_20_ku = fid.variables['instr_int_ph_cor_20_ku'][:].copy()
#-- External Phase Correction (microradians)
Data_20Hz['External_phase'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['External_phase'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
instr_ext_ph_cor_20_ku = fid.variables['instr_ext_ph_cor_20_ku'][:].copy()
#-- Noise Power measurement (dB/100)
Data_20Hz['Noise_power'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['Noise_power'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
noise_power_20_ku = fid.variables['noise_power_20_ku'][:].copy()
#-- Phase slope correction (microradians)
#-- Computed from the CAL-4 packets during the azimuth impulse response
#-- amplitude (SARIN only). Set from the latest available CAL-4 packet.
Data_20Hz['Phase_slope'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['Phase_slope'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
ph_slope_cor_20_ku = fid.variables['ph_slope_cor_20_ku'][:].copy()
#-- CryoSat-2 External Corrections Group
Geometry = {}
#-- Data Record Time (MDSR Time Stamp)
Geometry['Time'] = fid.variables['time_cor_01'][:].copy()
#-- Dry Tropospheric Correction packed units (mm, 1e-3 m)
Geometry['dryTrop'] = fid.variables['mod_dry_tropo_cor_01'][:].copy()
#-- Wet Tropospheric Correction packed units (mm, 1e-3 m)
Geometry['wetTrop'] = fid.variables['mod_wet_tropo_cor_01'][:].copy()
#-- Inverse Barometric Correction packed units (mm, 1e-3 m)
Geometry['InvBar'] = fid.variables['inv_bar_cor_01'][:].copy()
#-- Delta Inverse Barometric Correction packed units (mm, 1e-3 m)
Geometry['DAC'] = fid.variables['hf_fluct_total_cor_01'][:].copy()
#-- GIM Ionospheric Correction packed units (mm, 1e-3 m)
Geometry['Iono_GIM'] = fid.variables['iono_cor_gim_01'][:].copy()
#-- Model Ionospheric Correction packed units (mm, 1e-3 m)
Geometry['Iono_model'] = fid.variables['iono_cor_01'][:].copy()
#-- Ocean tide Correction packed units (mm, 1e-3 m)
Geometry['ocTideElv'] = fid.variables['ocean_tide_01'][:].copy()
#-- Long period equilibrium ocean tide Correction packed units (mm, 1e-3 m)
Geometry['lpeTideElv'] = fid.variables['ocean_tide_eq_01'][:].copy()
#-- Ocean loading tide Correction packed units (mm, 1e-3 m)
Geometry['olTideElv'] = fid.variables['load_tide_01'][:].copy()
#-- Solid Earth tide Correction packed units (mm, 1e-3 m)
Geometry['seTideElv'] = fid.variables['solid_earth_tide_01'][:].copy()
#-- Geocentric Polar tide Correction packed units (mm, 1e-3 m)
Geometry['gpTideElv'] = fid.variables['pole_tide_01'][:].copy()
#-- Surface Type: enumerated key to classify surface at nadir
#-- 0 = Open Ocean
#-- 1 = Closed Sea
#-- 2 = Continental Ice
#-- 3 = Land
Geometry['Surf_type'] = fid.variables['surf_type_01'][:].copy()
#-- Corrections Status Flag
Geometry['Corr_status'] = fid.variables['flag_cor_status_01'][:].copy()
#-- Correction Error Flag
Geometry['Corr_error'] = fid.variables['flag_cor_err_01'][:].copy()
	#-- CryoSat-2 Average Waveforms Groups
	#-- Same as the LRM/SAR groups but for SARIN products the waveform array is
	#-- 512 bins instead of 128 and the number of echoes averaged is different.
Waveform_1Hz = {}
#-- Data Record Time (MDSR Time Stamp)
#-- Time (seconds since 2000-01-01)
time_avg_01_ku = fid.variables['time_avg_01_ku'][:].copy()
Waveform_1Hz['Time'] = time_avg_01_ku.copy()
#-- Time: day part
Waveform_1Hz['Day'] = np.array(time_avg_01_ku/86400.0, dtype=np.int32)
#-- Time: second part
Waveform_1Hz['Second'] = np.array(time_avg_01_ku -
Waveform_1Hz['Day'][:]*86400.0, dtype=np.uint32)
#-- Time: microsecond part
Waveform_1Hz['Micsec'] = np.array((time_avg_01_ku -
Waveform_1Hz['Day'][:]*86400.0 -
Waveform_1Hz['Second'][:])*1e6, dtype=np.uint32)
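	#-- i.e. the 1 Hz timestamp (seconds since 2000-01-01) is decomposed as
	#-- time_avg_01_ku = Day*86400 + Second + Micsec*1e-6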
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lat'] = fid.variables['lat_avg_01_ku'][:].copy()
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lon'] = fid.variables['lon_avg_01_ku'][:].copy()
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Waveform_1Hz['Alt'] = fid.variables['alt_avg_01_ku'][:].copy()
#-- Window Delay (two-way) corrected for instrument delays
Waveform_1Hz['TD'] = fid.variables['window_del_avg_01_ku'][:].copy()
#-- 1 Hz Averaged Power Echo Waveform
Waveform_1Hz['Waveform'] = fid.variables['pwr_waveform_avg_01_ku'][:].copy()
#-- Echo Scale Factor (to scale echo to watts)
Waveform_1Hz['Linear_Wfm_Multiplier'] = fid.variables['echo_scale_factor_avg_01_ku'][:].copy()
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
Waveform_1Hz['Power2_Wfm_Multiplier'] = fid.variables['echo_scale_pwr_avg_01_ku'][:].copy()
#-- Number of echoes averaged
Waveform_1Hz['N_avg_echoes'] = fid.variables['echo_numval_avg_01_ku'][:].copy()
Waveform_1Hz['Flags'] = fid.variables['flag_echo_avg_01_ku'][:].copy()
#-- CryoSat-2 Waveforms Groups
Waveform_20Hz = {}
#-- Echo Scale Factor (to scale echo to watts)
Waveform_20Hz['Linear_Wfm_Multiplier'] = np.ma.zeros((n_records,n_blocks))
Waveform_20Hz['Linear_Wfm_Multiplier'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
echo_scale_factor_20_ku = fid.variables['echo_scale_factor_20_ku'][:].copy()
#-- Echo Scale Power (a power of 2)
Waveform_20Hz['Power2_Wfm_Multiplier'] = np.ma.zeros((n_records,n_blocks))
Waveform_20Hz['Power2_Wfm_Multiplier'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
echo_scale_pwr_20_ku = fid.variables['echo_scale_pwr_20_ku'][:].copy()
#-- Number of echoes averaged
Waveform_20Hz['N_avg_echoes'] = np.ma.zeros((n_records,n_blocks))
Waveform_20Hz['N_avg_echoes'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
echo_numval_20_ku = fid.variables['echo_numval_20_ku'][:].copy()
#-- Flags for errors or information about 20Hz waveform
Waveform_20Hz['Flags'] = np.ma.zeros((n_records,n_blocks))
Waveform_20Hz['Flags'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_echo_20_ku = fid.variables['flag_echo_20_ku'][:].copy()
#-- CryoSat-2 mode specific waveform variables
if (MODE == 'LRM'):
#-- Low-Resolution Mode
#-- Averaged Power Echo Waveform [128]
Waveform_20Hz['Waveform'] = np.ma.zeros((n_records,n_blocks,n_LRM_RW))
Waveform_20Hz['Waveform'].mask = np.zeros((n_records,n_blocks,n_LRM_RW),dtype=np.bool)
pwr_waveform_20_ku = fid.variables['pwr_waveform_20_ku'][:].copy()
elif (MODE == 'SAR'):
#-- SAR Mode
#-- Averaged Power Echo Waveform [256]
Waveform_20Hz['Waveform'] = np.ma.zeros((n_records,n_blocks,n_SAR_D_RW))
Waveform_20Hz['Waveform'].mask = np.zeros((n_records,n_blocks,n_SAR_D_RW),dtype=np.bool)
pwr_waveform_20_ku = fid.variables['pwr_waveform_20_ku'][:].copy()
elif (MODE == 'SIN'):
#-- SARIN Mode
#-- Averaged Power Echo Waveform [1024]
Waveform_20Hz['Waveform'] = np.ma.zeros((n_records,n_blocks,n_SARIN_D_RW))
Waveform_20Hz['Waveform'].mask = np.zeros((n_records,n_blocks,n_SARIN_D_RW),dtype=np.bool)
pwr_waveform_20_ku = fid.variables['pwr_waveform_20_ku'][:].copy()
#-- Coherence [1024]: packed units (1/1000)
Waveform_20Hz['Coherence'] = np.ma.zeros((n_records,n_blocks,n_SARIN_D_RW))
Waveform_20Hz['Coherence'].mask = np.zeros((n_records,n_blocks,n_SARIN_D_RW),dtype=np.bool)
coherence_waveform_20_ku = fid.variables['coherence_waveform_20_ku'][:].copy()
#-- Phase Difference [1024]: packed units (microradians)
Waveform_20Hz['Phase_diff'] = np.ma.zeros((n_records,n_blocks,n_SARIN_D_RW))
Waveform_20Hz['Phase_diff'].mask = np.zeros((n_records,n_blocks,n_SARIN_D_RW),dtype=np.bool)
ph_diff_waveform_20_ku = fid.variables['ph_diff_waveform_20_ku'][:].copy()
#-- Beam Behavior Parameters
if MODE in ('SAR','SIN'):
Waveform_20Hz['Beam'] = {}
#-- Standard Deviation of Gaussian fit to range integrated stack power.
Waveform_20Hz['Beam']['SD'] = np.ma.zeros((n_records,n_blocks))
Waveform_20Hz['Beam']['SD'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
stack_std_20_ku = fid.variables['stack_std_20_ku'][:].copy()
#-- Stack Center: Mean of Gaussian fit to range integrated stack power.
Waveform_20Hz['Beam']['Center'] = np.ma.zeros((n_records,n_blocks))
Waveform_20Hz['Beam']['Center'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
stack_centre_20_ku = fid.variables['stack_centre_20_ku'][:].copy()
#-- Stack amplitude parameter scaled in dB/100.
Waveform_20Hz['Beam']['Amplitude'] = np.ma.zeros((n_records,n_blocks))
Waveform_20Hz['Beam']['Amplitude'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
stack_scaled_amplitude_20_ku = fid.variables['stack_scaled_amplitude_20_ku'][:].copy()
#-- 3rd moment: providing the degree of asymmetry of the range integrated
#-- stack power distribution.
Waveform_20Hz['Beam']['Skewness'] = np.ma.zeros((n_records,n_blocks))
Waveform_20Hz['Beam']['Skewness'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
stack_skewness_20_ku = fid.variables['stack_skewness_20_ku'][:].copy()
#-- 4th moment: Measure of peakiness of range integrated stack power distribution.
Waveform_20Hz['Beam']['Kurtosis'] = np.ma.zeros((n_records,n_blocks))
Waveform_20Hz['Beam']['Kurtosis'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
stack_kurtosis_20_ku = fid.variables['stack_kurtosis_20_ku'][:].copy()
#-- Stack peakiness computed from the range integrated power of the single look echoes
Waveform_20Hz['Beam']['Peakiness'] = np.ma.zeros((n_records,n_blocks))
Waveform_20Hz['Beam']['Peakiness'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
stack_peakiness_20_ku = fid.variables['stack_peakiness_20_ku'][:].copy()
#-- Stack residuals of Gaussian that fits the range integrated power of the single look echoes
Waveform_20Hz['Beam']['RMS'] = np.ma.zeros((n_records,n_blocks))
Waveform_20Hz['Beam']['RMS'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
stack_gaussian_fitting_residuals_20_ku = fid.variables['stack_gaussian_fitting_residuals_20_ku'][:].copy()
#-- Standard deviation as a function of boresight angle (microradians)
Waveform_20Hz['Beam']['SD_boresight_angle'] = np.ma.zeros((n_records,n_blocks))
Waveform_20Hz['Beam']['SD_boresight_angle'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
stack_std_angle_20_ku = fid.variables['stack_std_angle_20_ku'][:].copy()
#-- Stack Center angle as a function of boresight angle (microradians)
Waveform_20Hz['Beam']['Center_boresight_angle'] = np.ma.zeros((n_records,n_blocks))
Waveform_20Hz['Beam']['Center_boresight_angle'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
stack_centre_angle_20_ku = fid.variables['stack_centre_angle_20_ku'][:].copy()
#-- Stack Center angle as a function of look angle (microradians)
Waveform_20Hz['Beam']['Center_look_angle'] = np.ma.zeros((n_records,n_blocks))
Waveform_20Hz['Beam']['Center_look_angle'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
stack_centre_look_angle_20_ku = fid.variables['stack_centre_look_angle_20_ku'][:].copy()
#-- Number of contributing beams in the stack before weighting
Waveform_20Hz['Beam']['Number'] = np.ma.zeros((n_records,n_blocks))
Waveform_20Hz['Beam']['Number'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
stack_number_before_weighting_20_ku = fid.variables['stack_number_before_weighting_20_ku'][:].copy()
#-- Number of contributing beams in the stack after weighting
Waveform_20Hz['Beam']['Weighted_Number'] = np.ma.zeros((n_records,n_blocks))
Waveform_20Hz['Beam']['Weighted_Number'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
stack_number_after_weighting_20_ku = fid.variables['stack_number_after_weighting_20_ku'][:].copy()
#-- for each record in the CryoSat file
for r in range(n_records):
#-- index for record r
idx = ind_first_meas_20hz_01[r]
#-- number of valid blocks in record r
cnt = np.count_nonzero(ind_meas_1hz_20_ku == r)
#-- CryoSat-2 Time and Orbit Group
Location['Time'].data[r,:cnt] = time_20_ku[idx:idx+cnt]
Location['Time'].mask[r,:cnt] = False
        Location['Day'].data[r,:cnt] = np.array(time_20_ku[idx:idx+cnt]/86400.0, dtype=np.int)
"""
Calculation of Earth layers and electron densities.
"""
from __future__ import division
import numpy as np
try:
import numba
except ImportError:
numba = None
from pisa import FTYPE
from pisa.utils.fileio import from_file
from pisa.utils.log import logging, set_verbosity
__all__ = ['extCalcLayers', 'Layers']
__author__ = '<NAME>'
__license__ = '''Copyright (c) 2014-2017, The IceCube Collaboration
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
if numba is None:
class jit(object):
"""Decorator class to mimic Numba's `jit` when Numba is missing"""
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args):
return args[0]
else:
jit = numba.jit
ftype = numba.typeof(FTYPE(1))
@jit(nopython=True, nogil=True, cache=True)
def extCalcLayers(
cz,
r_detector,
prop_height,
detector_depth,
max_layers,
min_detector_depth,
rhos,
YeFrac,
YeOuterRadius,
default_elec_frac,
coszen_limit,
radii):
"""Layer density/distance calculator for each coszen specified.
Accelerated with Numba if present.
Parameters
----------
cz
r_detector
prop_height
detector_depth
max_layers
min_detector_depth
rhos
YeFrac
YeOuterRadius
default_elec_frac
    coszen_limit
    radii
    Returns
    -------
    n_layers : array of int
        number of layers for each coszen
    density : array
        densities per layer, flattened from (cz, max_layers)
    distance : array
        distances per layer, flattened from (cz, max_layers)
"""
# Something to store the final results in
shape = (np.int64(len(cz)), np.int64(max_layers))
n_layers = np.zeros(shape[0], dtype=np.int32)
distance = np.zeros(shape=shape, dtype=FTYPE)
    density = np.zeros(shape=shape, dtype=FTYPE)
import sys
import logging
from os import path
import math
from functools import partial
import json
import numpy as np
from shapely.algorithms.polylabel import polylabel
from shapely.geometry import Point
import click
click.option = partial(click.option, show_default=True)
from airsimcollect.helper.helper import import_world, plot_collection_points
logging.basicConfig(level=logging.INFO, format='%(message)s')
logger = logging.getLogger("GeneratePoi")
logger.setLevel(logging.INFO)
def num_collection_points(yaw_range, yaw_delta, pitch_range, pitch_delta):
# Whether to include endpoint of the yaw and pitch range
yaw_endpoint = True
theta_endpoint = True
if yaw_delta < .01:
num_yaw = 1
else:
num_yaw = int((abs(yaw_range[0] - yaw_range[1])) / yaw_delta) + 1
# Dont sample the last 0 and 360
if int(abs(yaw_range[0] - yaw_range[1])) == 360:
num_yaw -= 1
yaw_endpoint = False
if pitch_delta is None or pitch_delta < .01:
num_phi = 1
else:
num_phi = int((abs(pitch_range[0] - pitch_range[1])) / pitch_delta) + 1
return num_yaw, num_phi, num_phi * num_yaw, yaw_endpoint
def remove_collision(collection_points, collisions):
x = collection_points[:, 0]
y = collection_points[:, 1]
z = collection_points[:, 2]
obstacle_mask = np.zeros_like(x, dtype='bool')
for (minx, miny, maxx, maxy), height in collisions:
z_m = z < height
x_m = (x > minx) & (x < maxx)
y_m = (y > miny) & (y < maxy)
obstacle_mask = obstacle_mask | (z_m & x_m & y_m)
return collection_points[~obstacle_mask]
def sample_circle(focus_point, radius, yaw_range, yaw_delta, fixed_phi=np.pi/2):
num_yaw, num_phi, _, yaw_endpoint = num_collection_points(yaw_range, yaw_delta, None, None)
theta = np.linspace(math.radians(
yaw_range[0]), math.radians(yaw_range[1]), num_yaw, endpoint=yaw_endpoint)
phi = np.ones_like(theta) * fixed_phi
roll = np.zeros_like(theta)
x = np.cos(theta) * radius + focus_point[0]
y = np.sin(theta) * radius + focus_point[1]
z = np.ones_like(phi) * focus_point[2]
collection_points = np.stack((x, y, z, phi, roll, theta), axis=1)
collection_points = np.append(collection_points,[[*focus_point, fixed_phi, 0, 0]], axis=0)
print(fixed_phi)
return collection_points
def sample_sphere(focus_point, radius, pitch_range, pitch_delta, yaw_range, yaw_delta):
num_yaw, num_phi, _, yaw_endpoint = num_collection_points(yaw_range, yaw_delta, pitch_range, pitch_delta)
theta = np.linspace(math.radians(
yaw_range[0]), math.radians(yaw_range[1]), num_yaw, endpoint=yaw_endpoint)
phi = np.linspace(math.radians(
pitch_range[0]), math.radians(pitch_range[1]), num_phi)
theta = np.repeat(theta, num_phi)
phi = np.tile(phi, num_yaw)
roll = np.zeros_like(phi)
x = np.cos(theta) * np.sin(phi) * radius + focus_point[0]
y = np.sin(theta) * np.sin(phi) * radius + focus_point[1]
z = np.cos(phi) * radius + focus_point[2]
collection_points = np.stack((x, y, z, phi, roll, theta), axis=1)
return collection_points
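# Illustrative sketch (not part of the original script): exercising the
# spherical sampler above with made-up values. With these deltas the sphere
# is sampled at 4 yaw angles x 3 pitch angles = 12 collection points, each
# row being (x, y, z, phi, roll, theta).
def _example_sample_sphere():
    focus = [0.0, 0.0, 100.0]   # hypothetical point of interest (x, y, z)
    return sample_sphere(focus, radius=500.0,
                         pitch_range=(30, 90), pitch_delta=30,
                         yaw_range=(0, 360), yaw_delta=90)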
def random_points_within(poly, num_points):
min_x, min_y, max_x, max_y = poly.bounds
points = []
while len(points) < num_points:
random_point = Point(
[np.random.uniform(min_x, max_x), np.random.uniform(min_y, max_y)])
if (random_point.within(poly)):
points.append(random_point)
return points
def generate_radii(feature, radius_min=0.0, radius_increase=0.0, num_spheres=1, radius_delta=200.0):
    """Generates a list of radii for collection spheres
    Arguments:
        feature {GeoJSON} -- GeoJSON Feature
    Keyword Arguments:
        radius_min {float} -- Minimum Radius. If 0 takes on different defaults (default: {0.0})
        radius_increase {float} -- Bias added to the minimum radius (default: {0.0})
        num_spheres {int} -- Number of collection spheres (default: {1})
        radius_delta {float} -- How much to expand each radius from the previous (default: {200.0})
    Returns:
        list -- list of radii
    """
radius_min_default_point = 500
geom = feature['geometry']
if geom.geom_type == 'Point' or geom.geom_type == 'LineString':
radius_min_ = radius_min_default_point if radius_min == 0.0 else radius_min
radius_min_ += radius_increase
else:
minx, miny, maxx, maxy = geom.bounds
radius_geom = min(maxx - minx, maxy - miny) / 2.0
radius_min_ = radius_geom if radius_min == 0.0 else radius_min
radius_min_ += radius_increase
return [radius_min_ + radius_delta * i for i in range(num_spheres)]
def generate_line_points(geom, num_focus_points):
sections = len(geom.coords) - 1
point_per_section = max(int(math.floor(num_focus_points / sections)), 1)
x_points = []
y_points = []
for i, (x_prev, y_prev) in enumerate(geom.coords[:-1]):
x_next, y_next = geom.coords[i + 1]
x_points.append(np.linspace(x_prev, x_next, num=point_per_section, endpoint=False))
y_points.append(np.linspace(y_prev, y_next, num=point_per_section, endpoint=False))
# Must add the last point
last_point = geom.coords[-1]
x_points.append(np.array([last_point[0]]))
y_points.append(np.array([last_point[1]]))
    # Flatten and combine data
x = np.concatenate(x_points)
y = np.concatenate(y_points)
points = np.column_stack((x, y))
return points
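# Illustrative sketch (not in the original script): spreading focus points
# along a two-segment line. Two segments x two points per segment plus the
# final vertex gives five (x, y) rows.
def _example_generate_line_points():
    from shapely.geometry import LineString  # local import for the example only
    line = LineString([(0, 0), (10, 0), (10, 10)])
    return generate_line_points(line, num_focus_points=4)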
def generate_focus_points(feature, focus_point, num_focus_points, height_offset=0.0):
geom = feature['geometry']
height = feature['properties']['height'] + height_offset
# Check if LineString Feature, return early
if geom.geom_type == 'LineString':
points = generate_line_points(geom, num_focus_points)
return [[point[0], point[1], height] for point in points]
# Point or Polygon Feature
if geom.geom_type == 'Point':
points = [geom]
else:
if focus_point == 'random':
points = random_points_within(geom, num_focus_points)
elif focus_point == 'centroid':
points = [geom.centroid]
elif focus_point == 'pia':
points = [polylabel(geom)]
return [[point.x, point.y, height] for point in points]
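# Minimal usage sketch (assumes a feature is a dict with a shapely geometry
# under 'geometry' and a 'height' property, matching what the functions above
# expect; the coordinates and height are made up).
def _example_generate_focus_points():
    feature = {'geometry': Point(100.0, -50.0),
               'properties': {'height': 20.0}}
    # A Point feature yields a single focus point at the feature height.
    return generate_focus_points(feature, 'centroid', num_focus_points=1)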
@click.group()
def cli():
"""Generates points of interest from geojson file from unreal world"""
pass
@cli.command()
@click.option('-m', '--map-path', type=click.Path(exists=True), required=True,
help='GeoJSON map file of points of interests (features) in the UE4 world.')
@click.option('-pr', '--pitch-range', nargs=2, type=float, default=[30, 90],
help='Range in pitch (phi) on a collection sphere to sample each collection point')
@click.option('-pd', '--pitch-delta', type=float, default=15.0,
help='Change in pitch angle (degrees) on collection sphere for each collection point')
@click.option('-yr', '--yaw-range', nargs=2, type=float, default=[0, 360],
help='Range in yaw (theta) on a collection sphere to sample each collection point')
@click.option('-yd', '--yaw-delta', type=float, default=15.0,
help='Change in yaw angle (degrees) on collection sphere for each collection point')
@click.option('-ho', '--height-offset', type=float, default=0.0,
help='Add a height offset to each feature')
@click.option('-ns', '--num-spheres', type=int, default=1,
help='Number of collection spheres to generate and sample from.')
@click.option('-rm', '--radius-min', type=float, default=0.0,
help="Fixed minimum radius of collection sphere (distance from the focus point). " +
"If 0 and map feature is a polygon, will use smallest sized circle to circumscribe polygon. " +
"If 0 and map feature is a point, set to 500.")
@click.option('-ri', '--radius-increase', type=float, default=0.0,
help="Increase (bias) from minimum radius of collection sphere (distance from the focus point). ")
@click.option('-rd', '--radius-delta', type=float, default=500.0,
help='Change in growing collection sphere radius. Only applicable for -ns > 1.')
@click.option('-fp', '--focus-point', type=click.Choice(['pia', 'centroid', 'random']), default='centroid',
help='Only applicable to polygon features. Determines what point on a 2D polygon ' +
'should be used as the center of the collection sphere')
@click.option('-nf', '--num-focus-points', type=int, default=1,
help='Number of focus points to randomly generate on 2D polygon. Only applicable to -fp random.')
@click.option('-rfn', '--record-feature-name', type=str, default=None,
help='Set to geojson property name if you want to record a label associated to each point')
@click.option('-o', '--out', type=click.Path(exists=False), default="collection_points.npy",
help="Output numpy array of position and angles")
@click.option('-ao', '--append-out', is_flag=True,
help="If output file already exists, just append to it")
@click.option('--seed', type=int, default=1, help="Random seed")
@click.option('-ic', '--ignore-collision', is_flag=True,
help="By default this module ensures the collection point does not collide with any known features " +
"in the map. Set this flag to ignore this check.")
@click.option('-sc', '--sampling-method', type=click.Choice(['sphere', 'circle']), default='sphere',
help='Whether we are sampling on a sphere or on a 2D circle at a height offset from the focus point')
@click.option('-pp', '--plot-points', is_flag=True,
help="Whether to plot points for viewing. Debug only.")
@click.option('-d', '--debug', is_flag=True,
help="Whether to print debug statements")
def generate(map_path, pitch_range, pitch_delta, yaw_range, yaw_delta, height_offset, num_spheres, radius_min, radius_increase, radius_delta,
focus_point, num_focus_points, record_feature_name, out, append_out, seed, ignore_collision, sampling_method, plot_points, debug):
if debug:
logger.setLevel(logging.DEBUG)
logger.debug("{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}".format(
map_path, pitch_range, pitch_delta, yaw_delta, num_spheres, radius_min, radius_increase, radius_delta, focus_point,
num_focus_points, ignore_collision, out, seed, sampling_method, plot_points))
click.secho("Generating collection points...")
# generate_collection_points(map_path, pitch_range, pitch_delta, yaw_delta, num_spheres, radius_min, radius_delta, focus_point, ignore_collision)
try:
features, collisions = import_world(map_path)
except Exception as e:
click.secho("Error parsing GeoJSON file. Is it valid?", fg='red')
logger.exception(e)
sys.exit()
all_points = []
all_feature_names = []
for feature in features:
logger.debug("Inspecting feature: %s", feature)
focus_points = generate_focus_points(feature, focus_point, num_focus_points, height_offset=height_offset)
        radii = generate_radii(feature, radius_min, radius_increase, num_spheres, radius_delta)
for focus_point_ in focus_points:
logger.debug("At focus point: %s", focus_point_)
for radius in radii:
if sampling_method == 'sphere':
collection_points = sample_sphere(focus_point_, radius, pitch_range,
pitch_delta, yaw_range, yaw_delta)
else:
fixed_phi = pitch_range[0]
print(fixed_phi)
collection_points = sample_circle(focus_point_, radius, yaw_range, yaw_delta, fixed_phi=fixed_phi)
logger.debug("At radius level: %s", radius)
if not ignore_collision:
prev_shape = collection_points.shape
collection_points = remove_collision(collection_points, collisions)
if collection_points.shape != prev_shape:
logger.debug("Collisions removed for feature %r", feature['properties']['class_label'])
all_points.append(collection_points)
if record_feature_name:
all_feature_names.extend([feature['properties'][record_feature_name]] * collection_points.shape[0])
if plot_points:
plot_collection_points(collection_points, focus_point_, radius, feature, sampling_method)
all_points = np.vstack(all_points)
click.echo(
"Finished generating {:d} collection points for {:d} points of interests!".format(
all_points.shape[0],
len(features)))
if append_out and path.isfile(out):
old_data = np.load(out)
all_points = np.vstack((old_data, all_points))
    np.save(out, all_points)
import numpy as np
# REFERENCES
#https://towardsdatascience.com/fast-fourier-transform-937926e591cb
#https://pythonnumericalmethods.berkeley.edu/notebooks/chapter24.03-Fast-Fourier-Transform.html
#https://jakevdp.github.io/blog/2013/08/28/understanding-the-fft/
def show_M(N):
"""
N: int
"""
n = np.arange(N)
k = n.reshape((N,1))
M = k*n
print("M:", M)
def get_data(len):
"""
len: int
lenght of data
"""
data = np.random.random(len)
return data
def get_circular_terms(N):
"""
N: int
"""
    terms = np.exp(-1j * 2 * np.pi * np.arange(N) / N)
    return terms
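# Illustrative sketch (assumption, not from the original notes): the circular
# terms above are the entries of the DFT matrix M[k, n] = exp(-2j*pi*k*n/N),
# so the O(N^2) reference DFT that the FFT accelerates can be written as a
# single matrix-vector product.
def naive_dft(x):
    N = len(x)
    n = np.arange(N)
    k = n.reshape((N, 1))
    M = np.exp(-2j * np.pi * k * n / N)  # full matrix of circular terms
    return M @ x
# Sanity check (should print True):
# x = get_data(8); print(np.allclose(naive_dft(x), np.fft.fft(x)))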
"""
Least-squares fitting and nearest rotation matrix
"""
import numpy as np
import scipy.linalg as la
from .trafo import Transformation
from .rotation import Rotation, Quaternion, map_to_quat
def qfit(target, source):
"""Least-squares fitting of source onto target using unit quaternions.
Parameters
----------
target : (N, 3) array
3D point cloud onto which the source will be transformed
source : (N, 3) array
3D point cloud that will be transformed so as to fit the target
optimally in a least-squares sense
Returns
-------
R : (3, 3) array
Optimal rotation matrix
t : (3, ) array
Optimal translation vector
"""
assert target.ndim == 2
assert np.shape(target)[1] == 3
assert np.shape(target) == np.shape(source)
x = target.mean(0)
y = source.mean(0)
A = np.dot((target-x).T, source-y)
M = map_to_quat(A)
_, q = la.eigh(M, eigvals=[3, 3])
R = Quaternion(q.flatten()).matrix
t = x - R.dot(y)
return R, t
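# Example sketch (not part of the original module): recover a known rigid
# transformation with qfit. The 90-degree rotation about z and the shift are
# arbitrary illustrative values; up to round-off, R should match R_true and
# t should match t_true.
def _example_qfit():
    rng = np.random.RandomState(0)
    source = rng.rand(50, 3)
    R_true = np.array([[0., -1., 0.],
                       [1., 0., 0.],
                       [0., 0., 1.]])
    t_true = np.array([1.0, 2.0, 3.0])
    target = source.dot(R_true.T) + t_true
    R, t = qfit(target, source)
    return np.allclose(R, R_true), np.allclose(t, t_true)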
class LeastSquares(object):
"""LeastSquares
Objective function using a least-squares criterion
"""
def __init__(self, target, source, trafo=Rotation()):
"""
Parameters
----------
target, source : rank-2 numpy arrays
N x 3 coordinate arrays
trafo : instance of Transformation class
Optional parameterization of the rotation matrix
"""
if target.shape != source.shape or target.ndim != 2 \
or target.shape[1] != 3:
msg = 'input coordinate arrays must have rank 2 and shape (n,3)'
raise ValueError(msg)
if not isinstance(trafo, Transformation):
msg = 'trafo must be instance of Transformation'
raise TypeError(msg)
self.target = target
self.source = source
self.trafo = trafo
self.values = []
def forces(self, params):
"""Displacement vectors between both coordinate arrays after rotation
of the second array.
"""
self.trafo.dofs = params
return self.trafo(self.source) - self.target
def __call__(self, dofs):
"""Least-squares residual.
"""
residual = 0.5 * np.sum(self.forces(dofs)**2)
self.values.append(residual)
return residual
def gradient(self, dofs):
"""Gradient of least-squares residual with respect to rotational
parameters.
"""
forces = self.forces(dofs)
coords = self.source
return self.trafo.map_forces(coords, forces)
def rmsd(self, dofs):
"""
Root mean square deviation between coordinate arrays
after rotation given by rotational parameters
"""
return np.sqrt(2 * self(dofs) / len(self.target))
def optimum(self):
"""
Optimal rotation minimizing the least-squares residual calculated
by singular value decomposition
"""
U, L, V = np.linalg.svd(np.dot(self.target.T, self.source))
R = np.dot(U, V)
        if np.linalg.det(R) < 0:
            # Improper rotation (reflection): flip the last left singular
            # vector so the recomposed matrix is a proper rotation.
            U[:, -1] *= -1
            R = np.dot(U, V)
            L[2] *= -1
rmsd = np.sum(self.target**2) + np.sum(self.source**2) - 2 * L.sum()
return self.trafo.__class__(R), rmsd
class NearestRotation(object):
"""NearestRotation
Finding the rotation matrix that is closest (in a least-squares sense)
to some general 3x3 matrix.
"""
def __init__(self, A, trafo=Rotation()):
"""
Parameters
----------
A : 3 x 3 array
Input matrix to which the closest rotation matrix shall be computed
trafo : instance of Rotation class
Optional parameterization of the rotation matrix
"""
if A.shape != (3, 3):
msg = 'Shape of input matrix must be (3,3)'
raise ValueError(msg)
if not isinstance(trafo, Rotation):
msg = 'trafo must be instance of Rotation'
raise TypeError(msg)
self.A = A
self.trafo = trafo
self.values = []
def __call__(self, dofs):
"""
Inner product between rotation matrix and input target matrix
"""
self.trafo.dofs = dofs
return np.sum(self.A * self.trafo.matrix)
def gradient(self, dofs):
"""
Gradient of inner product with respect to rotational parameters
"""
self.trafo.dofs = dofs
if hasattr(self.trafo, 'jacobian'):
return np.array([np.sum(self.A * J) for J in self.trafo.jacobian])
else:
return self.A
def optimum(self):
"""
Closest rotation matrix determined by singular value decomposition
"""
        U, L, V = np.linalg.svd(self.A)
        R = np.dot(U, V)
        if np.linalg.det(R) < 0:
            U[:, -1] *= -1
            R = np.dot(U, V)
        return self.trafo.__class__(R)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
''' This module contains a class for controlling a list of StreamerHead objects.
Its purpose is to create heads, find heads to add or remove, and find the scale.
'''
# General imports
import numpy as np
import logging
import scipy.special # bessel function
# Import from project files
from ..core import coordinate_functions
from .streamer_head import SHList
from .streamer_head import StreamerHead
# settings
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
eps = np.finfo(float).eps
########################################################################
# Copyright 2021, UChicago Argonne, LLC
#
# Licensed under the BSD-3 License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a
# copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
########################################################################
"""
date: 2021-11-02
author: matz
Test the behavior and attributes of unrodded DASSH Region instances
"""
########################################################################
import copy
import os
import dassh
import numpy as np
import pytest
def test_simple_unrodded_reg_instantiation(c_lrefl_simple):
"""Test that the unrodded region has all the right stuffs"""
assert c_lrefl_simple.vf['coolant'] == 0.25
assert c_lrefl_simple.vf['struct'] == 0.75
assert c_lrefl_simple.duct_ftf[1] == 0.116
assert len(c_lrefl_simple.temp['coolant_int']) == 1
assert c_lrefl_simple.temp['duct_mw'].shape == (1, 6)
# If it don't fail, it pass
c_lrefl_simple.temp['coolant_int'] *= 623.15
c_lrefl_simple._update_coolant_params(623.15)
def test_ur_reg_instantiation_fancy(testdir):
"""Make sure a fancy unrodded region can be instantiated"""
inp = dassh.DASSH_Input(
os.path.join(testdir, 'test_inputs', 'input_ur_conv_factor.txt'),
empty4c=True)
mat = {'coolant': dassh.Material('sodium'),
'duct': dassh.Material('ht9')}
# Test fully unrodded assembly
ur1 = dassh.region_unrodded.make_ur_asm(
'testboi', inp.data['Assembly']['fuel'], mat, 1.0)
print(ur1.mratio)
print(ur1._mratio)
print(inp.data['Assembly']['fuel']['convection_factor'])
assert ur1.mratio is not None
assert ur1.mratio != 1.0
# Test default in unrodded axial regions
ur2 = dassh.region_unrodded.make_ur_axialregion(
inp.data['Assembly']['control'], 'empty_cr', mat, 1.0)
assert ur2.mratio == 1.0
# Test nondefault in unrodded axial regions
ur2 = dassh.region_unrodded.make_ur_axialregion(
inp.data['Assembly']['control'], 'upper_cr', mat, 1.0)
assert ur2.mratio == 0.8
def test_unrodded_reg_clone_shallow(c_lrefl_simple):
"""Test that region attributes are properly copied"""
clone = c_lrefl_simple.clone(15.0)
non_matches = []
# Shallow copies
for attr in ['z', 'duct_ftf', 'duct_thickness', 'duct_perim',
'vf', 'area', 'total_area', '_params', 'x_pts']:
id_clone = id(getattr(clone, attr))
id_original = id(getattr(c_lrefl_simple, attr))
if id_clone == id_original: # They should be the same
continue
else:
non_matches.append(attr)
print(attr, id_clone, id_original)
assert len(non_matches) == 0
def test_unrodded_reg_clone_deep(c_lrefl_simple):
"""Test that region attributes are properly copied"""
clone = c_lrefl_simple.clone(15.0)
non_matches = []
    # Deep copies
for attr in ['temp', 'flow_rate', 'coolant_params']:
id_clone = id(getattr(clone, attr))
id_original = id(getattr(c_lrefl_simple, attr))
if id_clone != id_original: # They should be different
continue
else:
non_matches.append(attr)
print(attr, id_clone, id_original)
assert len(non_matches) == 0
def test_simple_unrodded_reg_zero_power(c_lrefl_simple):
"""Test that no power temp calc returns no change"""
in_temp = c_lrefl_simple.temp['coolant_int']
t_gap = np.ones(6) * c_lrefl_simple.avg_duct_mw_temp
c_lrefl_simple.calculate(
0.1, {'refl': 0.0}, t_gap, 0.0, adiabatic_duct=True)
assert c_lrefl_simple.temp['coolant_int'] == pytest.approx(in_temp)
assert c_lrefl_simple.pressure_drop > 0.0
def test_simple_unrodded_reg_none_power(c_lrefl_simple):
"""Test that giving power=None returns no change in temps"""
in_temp = c_lrefl_simple.temp['coolant_int']
t_gap = np.ones(6) * c_lrefl_simple.avg_duct_mw_temp
c_lrefl_simple.calculate(
0.1, {'refl': None}, t_gap, 0.0, adiabatic_duct=True)
assert c_lrefl_simple.temp['coolant_int'] == pytest.approx(in_temp)
assert c_lrefl_simple.pressure_drop > 0.0
def test_simple_unrodded_reg_qmcdt(c_lrefl_simple):
"""Test that simple coolant calc returns proper result"""
# Set up some stuff
c_lrefl_simple.temp['coolant_int'] *= 623.15
c_lrefl_simple.temp['duct_mw'] *= 623.15
c_lrefl_simple.temp['duct_surf'] *= 623.15
in_temp = copy.deepcopy(c_lrefl_simple.temp['coolant_int'])
power = 10000.0
dz = 0.1
qlin = power / dz
# Calculate dT and estimate Q
c_lrefl_simple._update_coolant_params(623.15)
dT = c_lrefl_simple._calc_coolant_temp(dz, {'refl': qlin})
q_est = (c_lrefl_simple.coolant.heat_capacity *
c_lrefl_simple.flow_rate * dT)
print('m =', c_lrefl_simple.flow_rate)
print('Cp =', c_lrefl_simple.coolant.heat_capacity)
# print('dT =', c_lrefl_simple.temp['coolant_int'] - in_temp)
print('dT =', dT)
print('q (est) = ', q_est)
assert power == pytest.approx(q_est)
assert c_lrefl_simple.temp['coolant_int'] == in_temp
def test_simple_unrodded_reg_duct(c_lrefl_simple):
"""Test that simple homog duct calc returns proper result"""
# Set up some stuff
c_lrefl_simple.temp['coolant_int'] *= 633.15
# Calculate dT and estimate Q
gap_temp = np.ones(6) * 623.15
gap_htc = np.ones(6) * 7.5e4 # made this up
# print(c_lrefl_simple.temp['duct_mw'][0])
c_lrefl_simple._update_coolant_params(633.15)
c_lrefl_simple._calc_duct_temp(gap_temp, gap_htc)
print('inner', c_lrefl_simple.temp['duct_surf'][0, 0])
print('midwall', c_lrefl_simple.temp['duct_mw'][0])
print('outer', c_lrefl_simple.temp['duct_surf'][0, 1])
assert all([623.15 < x < 633.15 for x in
c_lrefl_simple.temp['duct_mw'][0]])
# Coolant temp is greater than inner duct surface temp, which
# is greater than duct midwall temp
assert all([633.15
> c_lrefl_simple.temp['duct_surf'][0, 0, i]
> c_lrefl_simple.temp['duct_mw'][0, i]
for i in range(6)])
# Duct midwall temp is greater than outer duct surface temp,
# which is greater than gap coolant temp
assert all([c_lrefl_simple.temp['duct_mw'][0, i]
> c_lrefl_simple.temp['duct_surf'][0, 1, i]
> 623.15
for i in range(6)])
def test_mnh_ur_ebal_adiabatic(shield_ur_mnh):
"""Test multi-node homogeneous unrodded region energy balance
with adiabatic duct wall"""
n_steps = 100
dz = 0.001
power = {'refl': 100.0}
gap_temp = np.arange(625, 775, 25) # [625, 650, 675, 700, 725, 750]
fake_htc = np.ones(6) * 2e4
for i in range(n_steps):
shield_ur_mnh.calculate(dz, power, gap_temp, fake_htc,
ebal=True, adiabatic_duct=True)
assert np.sum(shield_ur_mnh.ebal['duct']) == 0.0
# Check power added real quick
tot_power_added = n_steps * dz * power['refl']
assert shield_ur_mnh.ebal['power'] - tot_power_added <= 1e-12
print('ENERGY ADDED (W): ', shield_ur_mnh.ebal['power'])
print('ENERGY FROM DUCT (W)', np.sum(shield_ur_mnh.ebal['duct']))
total = (np.sum(shield_ur_mnh.ebal['duct'])
+ shield_ur_mnh.ebal['power'])
print('TOTAL ENERGY INPUT (W)', total)
e_temp_rise = (shield_ur_mnh.flow_rate
* shield_ur_mnh.coolant.heat_capacity
* (shield_ur_mnh.avg_coolant_temp - 623.15))
print('ENERGY COOLANT DT (W):', e_temp_rise)
bal = total - e_temp_rise
print('DIFFERENCE (W)', bal)
assert bal <= 1e-7
def test_mnh_ur_ebal(shield_ur_mnh):
"""Test multi-node homogeneous unrodded region energy balance"""
dz = dassh.region_unrodded.calculate_min_dz(
shield_ur_mnh, 623.15, 773.15)
n_steps = 100
dz = 0.001 # less than dz calculated above
power = {'refl': 0.0}
gap_temp = np.arange(625, 775, 25) # [625, 650, 675, 700, 725, 750]
fake_htc = np.ones(6) * 2e4
for i in range(n_steps):
shield_ur_mnh.calculate(dz, power, gap_temp, fake_htc, ebal=True)
# Check power added real quick
tot_power_added = n_steps * dz * power['refl']
assert shield_ur_mnh.ebal['power'] - tot_power_added <= 1e-12
print('ENERGY ADDED (W): ', shield_ur_mnh.ebal['power'])
print('ENERGY FROM DUCT (W):', np.sum(shield_ur_mnh.ebal['duct']))
total = (np.sum(shield_ur_mnh.ebal['duct'])
+ shield_ur_mnh.ebal['power'])
print('TOTAL ENERGY INPUT (W):', total)
e_temp_rise = (shield_ur_mnh.flow_rate
* shield_ur_mnh.coolant.heat_capacity
* (shield_ur_mnh.avg_coolant_temp - 623.15))
print('ENERGY COOLANT DT (W):', e_temp_rise)
bal = total - e_temp_rise
print('DIFFERENCE (W):', bal)
assert bal <= 1e-7
def test_ur_asm_pressure_drop(c_shield_rr_params):
"""Test that the pressure drop calculation gives the same result
in RR and UR objects"""
input, mat = c_shield_rr_params
    mat['coolant'] = dassh.Material('sodium')  # get dynamic properties
fr = 0.50
# Make rodded region
rr = dassh.region_rodded.make_rr_asm(input, 'dummy', mat, fr)
# Make unrodded region; manually set UR params
input['use_low_fidelity_model'] = True
input['convection_factor'] = 'calculate'
ur = dassh.region_unrodded.make_ur_asm('testboi', input, mat, fr)
T_in = 623.15
dz = 0.01
dp_rr = 0.0
dp_ur = 0.0
for i in range(50):
T = T_in + i
rr._update_coolant_int_params(T)
ur._update_coolant_params(T)
dp_rr += rr.calculate_pressure_drop(dz)
dp_ur += ur.calculate_pressure_drop(dz)
print('dp_rr:', dp_rr)
print('dp_ur:', dp_ur)
diff = dp_rr - dp_ur
print(diff)
assert np.abs(diff) < 1e-8
def test_ur_dp_rr_equiv(testdir):
"""Test that the RR equivalent UR returns the same pressure drop"""
# Get answer to compare with
path_ans = os.path.join(
testdir, 'test_data', 'test_single_asm', 'dassh_reactor.pkl')
if os.path.exists(path_ans):
r_ans = dassh.reactor.load(path_ans)
else:
inpath = os.path.join(testdir, 'test_inputs', 'input_single_asm.txt')
outpath = os.path.join(testdir, 'test_results', 'test_single_asm')
inp = dassh.DASSH_Input(inpath)
r_ans = dassh.Reactor(inp, path=outpath, write_output=True)
r_ans.temperature_sweep()
ans = np.zeros(4)
for i in range(len(r_ans.assemblies[0].region)):
ans[i] = r_ans.assemblies[0].region[i].pressure_drop
ans[-1] = r_ans.assemblies[0].pressure_drop
# Get result to compare
inpath = os.path.join(testdir, 'test_inputs', 'input_single_asm_lf.txt')
outpath = os.path.join(testdir, 'test_results', 'test_single_asm_lf')
inp = dassh.DASSH_Input(inpath)
r_res = dassh.Reactor(inp, path=outpath, write_output=True)
r_res.temperature_sweep()
res = np.zeros(4)
for i in range(len(r_res.assemblies[0].region)):
res[i] = r_res.assemblies[0].region[i].pressure_drop
res[-1] = r_res.assemblies[0].pressure_drop
# Compare them
diff = (res - ans) / ans
assert np.max(np.abs(diff)) < 1e-3
def test_ur_dp(testdir):
"""Test that the pressure drop calculation for the unrodded region
is similar to that of the pin bundle when comparable parameters
are used"""
# Get answer to compare with
path_ans = os.path.join(
testdir, 'test_data', 'test_single_asm', 'dassh_reactor.pkl')
if os.path.exists(path_ans):
r_ans = dassh.reactor.load(path_ans)
else:
inpath = os.path.join(testdir, 'test_inputs', 'input_single_asm.txt')
outpath = os.path.join(testdir, 'test_results', 'test_single_asm')
inp = dassh.DASSH_Input(inpath)
r_ans = dassh.Reactor(inp, path=outpath, write_output=True)
r_ans.temperature_sweep()
# Just want pressure drop per unit length of rod bundle region
asm = r_ans.assemblies[0]
ans = asm.rodded.pressure_drop
ans /= asm.region_bnd[2] - asm.region_bnd[1]
# Get result to compare
inpath = os.path.join(testdir, 'test_inputs', 'input_single_asm_lf.txt')
outpath = os.path.join(testdir, 'test_results', 'test_single_asm_lf-2')
inp = dassh.DASSH_Input(inpath)
k = ('Assembly', 'fuel', 'AxialRegion', 'lower_refl')
inp.data[k[0]][k[1]][k[2]][k[3]]['hydraulic_diameter'] = \
asm.rodded.bundle_params['de']
inp.data[k[0]][k[1]][k[2]][k[3]]['vf_coolant'] = \
(asm.rodded.bundle_params['area']
/ (0.5 * np.sqrt(3) * asm.rodded.duct_ftf[0][0]**2))
# print('de', inp.data[k[0]][k[1]][k[2]][k[3]]['hydraulic_diameter'])
# print('vfc', inp.data[k[0]][k[1]][k[2]][k[3]]['vf_coolant'])
r_res = dassh.Reactor(inp, path=outpath, write_output=True)
r_res.temperature_sweep()
asm = r_res.assemblies[0]
res = asm.region[0].pressure_drop
res /= asm.region_bnd[1] - asm.region_bnd[0]
print('ans', ans)
print('res', res)
# Compare them
diff = (res - ans) / ans
print('rel diff', diff)
assert abs(diff) < 0.05 # 5 % difference is tolerable
@pytest.mark.skip(reason='toy problem for milos')
def test_ur_ctrl_asm_sweep(simple_ctrl_params):
"""Test the simple model approximation on a double-duct assembly"""
input, mat = simple_ctrl_params
mat = {'coolant': dassh.Material('sodium_se2anl_425'),
'duct': dassh.Material('ht9_se2anl_425')}
fr = 1.0
# Make rodded region
rr = dassh.region_rodded.make_rr_asm(input, 'dummy', mat.copy(), fr)
# Make unrodded region; manually set UR params
input['use_low_fidelity_model'] = True
input['convection_factor'] = "calculate"
ur = dassh.region_unrodded.make_ur_asm('testboi', input, mat.copy(), fr)
# Manual activation
for k in rr.temp.keys():
rr.temp[k] *= 623.15
try:
ur.temp[k] *= 623.15
except KeyError:
continue
# Calculate mesh size
dz_rr = dassh.region_rodded.calculate_min_dz(rr, 623.15, 773.15)
dz_ur = dassh.region_unrodded.calculate_min_dz(ur, 623.15, 773.15)
dz = min([dz_rr[0], dz_ur[0]])
print('dz_rr', dz_rr)
print('dz_ur (simple)', dz_ur)
print(rr.coolant.thermal_conductivity * rr._sf)
print(rr.coolant.density * rr.coolant.heat_capacity
* rr.coolant_int_params['eddy'])
print(1 - rr.pin_diameter / rr.pin_pitch)
assert 0
# Sweep
length = 1.0
n_steps = np.ceil(length / dz)
print(n_steps)
p_lin = 0.15e6
power_ur = {'refl': p_lin}
power_rr = make_rr_power(rr, power_ur)
gap_temp_ur = np.ones(6) * (350.0 + 273.15)
gap_temp_rr = make_rr_gap_temps_rr(rr, gap_temp_ur)
fake_htc = np.ones(2) * 2e4
for i in range(int(n_steps)):
ur._update_coolant_params(ur.avg_coolant_int_temp)
ur.calculate(dz, power_ur, gap_temp_ur, fake_htc, ebal=True)
rr._update_coolant_int_params(rr.avg_coolant_int_temp)
rr._update_coolant_byp_params(rr.avg_coolant_byp_temp)
rr.calculate(dz, power_rr, gap_temp_rr, fake_htc, ebal=True)
cp = ur.coolant.heat_capacity
print()
print('UR ENERGY FROM DUCT (W):', ur.ebal['from_duct'])
print('RR ENERGY FROM DUCT (W):', rr.ebal['from_duct'])
print()
print('UR COOLANT DT (C): ', ur.avg_coolant_temp - 623.15)
print('RR COOLANT DT (C): ', rr.avg_coolant_temp - 623.15)
print()
print('UR EBAL PER HEX SIDE')
print(ur.ebal['per_hex_side'])
print('RR EBAL PER HEX SIDE')
print(rr.ebal['per_hex_side'])
print()
print('UR EBAL')
print('added:', ur.ebal['power'])
print('from duct:', ur.ebal['from_duct'])
tot = ur.ebal['power'] + ur.ebal['from_duct']
print('sum:', tot)
dT = ur.avg_coolant_temp - 623.15
print('coolant rise:', dT * ur.flow_rate * cp)
print('bal:', tot - dT * ur.flow_rate * cp)
print()
print('RR EBAL')
print('added:', rr.ebal['power'])
print('from duct:', rr.ebal['from_duct'])
print('to byp:', rr.ebal['from_duct_byp'])
tot = rr.ebal['power'] + rr.ebal['from_duct_byp'] + rr.ebal['from_duct']
print('sum:', tot)
dT = rr.avg_coolant_temp - 623.15
print('coolant rise:', dT * rr.total_flow_rate * cp)
print('bal:', tot - dT * rr.total_flow_rate * cp)
print()
print('UR AVG COOLANT // DUCT TEMP')
print(ur.temp['coolant_int'])
print(ur.avg_coolant_temp - 273.15)
print(ur.avg_duct_mw_temp[0] - 273.15)
print(np.average(ur.temp['duct_surf'][-1, -1]) - 273.15)
print('RR AVG COOLANT // DUCT TEMP')
print(rr.avg_coolant_int_temp - 273.15)
print(rr.avg_coolant_temp - 273.15)
print(rr.avg_duct_mw_temp[0] - 273.15)
print(np.average(rr.temp['duct_surf'][-1, -1]) - 273.15)
print()
# print(c_shield_rr.temp['coolant_int'])
assert 0
@pytest.mark.skip(reason='lol')
def test_ur_vs_rr_ebal(shield_ur_simple, shield_ur_mnh, c_shield_rr,
c_shield_simple_rr):
"""Compare energy balance in rodded and un-rodded regions"""
c_shield_rr._conv_approx = True
c_shield_simple_rr._conv_approx = True
# shield_ur_mnh._params['xhtc'] = shield_ur_mnh.vf['coolant']
# shield_ur_simple._params['xhtc'] = shield_ur_mnh.vf['coolant']
# shield_ur_mnh._params['xhtc'] = 0.577442107490257
# shield_ur_simple._params['xhtc'] = 0.577442107490257
# shield_ur_mnh._params['xhtc'] = 0.12
# shield_ur_simple._params['xhtc'] = 0.12
shield_ur_mnh._params['lowflow'] = True
shield_ur_simple._params['lowflow'] = True
# print(c_shield_rr.params['area'][0]
# * c_shield_rr.subchannel.n_sc['coolant']['interior'])
# print(c_shield_rr.params['area'][1]
# * c_shield_rr.subchannel.n_sc['coolant']['edge']
# + c_shield_rr.params['area'][2]
# * c_shield_rr.subchannel.n_sc['coolant']['corner'])
# print(c_shield_rr._sf)
c_shield_rr._sf = 1.0
dz_rr = dassh.region_rodded.calculate_min_dz(
c_shield_rr, 623.15, 773.15)
# dz_rr2 = dassh.region_rodded.calculate_min_dz(
# c_shield_simple_rr, 623.15, 773.15)
dz_ur1 = dassh.region_unrodded.calculate_min_dz(
shield_ur_simple, 623.15, 773.15)
dz_ur2 = dassh.region_unrodded.calculate_min_dz(
shield_ur_mnh, 623.15, 773.15)
dz = min([dz_rr[0], dz_ur1[0], dz_ur2[0]])
print('dz_rr (m)', dz_rr)
# print('dz_rr_7pin (m)', dz_rr2)
print('dz_ur (simple)', dz_ur1)
print('dz_ur (6 node)', dz_ur2)
n_steps = 100
# p_lin = 1000.0
p_lin = 0.0
power_ur = {'refl': p_lin}
power_rr = {'pins': np.ones(61) * p_lin / 61,
'duct': np.zeros(
c_shield_rr.subchannel.n_sc['duct']['total']),
'cool': np.zeros(
c_shield_rr.subchannel.n_sc['coolant']['total'])
}
power_rr2 = {'pins': np.ones(7) * p_lin / 7,
'duct': np.zeros(c_shield_simple_rr
.subchannel.n_sc['duct']['total']),
'cool': np.zeros(c_shield_simple_rr
.subchannel.n_sc['coolant']['total'])}
# gap_temp_ur = np.linspace(625, 750, 6) # [625, 650, 675, 700, 725, 750]
# gap_temp_rr = np.linspace(625, 750, (c_shield_rr.subchannel
# .n_sc['duct']['total']))
# gap_temp_ur = 623.15 * np.ones(6)
# gap_temp_rr = 623.15 * np.ones((c_shield_rr.subchannel
# .n_sc['duct']['total']))
gap_temp_ur = np.ones(6) * 700.0
# gap_temp_ur = np.array([623.15 + 10, 623.15 - 10, 623.15 - 20,
# 623.15 - 10, 623.15 + 10, 623.15 + 20])
duct_per_side = int(c_shield_rr.subchannel.n_sc['duct']['total'] / 6)
gap_temp_rr = np.linspace(np.roll(gap_temp_ur, 1),
gap_temp_ur,
duct_per_side + 1)
gap_temp_rr = gap_temp_rr.transpose()
gap_temp_rr = gap_temp_rr[:, 1:]
gap_temp_rr = np.hstack(gap_temp_rr)
duct_per_side = int(c_shield_simple_rr.subchannel.n_sc['duct']['total'] / 6)
print(duct_per_side)
gap_temp_rr2 = np.linspace(np.roll(gap_temp_ur, 1),
gap_temp_ur,
duct_per_side + 1)
gap_temp_rr2 = gap_temp_rr2.transpose()
print(gap_temp_rr2.shape)
gap_temp_rr2 = gap_temp_rr2[:, 1:]
gap_temp_rr2 = np.hstack(gap_temp_rr2)
fake_htc = np.ones(2) * 2e4
# shield_ur_mnh._params['hde'] /= 2
# wp_ur = shield_ur.duct_perim
total_area = np.sqrt(3) * 0.5 * shield_ur_mnh.duct_ftf[0]**2
struct_area = shield_ur_mnh.vf['struct'] * total_area
struct_r = np.sqrt(struct_area / np.pi)
struct_perim = 2 * np.pi * struct_r
# print('ORIGINAL DE:', shield_ur._params['de'])
# print('ORIGINAL WP:', wp_ur)
# print('ADDED WP:', struct_perim)
# print('INCREASE:', struct_perim / wp_ur)
# shield_ur._params['de'] = (4 * shield_ur.total_area['coolant_int']
# / (2 * (struct_perim + wp_ur)))
for i in range(n_steps):
# gap_temp_ur = np.linspace(
# shield_ur_mnh.avg_coolant_temp,
# shield_ur_mnh.avg_coolant_temp - 10.0,
# 6)
# gap_temp_rr = np.linspace(
# c_shield_rr.avg_coolant_temp,
# c_shield_rr.avg_coolant_temp - 10.0,
# c_shield_rr.subchannel.n_sc['duct']['total'])
shield_ur_mnh.calculate(
dz, power_ur, gap_temp_ur, fake_htc, ebal=True)
shield_ur_simple.calculate(
dz, power_ur, gap_temp_ur, fake_htc, ebal=True)
c_shield_rr.calculate(
dz, power_rr, gap_temp_rr, fake_htc, ebal=True)
c_shield_simple_rr.calculate(
dz, power_rr2, gap_temp_rr2, fake_htc, ebal=True)
print('UNRODDED (MNH)')
# print('AREA:', shield_ur_mnh.total_area['coolant_int'])
# print('DUCT PERIM', shield_ur_mnh.duct_perim)
# print('STRUCT PERIM', struct_perim)
print('DE:', shield_ur_mnh._params['de'])
# print('RE:', shield_ur_mnh.coolant_params['Re'])
# print('HTC:', shield_ur_mnh.coolant_params['htc'])
print('ENERGY ADDED (W): ', shield_ur_mnh.ebal['power'])
print('ENERGY FROM DUCT (W):', shield_ur_mnh.ebal['from_duct'])
total = (shield_ur_mnh.ebal['from_duct']
+ shield_ur_mnh.ebal['power'])
print('TOTAL ENERGY INPUT (W):', total)
print('COOLANT DT (C): ', shield_ur_mnh.avg_coolant_temp - 623.15)
e_temp_rise = (shield_ur_mnh.flow_rate
* shield_ur_mnh.coolant.heat_capacity
* (shield_ur_mnh.avg_coolant_temp - 623.15))
print('ENERGY COOLANT DT (W):', e_temp_rise)
bal = total - e_temp_rise
print('DIFFERENCE (W):', bal)
# print(shield_ur_mnh.temp['coolant_int'])
print(shield_ur_mnh.ebal['per_hex_side'])
# print()
# print('UNRODDED (SIMPLE)')
# print('ENERGY ADDED (W): ', shield_ur_simple.ebal['power'])
# print('ENERGY FROM DUCT (W):', shield_ur_simple.ebal['from_duct'])
# total = (shield_ur_simple.ebal['from_duct']
# + shield_ur_simple.ebal['power'])
# print('TOTAL ENERGY INPUT (W):', total)
# print('COOLANT DT (C): ', shield_ur_simple.avg_coolant_temp - 623.15)
# e_temp_rise = (shield_ur_simple.flow_rate
# * shield_ur_simple.coolant.heat_capacity
# * (shield_ur_simple.avg_coolant_temp - 623.15))
# print('ENERGY COOLANT DT (W):', e_temp_rise)
# bal = total - e_temp_rise
# print('DIFFERENCE (W):', bal)
# print(shield_ur_simple.ebal['per_hex_side'])
# print()
# print('RODDED 7')
# print('AREA:', c_shield_simple_rr.params['area'])
# print('BUNDLE AREA:', c_shield_simple_rr.bundle_params['area'])
# print('BUNDLE WP:', c_shield_simple_rr.bundle_params['wp'])
# # print('DE:', c_shield_rr.params['de'])
# print('BUNDLE DE:', c_shield_simple_rr.bundle_params['de'])
# # print('RE:', c_shield_rr.coolant_int_params['Re'])
# # print('RE_sc:', c_shield_rr.coolant_int_params['Re_sc'])
# # print('HTC:', c_shield_rr.coolant_int_params['htc'])
# print('ENERGY ADDED (W): ', c_shield_simple_rr.ebal['power'])
# print('ENERGY FROM DUCT (W):', c_shield_simple_rr.ebal['from_duct'])
# total = (c_shield_simple_rr.ebal['from_duct']
# + c_shield_simple_rr.ebal['power'])
# print('TOTAL ENERGY INPUT (W):', total)
# print('COOLANT DT (C): ', c_shield_simple_rr.avg_coolant_temp - 623.15)
# e_temp_rise = (c_shield_simple_rr.int_flow_rate
# * c_shield_simple_rr.coolant.heat_capacity
# * (c_shield_simple_rr.avg_coolant_temp - 623.15))
# print('ENERGY COOLANT DT (W):', e_temp_rise)
# bal = total - e_temp_rise
# print('DIFFERENCE (W):', bal)
# print(c_shield_simple_rr.temp['coolant_int'])
# print(c_shield_simple_rr.ebal['per_hex_side'])
print()
print('RODDED 61')
# print('AREA:', c_shield_rr.params['area'])
# print('BUNDLE AREA:', c_shield_rr.bundle_params['area'])
# print('BUNDLE WP:', c_shield_rr.bundle_params['wp'])
print('DE:', c_shield_rr.params['de'])
print('BUNDLE DE:', c_shield_rr.bundle_params['de'])
# print('RE:', c_shield_rr.coolant_int_params['Re'])
# print('RE_sc:', c_shield_rr.coolant_int_params['Re_sc'])
# print('HTC:', c_shield_rr.coolant_int_params['htc'])
print('ENERGY ADDED (W): ', c_shield_rr.ebal['power'])
print('ENERGY FROM DUCT (W):', c_shield_rr.ebal['from_duct'])
total = (c_shield_rr.ebal['from_duct']
+ c_shield_rr.ebal['power'])
print('TOTAL ENERGY INPUT (W):', total)
print('COOLANT DT (C): ', c_shield_rr.avg_coolant_temp - 623.15)
e_temp_rise = (c_shield_rr.int_flow_rate
* c_shield_rr.coolant.heat_capacity
* (c_shield_rr.avg_coolant_temp - 623.15))
print('ENERGY COOLANT DT (W):', e_temp_rise)
bal = total - e_temp_rise
# print('DIFFERENCE (W):', bal)
print(c_shield_rr.temp['coolant_int'][:4])
# print(c_shield_rr.temp['coolant_int'][-4:])
# print(c_shield_rr.subchannel.n_sc['coolant'])
print(c_shield_rr.temp['coolant_int'][93: 100])
print(c_shield_rr.ebal['per_hex_side'])
assert 0
def make_rr_gap_temps_rr(rr, gap_temp_ur):
duct_per_side = int(rr.subchannel.n_sc['duct']['total'] / 6)
gap_temp_rr = np.linspace(np.roll(gap_temp_ur, 1),
gap_temp_ur,
duct_per_side + 1)
gap_temp_rr = gap_temp_rr.transpose()
gap_temp_rr = gap_temp_rr[:, 1:]
gap_temp_rr = np.hstack(gap_temp_rr)
return gap_temp_rr
def make_rr_power(rr, power_ur):
n_pin = rr.pin_lattice.n_pin
power_rr = {}
power_rr['pins'] = np.ones(n_pin) * power_ur['refl'] / n_pin
power_rr['duct'] = np.zeros(rr.n_duct *
rr.subchannel.n_sc['duct']['total'])
    power_rr['cool'] = np.zeros(rr.subchannel.n_sc['coolant']['total'])
    return power_rr
# External imports
import numpy as np
import cv2 as cv
# Own imports
import sim
# End connection
sim.simxFinish(-1)
# Create new connection
clientID = sim.simxStart("127.0.0.1", 19999, True, True, 5000, 5)
if (clientID != -1):
    print("Connection OK")
else:
    print("Fatal error - No connection")
# Create motors
motorWheel = [0, 1, 2, 3]
motorError = [False, False, False, False]
motorError[0], motorWheel[0] = sim.simxGetObjectHandle(
clientID,
"joint_back_left_wheel",
sim.simx_opmode_oneshot_wait
)
motorError[1], motorWheel[1] = sim.simxGetObjectHandle(
clientID,
"joint_back_right_wheel",
sim.simx_opmode_oneshot_wait
)
err_code,camera = sim.simxGetObjectHandle(
clientID,
"Vision_sensor",
sim.simx_opmode_blocking
)
returnCode, resolution, image = sim.simxGetVisionSensorImage(
clientID,
camera,
0,
sim.simx_opmode_streaming
)
# Define the speed
speed = 0.0
# Move the motors
sim.simxSetJointTargetVelocity(
clientID,
motorWheel[0],
speed,
sim.simx_opmode_oneshot_wait
)
sim.simxSetJointTargetVelocity(
clientID,
motorWheel[1],
-speed,
sim.simx_opmode_oneshot_wait
)
def getting_started_filters():
while (True):
_, resolution, image = sim.simxGetVisionSensorImage(
clientID,
camera,
0,
sim.simx_opmode_buffer
)
kernelD = np.ones((5,9), np.uint8)
kernelA = cv.getStructuringElement(cv.MORPH_RECT, (9,9))
kernelB = np.array([[1,1,1],[1,1,1],[1,1,1]])
if (len(resolution) > 1):
img = np.array(image, dtype = np.uint8)
img.resize([resolution[0], resolution[1], 3])
img = np.rot90(img,2)
img = np.fliplr(img)
img = cv.cvtColor(img, cv.COLOR_RGB2BGR)
# medianBlur filter - to smooth out an image
blur_image = cv.medianBlur(img, 15)
gray_image = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
_,binary_image = cv.threshold(gray_image, 128, 255, cv.THRESH_BINARY)
# This line works as an automatic binarizator (finding an umbral by itself)
# _,binary_image = cv.threshold(gray_image, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
# dilate filter - to dilate an image
dilate_image = cv.dilate(binary_image, kernelD, iterations = 1)
# erode filter - to erode an image
erode_image = cv.erode(binary_image, kernelD, iterations=2)
# Others import filters to take into account
img_open = cv.morphologyEx(binary_image,cv.MORPH_OPEN,kernelB, iterations=1)
img_close = cv.morphologyEx(binary_image,cv.MORPH_CLOSE,kernelB, iterations=1)
img_gradient = cv.morphologyEx(binary_image, cv.MORPH_GRADIENT, kernelB, iterations=1)
# cv.imshow("Original Image", img)
# cv.imshow("Dilate Image", dilate_image)
# cv.imshow("Blur Image", blur_image)
cv.imshow("Binary Image", binary_image)
cv.imshow("Erode Image", erode_image)
key = cv.waitKey(1) & 0xFF
if key == 27:
break
def getting_started_contours():
while (True):
_, resolution, image = sim.simxGetVisionSensorImage(
clientID,
camera,
0,
sim.simx_opmode_buffer
)
if (len(resolution) > 1):
img = np.array(image, dtype = np.uint8)
img.resize([resolution[0], resolution[1], 3])
img = np.rot90(img,2)
img = np.fliplr(img)
img = cv.cvtColor(img, cv.COLOR_RGB2BGR)
hsv_image = cv.cvtColor(img, cv.COLOR_BGR2HSV)
            lower_values = np.array([83, 0, 55])
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates a small-scale subset of SVHN dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import scipy.io as sio
np.random.seed(seed=0)
def sample(input_path,
output_path,
is_test=False,
num_classes=10,
n_train_per_class=60,
n_test_per_class=1200):
"""Samples from the given input path and saves the sampled dataset."""
train_data = sio.loadmat(input_path)
new_data = []
new_data_y = []
new_data_1 = []
new_data_y_1 = []
for i in range(num_classes):
label_id = i + 1
ori_index = np.array(np.where(train_data['y'] == label_id)[0])
np.random.shuffle(ori_index)
index = ori_index[:n_train_per_class]
label_data = np.array(train_data['X'][:, :, :, index])
new_data.append(label_data)
new_data_y.append(np.array(train_data['y'][index, :]))
if is_test:
index = ori_index[n_train_per_class:n_train_per_class + n_test_per_class]
label_data = np.array(train_data['X'][:, :, :, index])
new_data_1.append(label_data)
new_data_y_1.append(np.array(train_data['y'][index, :]))
new_data = np.concatenate(new_data, 3)
  new_data_y = np.concatenate(new_data_y, 0)
"""Some functions to generate simulated data"""
try:
from os import link
except ImportError:
# Hack for windows
from shutil import copy2
def link(src, dst):
copy2(src, dst)
from numpy.random import randn, rand, permutation, randint, seed
from numpy import where, nonzero, sqrt, real
from numpy import zeros, ones, eye, mean, kron, sign
from numpy import array, equal, argsort
from numpy import flatnonzero
from numpy.linalg import norm
from optwok.mldata import DatasetFileCSV
from optwok.testtools import flatten_list, list2matrix
from numpy import matrix, vstack, hstack, concatenate
from optwok.kernel import GaussKernel, JointKernel
from optwok.io_pickle import load, save
from optwok.mldata import DatasetBase
from scipy.linalg.matfuncs import sqrtm
#####################################################################
# Binary classification
#####################################################################
def cloudgen(numpoint, numfeat, numnoise, fracpos, width):
"""Generate two Gaussian point clouds, centered around one and minus one.
Gaussian clouds are in numfeat dimensions.
Generate uniform noise in numnoise dimensions.
"""
numpos = int(round(fracpos*numpoint))
numneg = numpoint - numpos
metadata = 'cloudgen(%d,%d,%d,%d,%3.2f)' % (numpos, numneg, numfeat, numnoise, width)
print(metadata)
datapos = ones((numfeat, numpos)) + width*randn(numfeat, numpos)
dataneg = -ones((numfeat, numneg)) + width*randn(numfeat, numneg)
noise = (2.0+width)*(rand(numnoise, numpos+numneg)
- 0.5 * ones((numnoise, numpos+numneg)))
pointcloud = 0.2*concatenate((concatenate((datapos, dataneg), axis=1),
noise), axis=0)
labels = concatenate((ones(numpos), -ones(numneg)))
return metadata, pointcloud, labels
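# Usage sketch (illustrative arguments only): draw a small two-class Gaussian
# cloud. With these values the returned array has shape (7, 100) -- five
# informative feature rows plus two noise rows -- and the label vector holds
# 50 entries of +1 followed by 50 entries of -1.
def _example_cloudgen():
    meta, X, y = cloudgen(numpoint=100, numfeat=5, numnoise=2,
                          fracpos=0.5, width=0.8)
    return meta, X.shape, y.shape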
def cloudgen_weak(numpoint, numfeat, numweak, fracpos, width):
"""Generate two Gaussian point clouds, centered around one and minus one.
Gaussian clouds are in numfeat dimensions.
Overlapping Gaussian clouds are in numweak dimensions.
"""
numpos = int(round(fracpos*numpoint))
numneg = numpoint - numpos
metadata = 'cloudgen_weak(%d,%d,%d,%d,%3.2f)' %\
(numpos, numneg, numfeat, numweak, width)
print(metadata)
strongpos = ones((numfeat, numpos)) + width*randn(numfeat, numpos)
strongneg = -ones((numfeat, numneg)) + width*randn(numfeat, numneg)
weakpos = 0.1*ones((numweak, numpos)) + 2.0*width*randn(numweak, numpos)
weakneg = -0.1*ones((numweak, numneg)) + 2.0*width*randn(numweak, numneg)
datapos = concatenate((strongpos, weakpos), axis=0)
dataneg = concatenate((strongneg, weakneg), axis=0)
pointcloud = 0.2*concatenate((datapos, dataneg), axis=1)
labels = concatenate((ones(numpos), -ones(numneg)))
return metadata, pointcloud, labels
def create_data(num_train=500, num_test=200, num_feat=10, num_noise_feat=10, width=0.8,
frac_pos=0.5, frac_flip=0.0, symm_flip=True):
"""Create mixture of Gaussians distribution"""
(metadata, train_ex, train_labels) = cloudgen(num_train, num_feat,
num_noise_feat, frac_pos, width)
(metadata, test_ex, test_labels) = cloudgen(num_test, num_feat,
num_noise_feat, frac_pos, width)
random_classification_noise(train_labels, frac_flip, symm_flip)
return (train_ex, train_labels, test_ex, test_labels)
def _get_hyperplane(num_feat, in_simplex, homogeneous, min_val=0.01):
"""Randomly create a hyperplane"""
if in_simplex:
w = rand(num_feat, 1)
w += min_val+1.0/num_feat
w = w/sum(abs(w))
if homogeneous:
b = 0.0
else:
b = rand()
else:
w = rand(num_feat, 1) - 0.5
w = sign(w)*(abs(w)+min_val+1.0/num_feat)
w = w/norm(w)
if homogeneous:
b = 0.0
else:
b = 2.0*(rand()-0.5)
return w, b
def _get_hyperplane_sparse(num_feat, num_nz, homogeneous, min_val=0.01):
"""Randomly create a hyperplane"""
w = rand(num_feat, 1)-0.5
w = sign(w)*(abs(w)+min_val+1.0/num_feat)
w /= norm(w)
small_idx = argsort(abs(w[:, 0]))[:(num_feat-num_nz)]
w[small_idx] = 0.0
if homogeneous:
b = 0.0
else:
        b = 2.0*(rand()-0.5)
    return w, b
import numpy as np
# Sort and remove spurious eigenvalues
def print_evals(evals,n=None):
if n is None:n=len(evals)
print('{:>4s} largest eigenvalues:'.format(str(n)))
print('\n'.join('{:4d}: {:10.4e} {:10.4e}j'.format(n-c,np.real(k),np.imag(k))
for c,k in enumerate(evals[-n:])))
def sort_evals(evals,evecs,which="M"):
assert which in ["M", "I", "R"]
if which=="I":
idx = np.imag(evals).argsort()
if which=="R":
        idx = np.real(evals).argsort()
import abc
from copy import copy
from math import ceil
from os import PathLike
from random import shuffle
from itertools import product
from collections.abc import Iterable
import torch
import numpy as np
from typing import List
from keras.utils import to_categorical
from python_research.io import load_data
HEIGHT = 0
WIDTH = 1
DEPTH = 2
class Dataset:
def __init__(self, data: np.ndarray, labels: np.ndarray):
self.data = data
self.labels = labels
def get_data(self) -> np.ndarray:
"""
:return: Data from a given dataset
"""
return self.data
def get_labels(self) -> np.ndarray:
"""
:return: Labels from a given dataset
"""
return self.labels
def get_one_hot_labels(self, classes_count: int=None):
if classes_count is None:
classes_count = len(np.unique(self.labels))
return to_categorical(self.labels, classes_count)
@property
def min(self):
return np.amin(self.data)
@property
def max(self):
return np.amax(self.data)
@property
def shape(self):
return self.data.shape
def vstack(self, to_stack: np.ndarray):
self.data = np.vstack([self.data, to_stack])
def hstack(self, to_stack: np.ndarray):
self.labels = np.hstack([self.labels, to_stack])
def expand_dims(self, axis: int=0, inplace: bool=True):
if inplace:
self.data = np.expand_dims(self.data, axis=axis)
else:
return np.expand_dims(self.data, axis=axis)
def normalize_min_max(self, min_: float=None, max_: float=None,
inplace: bool=True):
"""
Normalize data using Min Max normalization: (data - min) / (max - min)
        :param min_: Minimal value for normalization, if not specified,
        it will be derived from the data
        :param max_: Maximal value for normalization, if not specified,
        it will be derived from the data
        :param inplace: Whether to change data in-place (True) or return
        the normalized data
        :return: If inplace is True - return None,
        if inplace is False - return the normalized data
"""
if min_ is None and max_ is None:
min_ = np.amin(self.get_data())
max_ = np.amax(self.get_data())
if inplace:
self.data = (self.get_data() - min_) / (max_ - min_)
else:
return (self.get_data() - min_) / (max_ - min_)
elif min_ is not None and max_ is not None:
if inplace:
self.data = (self.get_data() - min_) / (max_ - min_)
else:
return(self.get_data() - min_) / (max_ - min_)
def standardize(self, mean: float=None, std: float=None,
inplace: bool=True):
"""
Standardize data using mean and std.
        :param mean: Mean value for standardization, if not specified,
        it will be derived from the data
        :param std: Std value for standardization, if not specified,
        it will be derived from the data
        :param inplace: Whether to change data in-place (True) or return
        the standardized data
        :return: If inplace is True - return None,
        if inplace is False - return the standardized data
"""
if mean is None and std is None:
mean = np.mean(self.get_data())
std = np.std(self.get_data())
if inplace:
self.data = (self.data - mean) / std
else:
return (self.data - mean) / std
def normalize_labels(self):
"""
Normalize label values so that they start from 0.
:return: None
"""
self.labels = self.labels - 1
def delete_by_indices(self, indices: Iterable):
"""
Delete a chunk of data given as indices
:param indices: Indices to delete from both data and labels arrays
:return: None
"""
self.data = np.delete(self.data, indices, axis=HEIGHT)
self.labels = np.delete(self.labels, indices, axis=HEIGHT)
def convert_to_tensors(self, inplace: bool=True, device: str='cpu'):
"""
        Convert data and labels to torch tensors.
        :param inplace: Whether to change data in-place (True) or return
        the converted data and labels
        :param device: Device on which tensors should be allocated
:return:
"""
if inplace:
self.data = torch.from_numpy(self.get_data()).float().to(device)
self.labels = torch.from_numpy(self.get_labels()).float().to(device)
else:
return torch.from_numpy(self.get_data()).to(device), \
torch.from_numpy(self.get_labels()).to(device)
def convert_to_numpy(self, inplace: bool=True):
"""
        Convert data and labels to numpy arrays
        :param inplace: Whether to change data in-place (True) or return
        the converted data and labels
:return:
"""
if inplace:
self.data = self.data.numpy()
self.labels = self.labels.numpy()
else:
return self.data.numpy(), self.labels.numpy()
def __len__(self) -> int:
"""
        Method providing the size of the dataset (number of samples)
:return: Size of the dataset
"""
return len(self.labels)
def __getitem__(self, item) -> [np.ndarray, np.ndarray]:
"""
Method supporting integer indexing
:param item: Index or Iterable of indices pointing at elements to be
returned
:return: Data at given indexes
"""
sample_x = self.data[item, ...]
sample_y = self.labels[item]
return sample_x, sample_y
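def _dataset_usage_example():
    """Illustrative only (added, not part of the original module): exercise the
    Dataset wrapper above with random data."""
    example = Dataset(np.random.rand(10, 5).astype(np.float32), np.arange(10))
    example.normalize_min_max()          # in-place (data - min) / (max - min)
    sample_x, sample_y = example[0]      # integer indexing returns (sample, label)
    assert 0.0 <= example.min <= example.max <= 1.0
    return sample_x, sample_y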
class HyperspectralDataset(Dataset):
"""
Class representing hyperspectral data in a form of samples prepared for
training and classification (1D or 3D). For 1D samples, data will have
the following dimensions: [SAMPLES_COUNT, NUMBER_OF_BANDS], where for 3D
samples dimensions will be [SAMPLES_COUNT,
NEIGHBOURHOOD_SIZE,
NEIGHBOURHOOD_SIZE,
NUMBER_OF_BANDS].
"""
def __init__(self, dataset: [np.ndarray, PathLike],
ground_truth: [np.ndarray, PathLike],
neighbourhood_size: int = 1,
background_label: int = 0):
if type(dataset) is np.ndarray and type(ground_truth) is np.ndarray:
raw_data = dataset
ground_truth = ground_truth
elif type(dataset) is str and type(ground_truth) is str:
raw_data = load_data(dataset)
ground_truth = load_data(ground_truth)
else:
raise TypeError("Dataset and ground truth should be "
"provided either as a string or a numpy array, "
"not {}".format(type(dataset)))
data, labels = self._prepare_samples(raw_data,
ground_truth,
neighbourhood_size,
background_label)
super(HyperspectralDataset, self).__init__(data, labels)
@staticmethod
def _get_padded_cube(data, padding_size):
x = copy(data)
v_padding = np.zeros((padding_size, x.shape[WIDTH], x.shape[DEPTH]))
x = | np.vstack((v_padding, x)) | numpy.vstack |
# -*- coding: utf-8 -*-
import unittest
import numpy
"""
*******************************************************************************
Tests of the quantarhei.qm.LindbladForm class
*******************************************************************************
"""
from quantarhei.qm import LindbladForm
from quantarhei.qm import ElectronicLindbladForm
from quantarhei.qm import Operator
from quantarhei.qm import SystemBathInteraction
from quantarhei.qm import ReducedDensityMatrixPropagator
from quantarhei.qm import ReducedDensityMatrix
from quantarhei.qm import ProjectionOperator
from quantarhei import Hamiltonian
from quantarhei import energy_units
from quantarhei import TimeAxis
from quantarhei import eigenbasis_of, Manager
class TestLindblad(unittest.TestCase):
"""Tests for the LindbladForm class
"""
def setUp(self,verbose=False):
self.verbose = verbose
#
# Lindblad projection operators
#
        K12 = numpy.array([[0.0, 1.0],[0.0, 0.0]],dtype=numpy.float64)
        K21 = numpy.array([[0.0, 0.0],[1.0, 0.0]],dtype=numpy.float64)
KK12 = Operator(data=K12)
KK21 = Operator(data=K21)
self.KK12 = KK12
self.KK21 = KK21
#
# Linbdlad rates
#
self.rates = (1.0/100.0, 1.0/200.0)
#
# System-bath interaction using operators and rates in site basis
#
self.sbi1 = SystemBathInteraction([KK12,KK21],
rates=self.rates)
self.sbi2 = SystemBathInteraction([KK12,KK21],
rates=self.rates)
#
# Test Hamiltonians
#
with energy_units("1/cm"):
h1 = [[100.0, 0.0],[0.0, 0.0]]
h2 = [[100.0, 0.0],[0.0, 0.0]]
self.H1 = Hamiltonian(data=h1)
self.H2 = Hamiltonian(data=h2)
h3 = [[100.0, 20.0],[20.0, 0.0]]
self.H3 = Hamiltonian(data=h3)
# less trivial Hamiltonian
h4 = [[100.0, 200.0, 30.0 ],
[200.0, 50.0, -100.0],
[30.0, -100.0, 0.0 ]]
self.H4 = Hamiltonian(data=h4)
h4s = [[100.0, 0.0, 0.0 ],
[0.0, 50.0, 0.0],
[0.0, 0.0, 0.0 ]]
self.H4s = Hamiltonian(data=h4s)
#
# Projection operators in eigenstate basis
#
with eigenbasis_of(self.H3):
K_12 = ProjectionOperator(0, 1, dim=2)
K_21 = ProjectionOperator(1, 0, dim=2)
self.K_12 = K_12
self.K_21 = K_21
with eigenbasis_of(self.H4):
Ke_12 = ProjectionOperator(0, 1, dim=3)
Ke_21 = ProjectionOperator(1, 0, dim=3)
Ke_23 = ProjectionOperator(1, 2, dim=3)
Ke_32 = ProjectionOperator(2, 1, dim=3)
Ks_12 = ProjectionOperator(0, 1, dim=3)
Ks_21 = ProjectionOperator(1, 0, dim=3)
Ks_23 = ProjectionOperator(1, 2, dim=3)
Ks_32 = ProjectionOperator(2, 1, dim=3)
self.rates4 = [1.0/100, 1.0/200, 1.0/150, 1.0/300]
#
# System-bath operators defined in exciton basis
#
self.sbi3 = SystemBathInteraction([K_12, K_21],
rates=self.rates)
self.sbi4e = SystemBathInteraction([Ke_12, Ke_21, Ke_23, Ke_32],
rates=self.rates4)
self.sbi4s = SystemBathInteraction([Ks_12, Ks_21, Ks_23, Ks_32],
rates=self.rates4)
def test_comparison_of_rates(self):
"""Testing that Lindblad tensor and rate matrix are compatible
"""
tensor = True
# matrix = True
dim = self.H1.dim
KT = numpy.zeros((dim,dim), dtype=numpy.float64)
KM = numpy.zeros((dim,dim), dtype=numpy.float64)
if tensor:
#print(self.H1)
LT = LindbladForm(self.H1, self.sbi1, as_operators=False)
for n in range(2):
for m in range(2):
#print(n,m,numpy.real(RT.data[n,n,m,m]))
KT[n,m] = numpy.real(LT.data[n,n,m,m])
KM = numpy.zeros((dim,dim))
KM[0,0] = -self.rates[1]
KM[1,1] = -self.rates[0]
KM[0,1] = self.rates[0]
KM[1,0] = self.rates[1]
numpy.testing.assert_allclose(KT,KM, rtol=1.0e-2)
def test_comparison_of_dynamics(self):
"""Testing site basis dynamics by Lindblad
"""
LT1 = LindbladForm(self.H1, self.sbi1, as_operators=True)
LT2 = LindbladForm(self.H1, self.sbi1, as_operators=False)
time = TimeAxis(0.0, 1000, 1.0)
prop1 = ReducedDensityMatrixPropagator(time, self.H1, LT1)
prop2 = ReducedDensityMatrixPropagator(time, self.H1, LT2)
rho0 = ReducedDensityMatrix(dim=self.H1.dim)
rho0.data[1,1] = 1.0
rhot1 = prop1.propagate(rho0)
rhot2 = prop2.propagate(rho0)
numpy.testing.assert_allclose(rhot1.data,rhot2.data) #, rtol=1.0e-2)
def test_propagation_in_different_basis(self):
"""(LINDBLAD) Testing comparison of propagations in different bases
"""
LT1 = LindbladForm(self.H1, self.sbi1, as_operators=True)
LT2 = LindbladForm(self.H1, self.sbi1, as_operators=False)
time = TimeAxis(0.0, 1000, 1.0)
prop1 = ReducedDensityMatrixPropagator(time, self.H1, LT1)
prop2 = ReducedDensityMatrixPropagator(time, self.H1, LT2)
rho0 = ReducedDensityMatrix(dim=self.H1.dim)
rho0.data[1,1] = 1.0
with eigenbasis_of(self.H1):
rhot1_e = prop1.propagate(rho0)
with eigenbasis_of(self.H1):
rhot2_e = prop2.propagate(rho0)
rhot1_l = prop1.propagate(rho0)
rhot2_l = prop2.propagate(rho0)
numpy.testing.assert_allclose(rhot1_l.data, rhot1_e.data)
numpy.testing.assert_allclose(rhot2_l.data, rhot2_e.data)
numpy.testing.assert_allclose(rhot1_e.data, rhot2_e.data) #, rtol=1.0e-2)
def test_transformation_in_different_basis(self):
"""(LINDBLAD) Testing transformations into different bases
"""
#Manager().warn_about_basis_change = True
#Manager().warn_about_basis_changing_objects = True
LT1 = LindbladForm(self.H1, self.sbi1, as_operators=True, name="LT1")
LT2 = LindbladForm(self.H1, self.sbi1, as_operators=False, name="LT2")
rho0 = ReducedDensityMatrix(dim=self.H1.dim, name="ahoj")
with eigenbasis_of(self.H1):
rho0.data[1,1] = 0.7
rho0.data[0,0] = 0.3
with eigenbasis_of(self.H1):
rhot1_e = LT1.apply(rho0, copy=True)
with eigenbasis_of(self.H1):
rhot2_e = LT2.apply(rho0, copy=True)
rhot1_l = LT1.apply(rho0, copy=True)
rhot2_l = LT2.apply(rho0, copy=True)
numpy.testing.assert_allclose(rhot1_l.data, rhot1_e.data)
numpy.testing.assert_allclose(rhot2_l.data, rhot2_e.data)
numpy.testing.assert_allclose(rhot1_e.data, rhot2_e.data) #, rtol=1.0e-2)
def test_comparison_of_exciton_dynamics(self):
"""Testing exciton basis dynamics by Lindblad
"""
# site basis form to be compared with
LT1 = LindbladForm(self.H1, self.sbi1, as_operators=True)
# exciton basis forms
LT13 = LindbladForm(self.H3, self.sbi3, as_operators=True)
LT23 = LindbladForm(self.H3, self.sbi3, as_operators=False)
LT4e = LindbladForm(self.H4, self.sbi4e, as_operators=True)
LT4s = LindbladForm(self.H4s, self.sbi4s, as_operators=True)
time = TimeAxis(0.0, 1000, 1.0)
#
# Propagators
#
prop0 = ReducedDensityMatrixPropagator(time, self.H1, LT1)
prop1 = ReducedDensityMatrixPropagator(time, self.H3, LT13)
prop2 = ReducedDensityMatrixPropagator(time, self.H3, LT23)
prop4e = ReducedDensityMatrixPropagator(time, self.H4, LT4e)
prop4s = ReducedDensityMatrixPropagator(time, self.H4s, LT4s)
#
# Initial conditions
#
rho0 = ReducedDensityMatrix(dim=self.H3.dim)
rho0c = ReducedDensityMatrix(dim=self.H1.dim) # excitonic
with eigenbasis_of(self.H3):
rho0c.data[1,1] = 1.0
rho0.data[1,1] = 1.0
rho04e = ReducedDensityMatrix(dim=self.H4.dim)
rho04s = ReducedDensityMatrix(dim=self.H4.dim)
with eigenbasis_of(self.H4):
rho04e.data[2,2] = 1.0
rho04s.data[2,2] = 1.0
#
# Propagations
#
rhotc = prop0.propagate(rho0c)
rhot1 = prop1.propagate(rho0)
rhot2 = prop2.propagate(rho0)
rhot4e = prop4e.propagate(rho04e)
rhot4s = prop4s.propagate(rho04s)
# propagation with operator- and tensor forms should be the same
numpy.testing.assert_allclose(rhot1.data,rhot2.data) #, rtol=1.0e-2)
#
# Population time evolution by Lindblad is independent
# of the level structure and basis, as long as I compare
# populations in basis in which the Lindblad form was defined
#
P = numpy.zeros((2, time.length))
Pc = numpy.zeros((2, time.length))
P4e = numpy.zeros((3, time.length))
P4s = | numpy.zeros((3, time.length)) | numpy.zeros |
''' Utils for io, language, connectivity graphs etc '''
import os
import sys
import re
from param import args
if args.upload:
sys.path.insert(0, '/R2R-Aux/build')
else:
sys.path.insert(0, 'build')
import MatterSim
import string
import json
import time
import math
from collections import Counter, defaultdict
import numpy as np
import networkx as nx
from param import args
if args.upload:
sys.path.insert(0, '/R2R-Aux/build')
else:
sys.path.insert(0, 'build')
import subprocess
from polyaxon_client.tracking import get_data_paths
# padding, unknown word, end of sentence
base_vocab = ['<PAD>', '<UNK>', '<EOS>']
padding_idx = base_vocab.index('<PAD>')
def load_nav_graphs(scans):
''' Load connectivity graph for each scan '''
def distance(pose1, pose2):
''' Euclidean distance between two graph poses '''
return ((pose1['pose'][3]-pose2['pose'][3])**2\
+ (pose1['pose'][7]-pose2['pose'][7])**2\
+ (pose1['pose'][11]-pose2['pose'][11])**2)**0.5
graphs = {}
for scan in scans:
with open('connectivity/%s_connectivity.json' % scan) as f:
G = nx.Graph()
positions = {}
data = json.load(f)
for i,item in enumerate(data):
if item['included']:
for j,conn in enumerate(item['unobstructed']):
if conn and data[j]['included']:
positions[item['image_id']] = np.array([item['pose'][3],
item['pose'][7], item['pose'][11]]);
assert data[j]['unobstructed'][i], 'Graph should be undirected'
G.add_edge(item['image_id'],data[j]['image_id'],weight=distance(item,data[j]))
nx.set_node_attributes(G, values=positions, name='position')
graphs[scan] = G
return graphs
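def _graph_distance_example(graphs, scan, start_viewpoint, goal_viewpoint):
    """Sketch (added for illustration): how the connectivity graphs returned by
    load_nav_graphs are typically queried for geodesic distances. The two
    viewpoint ids are assumed to be valid nodes of graphs[scan]."""
    G = graphs[scan]
    # shortest-path length along the Euclidean 'weight' attribute set above
    return nx.dijkstra_path_length(G, start_viewpoint, goal_viewpoint, weight='weight')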
def progress_generator(mask):
mask = ~mask # [True, True, False]
counter = mask.clone()
counter = torch.sum(counter, dim=1).float()
unit = 1 / counter
progress = torch.ones_like(mask).cuda()
progress = torch.cumsum(progress, dim=1).float()
progress = progress * unit.unsqueeze(1).expand(mask.shape)
progress = progress * mask.float()
return progress
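# Illustrative note (added): for an input mask [[False, False, True]] -- i.e.
# the last position is padding -- progress_generator returns [[0.5, 1.0, 0.0]]:
# a linear ramp over the unmasked steps, zeroed at the padded positions.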
def gt_words(obs):
"""
See "utils.Tokenizer.encode_sentence(...)" for "instr_encoding" details
"""
seq_tensor = np.array([ob['instr_encoding'] for ob in obs])
return torch.from_numpy(seq_tensor).cuda()
def load_datasets(splits):
"""
:param splits: A list of split.
if the split is "something@5000", it will use a random 5000 data from the data
:return:
"""
import random
data = []
old_state = random.getstate()
for split in splits:
# It only needs some part of the dataset?
components = split.split("@")
number = -1
if len(components) > 1:
split, number = components[0], int(components[1])
# Load Json
# if split in ['train', 'val_seen', 'val_unseen', 'test',
# 'val_unseen_half1', 'val_unseen_half2', 'val_seen_half1', 'val_seen_half2']: # Add two halves for sanity check
if "/" not in split:
if args.upload:
with open(get_sync_dir(os.path.join(args.upload_path,'tasks/R2R/data/R2R_%s.json' % split))) as f:
new_data = json.load(f)
else:
with open(os.path.join(args.R2R_Aux_path,'tasks/R2R/data/R2R_%s.json' % split)) as f:
new_data = json.load(f)
else:
with open(split) as f:
new_data = json.load(f)
# Partition
if number > 0:
random.seed(0) # Make the data deterministic, additive
random.shuffle(new_data)
new_data = new_data[:number]
# Join
data += new_data
random.setstate(old_state) # Recover the state of the random generator
return data
class Tokenizer(object):
''' Class to tokenize and encode a sentence. '''
SENTENCE_SPLIT_REGEX = re.compile(r'(\W+)') # Split on any non-alphanumeric character
def __init__(self, vocab=None, encoding_length=20):
self.encoding_length = encoding_length
self.vocab = vocab
self.word_to_index = {}
self.index_to_word = {}
if vocab:
for i,word in enumerate(vocab):
self.word_to_index[word] = i
new_w2i = defaultdict(lambda: self.word_to_index['<UNK>'])
new_w2i.update(self.word_to_index)
self.word_to_index = new_w2i
for key, value in self.word_to_index.items():
self.index_to_word[value] = key
old = self.vocab_size()
self.add_word('<BOS>')
assert self.vocab_size() == old+1
print("OLD_VOCAB_SIZE", old)
print("VOCAB_SIZE", self.vocab_size())
if vocab:
print("VOCAB", len(vocab))
def finalize(self):
"""
This is used for debug
"""
self.word_to_index = dict(self.word_to_index) # To avoid using mis-typing tokens
def add_word(self, word):
assert word not in self.word_to_index
        self.word_to_index[word] = self.vocab_size() # vocab_size() is the next free index
self.index_to_word[self.vocab_size()] = word
@staticmethod
def split_sentence(sentence):
''' Break sentence into a list of words and punctuation '''
toks = []
for word in [s.strip().lower() for s in Tokenizer.SENTENCE_SPLIT_REGEX.split(sentence.strip()) if len(s.strip()) > 0]:
# Break up any words containing punctuation only, e.g. '!?', unless it is multiple full stops e.g. '..'
if all(c in string.punctuation for c in word) and not all(c in '.' for c in word):
toks += list(word)
else:
toks.append(word)
return toks
def vocab_size(self):
return len(self.index_to_word)
def encode_sentence(self, sentence, max_length=None):
if max_length is None:
max_length = self.encoding_length
if len(self.word_to_index) == 0:
sys.exit('Tokenizer has no vocab')
encoding = [self.word_to_index['<BOS>']]
for word in self.split_sentence(sentence):
encoding.append(self.word_to_index[word]) # Default Dict
encoding.append(self.word_to_index['<EOS>'])
if len(encoding) <= 2:
return None
#assert len(encoding) > 2
if len(encoding) < max_length:
encoding += [self.word_to_index['<PAD>']] * (max_length-len(encoding)) # Padding
elif len(encoding) > max_length:
encoding[max_length - 1] = self.word_to_index['<EOS>'] # Cut the length with EOS
return np.array(encoding[:max_length])
def decode_sentence(self, encoding, length=None):
sentence = []
if length is not None:
encoding = encoding[:length]
for ix in encoding:
if ix == self.word_to_index['<PAD>']:
break
else:
sentence.append(self.index_to_word[ix])
return " ".join(sentence)
def shrink(self, inst):
"""
:param inst: The id inst
:return: Remove the potential <BOS> and <EOS>
If no <EOS> return empty list
"""
if len(inst) == 0:
return inst
end = np.argmax(np.array(inst) == self.word_to_index['<EOS>']) # If no <EOS>, return empty string
if len(inst) > 1 and inst[0] == self.word_to_index['<BOS>']:
start = 1
else:
start = 0
# print(inst, start, end)
return inst[start: end]
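def _tokenizer_roundtrip_example(vocab):
    """Sketch (added for illustration): encode a sentence with the Tokenizer
    above and decode it back. `vocab` is expected to come from build_vocab()
    or read_vocab() defined below."""
    tok = Tokenizer(vocab=vocab, encoding_length=20)
    encoding = tok.encode_sentence("walk past the sofa and stop")
    # encoding is a fixed-length numpy array: <BOS>, word ids, <EOS>, then <PAD>
    return tok.decode_sentence(encoding)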
def build_vocab(splits=['train'], min_count=5, start_vocab=base_vocab):
''' Build a vocab, starting with base vocab containing a few useful tokens. '''
count = Counter()
t = Tokenizer()
data = load_datasets(splits)
for item in data:
for instr in item['instructions']:
count.update(t.split_sentence(instr))
vocab = list(start_vocab)
for word,num in count.most_common():
if num >= min_count:
vocab.append(word)
else:
break
return vocab
def write_vocab(vocab, path):
print('Writing vocab of size %d to %s' % (len(vocab),path))
with open(path, 'w') as f:
for word in vocab:
f.write("%s\n" % word)
def read_vocab(path):
with open(path) as f:
vocab = [word.strip() for word in f.readlines()]
return vocab
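# Typical vocabulary workflow (sketch, added for illustration; the path below
# is hypothetical):
#   vocab = build_vocab(splits=['train'], min_count=5)
#   write_vocab(vocab, 'tasks/R2R/data/train_vocab.txt')
#   tok = Tokenizer(vocab=read_vocab('tasks/R2R/data/train_vocab.txt'))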
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def timeSince(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return '%s (- %s)' % (asMinutes(s), asMinutes(rs))
def read_img_features(feature_store):
import csv
import base64
from tqdm import tqdm
print("Start loading the image feature")
start = time.time()
if "detectfeat" in args.features:
views = int(args.features[10:])
else:
views = 36
args.views = views
tsv_fieldnames = ['scanId', 'viewpointId', 'image_w', 'image_h', 'vfov', 'features']
features = {}
with open(feature_store, "r") as tsv_in_file: # Open the tsv file.
reader = csv.DictReader(tsv_in_file, delimiter='\t', fieldnames=tsv_fieldnames)
for item in reader:
long_id = item['scanId'] + "_" + item['viewpointId']
            features[long_id] = np.frombuffer(base64.decodebytes(item['features'].encode('ascii')),
dtype=np.float32).reshape((views, -1)) # Feature of long_id is (36, 2048)
print("Finish Loading the image feature from %s in %0.4f seconds" % (feature_store, time.time() - start))
return features
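# Note (added): each entry of the returned dict maps "<scanId>_<viewpointId>"
# to a float32 array of shape (views, feature_dim) -- one row per discretised
# viewing angle (36 by default).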
def read_obj_dense_features(dense_obj_feat1, dense_obj_feat2, bbox, sparse_obj_feat, th):
    # Load the dense object features
print("Start loading the object dense feature")
start = time.time()
obj_d_feat1 = np.load(dense_obj_feat1, allow_pickle=True).item()
obj_d_feat2 = np.load(dense_obj_feat2, allow_pickle=True).item()
obj_s_feat = np.load(sparse_obj_feat, allow_pickle=True).item()
obj_d_feat = {**obj_d_feat1, **obj_d_feat2}
bbox_feat = np.load(bbox, allow_pickle=True).item()
viewpointIds = load_viewpointids()
sum_feature = np.zeros(2048)
sum_bbox = np.zeros(4)
sum_e = np.zeros(4)
sum_h = np.zeros(4)
obj_num = 0
none_num = 0
# calculate the average feature&bbox for padding
for n,(scanId, viewpointId) in enumerate(viewpointIds):
long_id = scanId + '_' + viewpointId
if len(obj_d_feat[long_id]['concat_prob']) > 0:
for i,feat in enumerate(obj_d_feat[long_id]['concat_feature']):
if obj_d_feat[long_id]['concat_prob'][i] < th:
continue
sum_feature += feat
assert bbox_feat[long_id]['concat_viewIndex'] == obj_d_feat[long_id]['concat_viewIndex']
sum_bbox += bbox_feat[long_id]['concat_bbox'][i]
sum_h += obj_s_feat[long_id]['concat_angles_h'][i]
sum_e += obj_s_feat[long_id]['concat_angles_e'][i]
obj_num +=1
avg_feature = sum_feature/obj_num
avg_b = sum_bbox/obj_num
avg_e = sum_e/obj_num
avg_h = sum_h/obj_num
avg_angle = obj_rad2reg_feature(avg_e, avg_h, 'dense')
objs = {}
for n, (scanId, viewpointId) in enumerate(viewpointIds):
long_id = scanId + '_' + viewpointId
flag = 0
viewpoint_object = []
for bb,bb_viewIndex, features, txt, viewIndex, prob, angles_h, angles_e in zip(
bbox_feat[long_id]['concat_bbox'], bbox_feat[long_id]['concat_viewIndex'],
obj_d_feat[long_id]['concat_feature'], obj_d_feat[long_id]['concat_text'],
obj_d_feat[long_id]['concat_viewIndex'], obj_d_feat[long_id]['concat_prob'],
obj_s_feat[long_id]['concat_angles_h'], obj_s_feat[long_id]['concat_angles_e']
):
if prob < th:
continue
assert bb_viewIndex == viewIndex
flag = 1
angles = obj_rad2reg_feature(angles_h, angles_e, 'dense')
viewpoint_object.append({'bbox':bb,'angles': angles, 'features': features,
'text': txt, 'viewIndex': viewIndex, 'prob': prob})
if not flag and (th!=1): # thresh out and pad average feature
viewpoint_object.append({'bbox':avg_b, 'angles': avg_angle, 'features': avg_feature,
'text': 'average', 'viewIndex':None, 'prob': None})
none_num += 1
if th == 1:
viewpoint_object.append({'bbox': np.zeros(4),'angles': np.zeros(128), 'features': np.zeros(2048),
'text': 'zero', 'viewIndex': None, 'prob': None})
none_num += 1
num_obj = len(viewpoint_object)
concat_angles = np.zeros((num_obj,128), np.float32)
concat_bbox = np.zeros((num_obj, 4), np.float32)
concat_dense_feature = np.zeros((num_obj, 2048))
concat_text = [None] * num_obj
concat_viewIndex = [None] * num_obj
concat_prob = [None] * num_obj
for n_obj, obj in enumerate(viewpoint_object):
concat_bbox[n_obj] = obj['bbox']
concat_angles[n_obj] = obj['angles']
concat_dense_feature[n_obj] = obj['features']
concat_text[n_obj] = obj['text']
concat_viewIndex[n_obj] = obj['viewIndex']
concat_prob[n_obj] = obj['prob']
objs[long_id] = {
'concat_bbox': concat_bbox,
'concat_angles': concat_angles,
'concat_feature': concat_dense_feature,
'concat_text': concat_text,
'concat_viewIndex': concat_viewIndex,
'concat_prob': concat_prob
}
print("Finish loading the prob larger than %0.2f dense object dense feature from %s and %s in %0.4f seconds" % (
th,dense_obj_feat1, dense_obj_feat2, time.time()-start))
print("%d viewpoint get None object prob > % 0.2f"%(none_num, th))
return objs
def read_obj_sparse_features(sparse_obj_feat, th):
print("Start loading the object sparse feature")
start = time.time()
obj_s_feat = np.load(sparse_obj_feat, allow_pickle=True).item()
viewpointIds = load_viewpointids()
sum_feature = | np.zeros(300) | numpy.zeros |
"""
Code for loading data
"""
import os, sys
import shutil
import argparse
import functools
import multiprocessing
import gzip
import inspect
import glob
import json
import itertools
import collections
import logging
from typing import *
import torch
from torch.utils.data import Dataset
import numpy as np
import pandas as pd
import Levenshtein
import featurization as ft
import utils
LOCAL_DATA_DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), "data")
assert os.path.isdir(LOCAL_DATA_DIR)
EXTERNAL_EVAL_DIR = os.path.join(os.path.dirname(LOCAL_DATA_DIR), "external_eval")
assert os.path.join(EXTERNAL_EVAL_DIR)
# Names of datasets
DATASET_NAMES = {"LCMV", "VDJdb", "PIRD", "TCRdb"}
logging.basicConfig(level=logging.INFO)
class TcrABSupervisedIdxDataset(Dataset):
"""Dataset that returns TcrAB and label"""
def __init__(
self,
tcr_table: pd.DataFrame,
label_col: str = "tetramer",
pos_labels: Collection[str] = ["TetMid", "TetPos"],
idx_encode: bool = False,
max_a_len: Optional[int] = None,
max_b_len: Optional[int] = None,
disambiguate_labels: bool = True,
):
self.table = tcr_table
self.label_col = label_col
if disambiguate_labels:
logging.info("Deduping and removing examples with conflicting labels")
lcmv_dedup_ab, self.labels = dedup_lcmv_table(tcr_table)
self.tras, self.trbs = zip(*lcmv_dedup_ab)
else:
raise NotImplementedError(
"Running withough disambiguating labels causes duplicated and conflicting labels! This was the prior behavior, but is now deprecated"
)
tcr_a_lengths = [len(self.get_ith_tcr_a(i)) for i in range(len(self))]
tcr_b_lengths = [len(self.get_ith_tcr_b(i)) for i in range(len(self))]
self.max_a_len = max(tcr_a_lengths) if max_a_len is None else max_a_len
self.max_b_len = max(tcr_b_lengths) if max_b_len is None else max_b_len
self.idx_encode = idx_encode
logging.info(f"Maximum TCR A/B lengths: {self.max_a_len} {self.max_b_len}")
self.pos_labels = pos_labels
logging.info(f"Positive {label_col} labels: {pos_labels}")
def __len__(self) -> int:
return len(self.labels)
def get_ith_tcr_a(self, idx: int, pad: bool = False) -> str:
"""Gets the ith TRA sequence"""
seq = self.tras[idx]
if pad:
seq = ft.pad_or_trunc_sequence(seq, self.max_a_len, right_align=False)
return seq
def get_ith_tcr_b(self, idx: int, pad: bool = False) -> str:
"""Gets the ith TRB sequence"""
seq = self.trbs[idx]
if pad:
seq = ft.pad_or_trunc_sequence(seq, self.max_b_len, right_align=False)
return seq
def get_ith_sequence(self, idx: int) -> Tuple[str, str]:
"""Get the ith TRA/TRB pair"""
return self.tras[idx], self.trbs[idx]
def get_ith_label(self, idx: int, idx_encode: Optional[bool] = None) -> np.ndarray:
"""Get the ith label"""
label = self.labels[idx]
retval = float(np.any([l in label for l in self.pos_labels]))
retval = np.array([1.0 - retval, retval], dtype=np.float32)
idx_encode = self.idx_encode if idx_encode is None else idx_encode
if idx_encode:
retval = np.where(retval)[0]
return retval
def __getitem__(self, idx: int):
tcr_a_idx = ft.idx_encode(self.get_ith_tcr_a(idx, pad=True))
tcr_b_idx = ft.idx_encode(self.get_ith_tcr_b(idx, pad=True))
label = self.get_ith_label(idx)
return (
{
"tcr_a": torch.from_numpy(tcr_a_idx),
"tcr_b": torch.from_numpy(tcr_b_idx),
},
torch.from_numpy(label).type(torch.long).squeeze(),
)
class TcrABSupervisedOneHotDataset(TcrABSupervisedIdxDataset):
"""Dataset that encodes tcrAB as one hot encoded vectors"""
def __getitem__(self, idx: int):
tcr_a_idx = ft.one_hot(self.get_ith_tcr_a(idx, pad=True))
tcr_b_idx = ft.one_hot(self.get_ith_tcr_b(idx, pad=True))
label = self.get_ith_label(idx)
return (
{
"tcr_a": torch.from_numpy(tcr_a_idx),
"tcr_b": torch.from_numpy(tcr_b_idx),
},
torch.from_numpy(label).type(torch.long).squeeze(),
)
class TCRSupervisedIdxDataset(Dataset):
"""Dataset meant for either TRA or TRB supervised learning"""
def __init__(
self,
tcrs: Sequence[str],
labels: Sequence[bool],
idx_encode_labels: bool = True,
max_len: Optional[int] = None,
):
self.tcrs = tcrs
self.labels = labels
assert len(self.tcrs) == len(self.labels)
self.max_len = max_len # Defaults to None
determined_max_len = max([len(t) for t in tcrs])
if self.max_len is not None:
# If a max_len is explicitly given, check that it is greater than the actual max len
assert isinstance(self.max_len, int)
assert determined_max_len <= self.max_len
logging.info(
f"Given max_len of {self.max_len} exceeds (as expected) empirical max_len of {determined_max_len}"
)
else:
# If max_len is not given, directly set the max_len
logging.info(
f"Max len not set, using empirical max len of {determined_max_len}"
)
self.max_len = determined_max_len
logging.info(f"Using maximum length of {self.max_len}")
self.idx_encode_labels = idx_encode_labels
def all_labels(self) -> Sequence[bool]:
"""Return all labels"""
return self.labels
def __len__(self) -> int:
return len(self.tcrs)
def get_ith_tcr(self, idx: int, pad: bool = True) -> str:
"""Returns the ith tcr sequence, padded with null residues"""
retval = self.tcrs[idx]
if pad:
retval = ft.pad_or_trunc_sequence(retval, self.max_len, right_align=False)
return retval
def get_ith_sequence(self, idx: int) -> str:
return self.tcrs[idx]
def get_ith_label(self, idx: int) -> np.ndarray:
retval = float(self.labels[idx])
if not self.idx_encode_labels:
retval = np.array([1.0 - retval, retval], dtype=np.float32)
return np.atleast_1d(retval)
def __getitem__(self, idx: int) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:
tcr_idx = ft.idx_encode(self.get_ith_tcr(idx, pad=True))
label = self.get_ith_label(idx)
return (
{"seq": torch.from_numpy(tcr_idx)},
torch.from_numpy(label).type(torch.long).squeeze(),
)
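def _tcr_supervised_dataset_example():
    """Sketch (added for illustration): build a tiny supervised TCR dataset
    from raw CDR3 strings; the sequences below are arbitrary examples."""
    dset = TCRSupervisedIdxDataset(
        tcrs=["CASSLGQAYEQYF", "CASSPGTGGNEQFF"],
        labels=[True, False],
    )
    features, label = dset[0]  # dict with an idx-encoded "seq" tensor, plus the label
    return features["seq"].shape, label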
class TcrSelfSupervisedDataset(TcrABSupervisedIdxDataset):
"""
Mostly for compatibility with transformers library
LineByLineTextDataset returns a dict of "input_ids" -> input_ids
"""
# Reference: https://github.com/huggingface/transformers/blob/447808c85f0e6d6b0aeeb07214942bf1e578f9d2/src/transformers/data/datasets/language_modeling.py
def __init__(self, tcr_seqs: Iterable[str], tokenizer, round_len: bool = True):
self.tcr_seqs = utils.dedup(tcr_seqs)
logging.info(
f"Creating self supervised dataset with {len(self.tcr_seqs)} sequences"
)
self.max_len = max([len(s) for s in self.tcr_seqs])
logging.info(f"Maximum sequence length: {self.max_len}")
if round_len:
self.max_len = int(utils.min_power_greater_than(self.max_len, 2))
logging.info(f"Rounded maximum length to {self.max_len}")
self.tokenizer = tokenizer
self._has_logged_example = False
def __len__(self) -> int:
return len(self.tcr_seqs)
def __getitem__(self, i: int) -> Dict[str, torch.Tensor]:
tcr = self.tcr_seqs[i]
retval = self.tokenizer.encode(ft.insert_whitespace(tcr))
if not self._has_logged_example:
logging.info(f"Example of tokenized input: {tcr} -> {retval}")
self._has_logged_example = True
return {"input_ids": torch.tensor(retval, dtype=torch.long)}
def merge(self, other):
"""Merge this dataset with the other dataset"""
all_tcrs = utils.dedup(self.tcr_seqs + other.tcr_seqs)
logging.info(
f"Merged two self-supervised datasets of sizes {len(self)} {len(other)} for dataset of {len(all_tcrs)}"
)
return TcrSelfSupervisedDataset(all_tcrs)
class TcrNextSentenceDataset(Dataset):
"""
Dataset for next sentence prediction. Input is two lists of pairwise
corresponding TRA TRB sequences
Note that the labelling scheme here is (False, True)
This DIFFERS from the convention used in the transformers library for NSP
    Note that the TRA/TRB naming convention is somewhat of a misnomer - in reality, these are
just first/second pairs.
This also supports generating NEGATIVE examples dynamically. This is automatically
enabled when this is wrapped in a DatasetSplit object with training split. This
may yield improved sampling of the negative manifold and yield a more general model
"""
def __init__(
self,
tra_seqs: List[str],
trb_seqs: List[str],
neg_ratio: float = 1.0,
labels: Optional[Iterable[bool]] = None,
tra_blacklist: Optional[Iterable[str]] = None,
mlm: float = 0.0,
max_len: int = 64,
seed: int = 4242,
remove_null: bool = True,
shuffle: bool = True,
):
assert len(tra_seqs) == len(trb_seqs)
# Remove cases of nan
logging.info(f"Build NSP dataset with {len(tra_seqs)} pairs")
if remove_null:
bad_idx_a = [
i
for i, seq in enumerate(tra_seqs)
if seq is None or pd.isnull(seq) or seq == ""
]
bad_idx_b = [
i
for i, seq in enumerate(trb_seqs)
if seq is None or pd.isnull(seq) or seq == ""
]
bad_idx = set(bad_idx_a).union(bad_idx_b)
logging.info(
f"Removing {len(bad_idx)} bad pairs: {len(bad_idx_a)} union {len(bad_idx_b)}"
)
tra_seqs = [a for i, a in enumerate(tra_seqs) if i not in bad_idx]
trb_seqs = [a for i, a in enumerate(trb_seqs) if i not in bad_idx]
if tra_blacklist is not None:
bad_idx = [i for i, seq in enumerate(tra_seqs) if seq in set(tra_blacklist)]
logging.info(f"Removing {len(bad_idx)} blacklisted items")
tra_seqs = [a for i, a in enumerate(tra_seqs) if i not in bad_idx]
trb_seqs = [a for i, a in enumerate(trb_seqs) if i not in bad_idx]
logging.info(f"Building NSP datset with {len(tra_seqs)} pairs after filtering")
# Insert whitespace as we store the sequences
# Whitespace separated inputs is expected by tokenizer
# These are never shuffled, regardless of the shuffle param
self.tra = [ft.insert_whitespace(aa) for aa in tra_seqs]
self.trb = [ft.insert_whitespace(aa) for aa in trb_seqs]
assert 0.0 <= mlm <= 1.0
self.mlm = mlm
self.neg_ratio = neg_ratio
self.rng = np.random.default_rng(seed=seed)
if self.neg_ratio > 0:
assert labels is None, "Cannot sample negatives if labels are given"
pos_pairs = list(zip(self.tra, self.trb))
num_negs = int(round(len(pos_pairs) * neg_ratio))
logging.info(f"Sampling {num_negs} negatives")
neg_pairs = [self.__sample_negative() for _i in range(num_negs)]
logging.info(f"Positive pairs: {len(pos_pairs)}")
logging.info(f"Sampled negative pairs: {len(neg_pairs)}")
# WARNING in tokenizers convention, output is (True, False)
# This means that a correct pair is a "0" and a wrong pair is a "1"
# we DO NOT adhere to this convention, rather using a conventional labelling
self.labels = np.array([1] * len(pos_pairs) + [0] * len(neg_pairs))
self.all_pairs = pos_pairs + neg_pairs
elif labels is not None:
logging.info(f"Taking given labels with {np.mean(labels)} positive rate")
self.labels = labels
self.all_pairs = list(zip(self.tra, self.trb))
else:
# raise RuntimeError("Must provide either neg_ratio or labels argument")
            logging.warning(
"No labels or negative ratio provided, defaulting to all negative labels"
)
self.all_pairs = list(zip(self.tra, self.trb))
self.labels = np.array([0.0] * len(self.all_pairs))
assert len(self.labels) == len(self.all_pairs)
self.max_len = max_len
max_len_actual = max(
max([len(aa.split()) for aa in self.tra]),
max([len(aa.split()) for aa in self.trb]),
)
logging.info(f"Maximum length of NSP single sequence: {max_len_actual}")
self.tok = ft.get_aa_bert_tokenizer(max_len=max_len_actual)
# Shuffle the examples
if shuffle:
logging.info("Shuffling NSP dataset")
shuf_idx = np.arange(len(self.labels))
self.rng.shuffle(shuf_idx)
self.labels = self.labels[shuf_idx] # Contains whether this is a valid pair
self.all_pairs = [self.all_pairs[i] for i in shuf_idx]
logging.info(
f"NSP dataset of {len(self.all_pairs)} pairs, {np.sum(self.labels)} positive examples"
)
logging.info(f"Example training example")
for k, v in self[0].items():
logging.info(f"{k}: {v}")
def __sample_negative(self) -> Tuple[str, str]:
"""
Generate a negative example
"""
if self.neg_ratio <= 0.0:
raise RuntimeError("Cannot sample negatives for labelled dataset")
i, j = self.rng.integers(len(self.tra), size=2)
while self.tra[i] == self.tra[j]: # Is not a valid pair
j = self.rng.integers(len(self.tra))
return self.tra[i], self.trb[j]
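    # Note (added for clarity): a negative example pairs the TRA of one row
    # with the TRB of a *different* row, re-drawing j until the two TRA
    # sequences differ, so an alpha chain is never paired "negatively" with a
    # beta chain from an example sharing the same alpha sequence.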
def __len__(self) -> int:
assert len(self.labels) == len(self.all_pairs)
return len(self.labels)
def get_ith_label(self, idx):
return self.labels[idx]
def get_ith_sequence(self, idx) -> Tuple[str, str]:
return self.all_pairs[idx]
def __getitem__(self, idx: int, dynamic: bool = False) -> Dict[str, torch.Tensor]:
"""
dynamic is a general flag for generating examples dynamically
"""
label = self.labels[idx]
label_tensor = torch.LongTensor(np.atleast_1d(label))
if dynamic and label == 0:
# Dynamically generate a negative example
pair = self.__sample_negative()
else: # Positive example OR not dynamic
pair = self.all_pairs[idx]
if self.mlm > 0.0:
# Mask out each sequence BEFORE we pad/concatenate them
# This ensures that the mask is always an amino acid
mlm_targets, pair = zip(*[ft.mask_for_training(a) for a in pair])
t = np.atleast_1d(-100).astype(np.int64)
# CLS seq1 SEP seq2 SEP
mlm_targets_combined = np.concatenate(
[t, mlm_targets[0], t, mlm_targets[1], t]
)
mlm_targets_padded = torch.LongTensor(
np.pad(
mlm_targets_combined,
(0, self.max_len - len(mlm_targets_combined)),
mode="constant",
constant_values=-100,
)
)
enc = self.tok(
text=pair[0],
text_pair=pair[1],
padding="max_length",
max_length=self.max_len,
return_tensors="pt",
)
# Default tokenization has (batch, ...) as first dim
# Since __getitem__ only gets a single example, remove this
enc = {k: v.squeeze() for k, v in enc.items()}
if self.mlm > 0.0: # NSP + MLM
assert (
mlm_targets_padded.size() == enc["input_ids"].size()
), f"Mismatched sizes {mlm_targets_padded.size()} {enc['input_ids'].size()}"
enc["next_sentence_label"] = label_tensor
enc["labels"] = torch.LongTensor(mlm_targets_padded)
else: # NSP only
enc["labels"] = label_tensor
return enc
def get_all_items(self) -> Dict[str, torch.Tensor]:
"""
Get all the data instead of individual entries
"""
collector = collections.defaultdict(list)
for i in range(len(self)):
x = self[i]
for k, v in x.items():
collector[k].append(v.reshape(1, -1))
retval = {k: torch.cat(v, dim=0) for k, v in collector.items()}
return retval
class TcrFineTuneSingleDataset(TcrSelfSupervisedDataset):
"""Dataset for fine tuning from only TRA or TRB sequences"""
def __init__(
self,
aa: Sequence[str],
labels: MutableSequence[float],
label_continuous: bool = False,
label_labels: Optional[Sequence[str]] = None,
drop_rare_labels: bool = True,
):
assert len(aa) == len(
labels
), f"Got differing lengths for aa and labels: {len(aa)}, {len(labels)}"
self.aa = [ft.insert_whitespace(item) for item in aa]
self.tokenizer = ft.get_aa_bert_tokenizer(64)
self.continuous = label_continuous
label_dtype = np.float32 if self.continuous else np.int64
self.labels = np.array(labels, dtype=label_dtype).squeeze()
assert len(self.labels) == len(self.aa)
self.label_labels = label_labels
if self.continuous:
assert self.label_labels is None
if drop_rare_labels and not self.continuous and not self.is_multilabel:
# Get the mean positive rate for each label
labels_expanded = np.zeros((len(labels), np.max(labels) + 1))
labels_expanded[np.arange(len(labels)), self.labels] = 1
per_label_prop = np.mean(labels_expanded, axis=0)
# Find the labels with high enough positive rate
good_idx = np.where(per_label_prop >= 1e-3)[0]
if len(good_idx) < labels_expanded.shape[1]:
logging.info(
f"Retaining {len(good_idx)}/{labels_expanded.shape[1]} labels with sufficient examples"
)
# Reconstruct labels based only on retained good_idx
# nonzero returns indices of element that are nonzero
self.labels = np.array(
[
np.nonzero(good_idx == label)[0][0]
if label in good_idx
else len(good_idx) # "other" labels
for label in self.labels
],
dtype=label_dtype,
)
assert np.max(self.labels) == len(good_idx)
# Subset label labels
self.label_labels = [self.label_labels[i] for i in good_idx] + ["other"]
assert len(self.label_labels) == len(good_idx) + 1
@property
def is_multilabel(self) -> bool:
"""Return True if labels represent multilabel classification"""
return len(self.labels.shape) > 1
def get_ith_sequence(self, idx: int) -> str:
"""Get the ith sequence"""
return self.aa[idx]
def get_ith_label(self, idx: int) -> np.ndarray:
"""Gets the ith label"""
return np.atleast_1d(self.labels[idx])
def __len__(self) -> int:
return len(self.labels)
def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:
label = torch.tensor(self.get_ith_label(idx))
if self.is_multilabel:
# Multilabel -> BCEWithLogitsLoss which wants float target
label = label.float()
# We already inserted whitespaces in init
enc = self.tokenizer(
self.aa[idx], padding="max_length", max_length=64, return_tensors="pt"
)
enc = {k: v.squeeze() for k, v in enc.items()}
enc["labels"] = label
return enc
class TcrFineTuneDataset(TcrSelfSupervisedDataset):
"""Can supply tokenizer to work with ESM"""
def __init__(
self,
tcr_a_seqs: Sequence[str],
tcr_b_seqs: Sequence[str],
labels: Optional[np.ndarray] = None,
label_continuous: bool = False,
tokenizer: Optional[Callable] = None,
skorch_mode: bool = True,
idx_encode: bool = False,
):
assert len(tcr_a_seqs) == len(tcr_b_seqs)
self.tcr_a = list(tcr_a_seqs)
self.tcr_b = list(tcr_b_seqs)
self.max_len = max([len(s) for s in self.tcr_a + self.tcr_b]) + 2
if tokenizer is None:
tokenizer = ft.get_aa_bert_tokenizer(self.max_len)
self.tcr_a_tokenized = [
tokenizer.encode(
ft.insert_whitespace(aa),
padding="max_length",
max_length=self.max_len,
)
for aa in self.tcr_a
]
self.tcr_b_tokenized = [
tokenizer.encode(
ft.insert_whitespace(aa),
padding="max_length",
max_length=self.max_len,
)
for aa in self.tcr_b
]
else:
logging.info(f"Using pre-supplied tokenizer: {tokenizer}")
_label, _seq, self.tcr_a_tokenized = tokenizer(list(enumerate(self.tcr_a)))
_label, _seq, self.tcr_b_tokenized = tokenizer(list(enumerate(self.tcr_b)))
if labels is not None:
assert len(labels) == len(tcr_a_seqs)
self.labels = np.atleast_1d(labels.squeeze())
else:
logging.warning(
"Labels not given, defaulting to False labels (DO NOT USE FOR TRAINING)"
)
self.labels = None
self.continuous = label_continuous
self.skorch_mode = skorch_mode
self.idx_encode = idx_encode
def get_ith_sequence(self, idx: int) -> Tuple[str, str]:
"""Get the ith TRA/TRB pair"""
return self.tcr_a[idx], self.tcr_b[idx]
def get_ith_label(self, idx: int, idx_encode: Optional[bool] = None) -> np.ndarray:
"""Get the ith label"""
if self.labels is None:
return np.array([0]) # Dummy value
if not self.continuous:
label = self.labels[idx]
if not isinstance(label, np.ndarray):
label = np.atleast_1d(label)
if self.skorch_mode and len(label) == 1:
label = np.array([1.0 - label, label]).squeeze()
# Take given value if supplied, else default to self.idx_encode
idx_encode = self.idx_encode if idx_encode is None else idx_encode
if idx_encode:
label = np.where(label)[0]
return label
else:
# For the continuous case we simply return the ith value(s)
return self.labels[idx]
def __len__(self) -> int:
return len(self.tcr_a)
def __getitem__(
self, idx: int
) -> Union[Dict[str, torch.Tensor], Tuple[Dict[str, torch.Tensor], torch.Tensor]]:
label_dtype = torch.float if self.continuous else torch.long
tcr_a = self.tcr_a_tokenized[idx]
tcr_b = self.tcr_b_tokenized[idx]
label = self.get_ith_label(idx)
if not self.skorch_mode:
retval = {
"tcr_a": utils.ensure_tensor(tcr_a, dtype=torch.long),
"tcr_b": utils.ensure_tensor(tcr_b, dtype=torch.long),
"labels": utils.ensure_tensor(label, dtype=label_dtype),
}
else:
model_inputs = {
"tcr_a": utils.ensure_tensor(tcr_a, dtype=torch.long),
"tcr_b": utils.ensure_tensor(tcr_b, dtype=torch.long),
}
retval = (model_inputs, torch.tensor(label, dtype=label_dtype).squeeze())
return retval
class DatasetSplit(Dataset):
"""
    Dataset split. Thin wrapper on top of a dataset to provide data split functionality.
Can also enable dynamic example generation for train fold if supported by
the wrapped dataset (NOT for valid/test folds) via dynamic_training flag
kwargs are forwarded to shuffle_indices_train_valid_test
"""
def __init__(
self,
full_dataset: Dataset,
split: str,
dynamic_training: bool = False,
**kwargs,
):
self.dset = full_dataset
split_to_idx = {"train": 0, "valid": 1, "test": 2}
assert split in split_to_idx
self.split = split
self.dynamic = dynamic_training
if self.split != "train":
assert not self.dynamic, "Cannot have dynamic examples for valid/test"
self.idx = shuffle_indices_train_valid_test(
np.arange(len(self.dset)), **kwargs
)[split_to_idx[self.split]]
logging.info(f"Split {self.split} with {len(self)} examples")
def all_labels(self, **kwargs) -> np.ndarray:
"""Get all labels"""
if not hasattr(self.dset, "get_ith_label"):
raise NotImplementedError("Wrapped dataset must implement get_ith_label")
labels = [
self.dset.get_ith_label(self.idx[i], **kwargs) for i in range(len(self))
]
return np.stack(labels)
def all_sequences(self, **kwargs) -> Union[List[str], List[Tuple[str, str]]]:
"""Get all sequences"""
if not hasattr(self.dset, "get_ith_sequence"):
raise NotImplementedError(
f"Wrapped dataset {type(self.dset)} must implement get_ith_sequence"
)
# get_ith_sequence could return a str or a tuple of two str (TRA/TRB)
sequences = [
self.dset.get_ith_sequence(self.idx[i], **kwargs) for i in range(len(self))
]
return sequences
def to_file(self, fname: str, compress: bool = True) -> str:
"""
Write to the given file
"""
if not (
hasattr(self.dset, "get_ith_label")
and hasattr(self.dset, "get_ith_sequence")
):
raise NotImplementedError(
"Wrapped dataset must implement both get_ith_label & get_ith_sequence"
)
assert fname.endswith(".json")
all_examples = []
for idx in range(len(self)):
seq = self.dset.get_ith_sequence(self.idx[idx])
label_list = self.dset.get_ith_label(self.idx[idx]).tolist()
all_examples.append((seq, label_list))
with open(fname, "w") as sink:
json.dump(all_examples, sink, indent=4)
if compress:
with open(fname, "rb") as source:
with gzip.open(fname + ".gz", "wb") as sink:
shutil.copyfileobj(source, sink)
os.remove(fname)
fname += ".gz"
assert os.path.isfile(fname)
return os.path.abspath(fname)
def __len__(self) -> int:
return len(self.idx)
def __getitem__(self, idx: int):
if (
self.dynamic
and self.split == "train"
and "dynamic" in inspect.getfullargspec(self.dset.__getitem__).args
):
return self.dset.__getitem__(self.idx[idx], dynamic=True)
return self.dset.__getitem__(self.idx[idx])
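def _dataset_split_example(full_dataset):
    """Illustrative only (added): wrap an existing dataset in train/valid/test
    views. `full_dataset` is any Dataset implementing get_ith_label and
    get_ith_sequence as above."""
    train = DatasetSplit(full_dataset, split="train", valid=0.15, test=0.15)
    valid = DatasetSplit(full_dataset, split="valid", valid=0.15, test=0.15)
    test = DatasetSplit(full_dataset, split="test", valid=0.15, test=0.15)
    assert len(train) + len(valid) + len(test) == len(full_dataset)
    return train, valid, test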
class DatasetSplitByAttribute(DatasetSplit):
"""
    Dataset split. Thin wrapper on top of a dataset to provide data split functionality.
Unlike the above, which is a purely random split, this splits by a given attribute.
attr_getter function should take the dataset and return a list of attrs to split by
"""
def __init__(
self,
full_datset: Dataset,
attr_getter: Callable,
split: str,
dynamic_training: bool = False,
valid: float = 0.15,
test: float = 0.15,
seed: int = 1234,
):
self.dset = full_datset
self.dynamic = dynamic_training
self.split = split
self.split_attr = attr_getter(self.dset)
assert len(self.split_attr) == len(self.dset)
# Get the unique attrs and count occurrences of each
split_attr_counts = collections.Counter(self.split_attr)
assert (
len(split_attr_counts) >= 2
), f"Must have at least two classes of attribute to split, but got {len(split_attr_counts)}"
# Sort the attrs by most counts to least
_, self.train_attrs = zip(
*sorted(
[(count, attr) for attr, count in split_attr_counts.items()],
reverse=True,
)
)
self.train_attrs = list(self.train_attrs)
# Build valid, then test sets, by greedily taking the largest groups
# until we have at least the required number of examples
valid_n, test_n = len(self.dset) * valid, len(self.dset) * test
self.valid_attrs, self.test_attrs = [], []
while sum([split_attr_counts[a] for a in self.valid_attrs]) < valid_n:
# Take the biggest item in the list
self.valid_attrs.append(self.train_attrs.pop(0))
while sum([split_attr_counts[a] for a in self.test_attrs]) < test_n:
# Take the biggest item in the list
self.test_attrs.append(self.train_attrs.pop(0))
train_idx = np.array(
[
i
for i, attr in enumerate(self.split_attr)
if attr in set(self.train_attrs)
]
)
valid_idx = np.array(
[
i
for i, attr in enumerate(self.split_attr)
if attr in set(self.valid_attrs)
]
)
test_idx = np.array(
[
i
for i, attr in enumerate(self.split_attr)
if attr in set(self.test_attrs)
]
)
assert len(train_idx) + len(valid_idx) + len(test_idx) == len(self.dset)
logging.info(
f"Train split with {len(train_idx)} examples across {len(self.train_attrs)} attrs"
)
logging.info(
f"Valid split with {len(valid_idx)} examples across {len(self.valid_attrs)} attrs"
)
logging.info(
f"Test split with {len(test_idx)} examples across {len(self.test_attrs)} attrs"
)
rng = np.random.default_rng(seed)
rng.shuffle(train_idx)
rng.shuffle(valid_idx)
rng.shuffle(test_idx)
self.idx = {"train": train_idx, "valid": valid_idx, "test": test_idx}[split]
class DownsampledDataset(Dataset):
"""
Downsampled and shuffled dataset. Useful for evaluating impact of having less data.
Downsampling is done to a *fixed* subset of the original dataset
"""
def __init__(self, dset: Dataset, downsample: float = 0.1, seed: int = 3939):
assert 0.0 < downsample < 1.0
self.dset = dset
self.downsample = downsample
self.idx = np.arange(len(self.dset))
np.random.seed(seed)
np.random.shuffle(self.idx)
self.idx = self.idx[: int(np.round(downsample * len(self.dset)))]
logging.info(f"Downsampled from {len(self.dset)} -> {len(self)} samples")
def __len__(self) -> int:
return len(self.idx)
def __getitem__(self, idx: int):
return self.dset[self.idx[idx]]
def shuffle_indices_train_valid_test(
idx: np.ndarray, valid: float = 0.15, test: float = 0.15, seed: int = 1234
) -> Tuple[np.ndarray]:
"""
Given an array of indices, return indices partitioned into train, valid, and test indices
The following tests ensure that ordering is consistent across different calls
>>> np.all(shuffle_indices_train_valid_test(np.arange(100))[0] == shuffle_indices_train_valid_test(np.arange(100))[0])
True
>>> np.all(shuffle_indices_train_valid_test(np.arange(10000))[1] == shuffle_indices_train_valid_test(np.arange(10000))[1])
True
>>> np.all(shuffle_indices_train_valid_test(np.arange(20000))[2] == shuffle_indices_train_valid_test(np.arange(20000))[2])
True
>>> np.all(shuffle_indices_train_valid_test(np.arange(1000), 0.1, 0.1)[1] == shuffle_indices_train_valid_test(np.arange(1000), 0.1, 0.1)[1])
True
"""
np.random.seed(seed) # For reproducible subsampling
indices = np.copy(idx) # Make a copy because shuffling occurs in place
np.random.shuffle(indices) # Shuffles inplace
num_valid = int(round(len(indices) * valid)) if valid > 0 else 0
num_test = int(round(len(indices) * test)) if test > 0 else 0
num_train = len(indices) - num_valid - num_test
assert num_train > 0 and num_valid >= 0 and num_test >= 0
assert num_train + num_valid + num_test == len(
indices
), f"Got mismatched counts: {num_train} + {num_valid} + {num_test} != {len(indices)}"
indices_train = indices[:num_train]
indices_valid = indices[num_train : num_train + num_valid]
    indices_test = indices[num_train + num_valid:]  # avoids the empty-slice pitfall of indices[-0:] when num_test == 0
assert indices_train.size + indices_valid.size + indices_test.size == len(idx)
return indices_train, indices_valid, indices_test
def split_arr(
arr: Union[np.ndarray, pd.DataFrame, list, tuple],
split: Literal["train", "valid", "test"],
**kwargs,
) -> Union[np.ndarray, pd.DataFrame, list]:
"""
Randomly split the array into the given split
kwargs are fed to shuffle_indices_train_valid_test
"""
split_to_idx = {"train": 0, "valid": 1, "test": 2}
assert split in split_to_idx, f"Unrecognized split: {split}"
n = len(arr) if isinstance(arr, (list, tuple)) else arr.shape[0]
indices = np.arange(n)
keep_idx = shuffle_indices_train_valid_test(indices, **kwargs)[split_to_idx[split]]
if isinstance(arr, pd.DataFrame):
return arr.iloc[keep_idx]
if isinstance(arr, (list, tuple)):
return [arr[i] for i in keep_idx]
return arr[keep_idx]
def sample_unlabelled_tcrdb_trb(
n: int, blacklist: Optional[Collection[str]] = None, seed: int = 6464
) -> List[str]:
"""
Convenience function to sample the given number of TRBs from TCRdb
Blacklist can be given to exclude certain sequences from sampling
The following tests ensure reproducibility
>>> all([a == b for a, b in zip(sample_unlabelled_tcrdb_trb(10), sample_unlabelled_tcrdb_trb(10))])
True
"""
tcrdb = load_tcrdb()
rng = | np.random.default_rng(seed=seed) | numpy.random.default_rng |
# Project: AttractionRepulsionModel
# Filename: arm.py
# Authors: <NAME> (<EMAIL>).
"""
arm: An agent-based model of ideological polarization utilizing both attractive
and repulsive interactions.
"""
import math
import numpy as np
from tqdm import trange
def arm(N=100, D=1, E=[0.1], T=0.25, R=0.25, K=math.inf, S=500000, P=0, \
shock=(None, None), init='norm', seed=None, silent=False):
"""
Execute a simulation of the Attraction-Repulsion Model.
Inputs:
N (int): number of agents
D (int): number of ideological dimensions
E ([float]): list of exposures
T (float): tolerance
R (float): responsiveness
K (float): steepness of stochastic attraction-repulsion
S (int): number of steps to simulate
P (float): self-interest probability
shock ((float, float)): external shock step and strength
init (str): 'norm' for Gaussian normal initialization, 'emp' for empirical
seed (int): random seed
silent (bool): True if progress should be shown on command line
Returns (init_config, config, history):
init_config: N x D array of initial agent ideological positions
config: N x D array of agent ideological positions after S steps
history: S x (D + 2) array detailing interaction history
"""
# Initialize the random number generation.
rng = np.random.default_rng(seed)
# Initialize the agent population and their initial ideological positions.
if init == 'norm':
if D == 1:
config = np.zeros(N)
for i in np.arange(N):
while True:
config[i] = rng.normal(0.5, 0.2)
if 0 <= config[i] and config[i] <= 1:
break
config = config.reshape(-1, 1)
else: # Higher dimensions.
means, covs = 0.5 + np.zeros(D), 0.04 * np.eye(D)
config = np.zeros((N, D))
for i in np.arange(N):
while True:
config[i] = rng.multivariate_normal(means, covs)
clip = np.maximum(np.zeros(D), np.minimum(np.ones(D), config[i]))
if np.allclose(config[i], clip):
break
else: # Empirical initialization.
assert D == 1, 'ERROR: CCES 2020 data is 1-dimensional'
with open('CCES_2020_dist.npy', 'rb') as f:
emp = np.load(f)
vals, probs = emp[0], emp[1]
config = rng.choice(vals, N, p=probs) + (0.005 * rng.random(N) - 0.0025)
config = config.reshape(-1, 1)
init_config = np.copy(config)
# Create an S x (D + 2) array to store the interaction history. Each step i
# records the active agent [i][0], the passive agent [i][1], and the active
# agent's new position [i][2:].
history = | np.zeros((S, D + 2)) | numpy.zeros |
from typing import Iterable, Union
import numpy as np
from scipy.linalg import block_diag, eigh
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils.validation import check_is_fitted
from cca_zoo.models import rCCA
from cca_zoo.utils.check_values import _process_parameter, _check_views
class MCCA(rCCA):
r"""
A class used to fit MCCA model. For more than 2 views, MCCA optimizes the sum of pairwise correlations.
:Maths:
.. math::
w_{opt}=\underset{w}{\mathrm{argmax}}\{\sum_i\sum_{j\neq i} w_i^TX_i^TX_jw_j \}\\
\text{subject to:}
(1-c_i)w_i^TX_i^TX_iw_i+c_iw_i^Tw_i=1
:Citation:
Kettenring, <NAME>. "Canonical analysis of several sets of variables." Biometrika 58.3 (1971): 433-451.
:Example:
>>> from cca_zoo.models import MCCA
>>> import numpy as np
>>> rng=np.random.RandomState(0)
>>> X1 = rng.random((10,5))
>>> X2 = rng.random((10,5))
>>> X3 = rng.random((10,5))
>>> model = MCCA()
>>> model.fit((X1,X2,X3)).score((X1,X2,X3))
array([0.97200847])
"""
def __init__(
self,
latent_dims: int = 1,
scale: bool = True,
centre=True,
copy_data=True,
random_state=None,
c: Union[Iterable[float], float] = None,
eps=1e-3,
):
"""
Constructor for MCCA
:param latent_dims: number of latent dimensions to fit
:param scale: normalize variance in each column before fitting
:param centre: demean data by column before fitting (and before transforming out of sample
:param copy_data: If True, X will be copied; else, it may be overwritten
:param random_state: Pass for reproducible output across multiple function calls
:param c: Iterable of regularisation parameters for each view (between 0:CCA and 1:PLS)
:param eps: epsilon for stability
"""
super().__init__(
latent_dims=latent_dims,
scale=scale,
centre=centre,
copy_data=copy_data,
accept_sparse=["csc", "csr"],
random_state=random_state,
)
self.c = c
self.eps = eps
def _setup_evp(self, views: Iterable[np.ndarray], **kwargs):
all_views = np.concatenate(views, axis=1)
C = all_views.T @ all_views / self.n
# Can regularise by adding to diagonal
D = block_diag(
*[
(1 - self.c[i]) * m.T @ m / self.n + self.c[i] * np.eye(m.shape[1])
for i, m in enumerate(views)
]
)
C -= block_diag(*[view.T @ view / self.n for view in views]) - D
D_smallest_eig = min(0, | np.linalg.eigvalsh(D) | numpy.linalg.eigvalsh |
import unittest
from nose.plugins.attrib import attr
import numpy
import theano
from theano import tensor, function
# this tests other ops to ensure they keep the dimensions of their
# inputs correctly
class TestKeepDims(unittest.TestCase):
def makeKeepDims_local(self, x, y, axis):
if axis is None:
newaxis = range(x.ndim)
elif isinstance(axis, int):
if axis < 0:
newaxis = [axis + x.type.ndim]
else:
newaxis = [axis]
else:
newaxis = []
for a in axis:
if a < 0:
a += x.type.ndim
newaxis.append(a)
i = 0
new_dims = []
for j, _ in enumerate(x.shape):
if j in newaxis:
new_dims.append('x')
else:
new_dims.append(i)
i += 1
return tensor.DimShuffle(y.type.broadcastable, new_dims)(y)
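    # For a 3-d input x and axis=1 here, the reduced y is 2-d and new_dims
    # becomes [0, 'x', 1]: DimShuffle re-inserts a broadcastable dimension
    # where axis 1 was removed, mirroring keepdims=True.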
@attr('slow')
def test_keepdims(self):
x = tensor.dtensor3()
a = numpy.random.rand(3, 2, 4)
# We don't need to test all opt and C code, as this is tested
# by the ops tests.
mode = theano.compile.Mode(optimizer="fast_compile", linker="py")
# 'max_and_argmax' has two outputs and can be specified with either
# a single or every axis:
for axis in [0, 1, 2, [0], [1], [2], None, [0, 1, 2],
[-1], [-2], [-3], [-1, -2, -3], [0, -1, -2],
[-2, -3, 2]]:
op = tensor.max_and_argmax
f = function([x], [op(x, axis=axis, keepdims=True)[0],
self.makeKeepDims_local(
x, op(x, axis=axis, keepdims=False)[0],
axis)],
mode=mode)
ans1, ans2 = f(a)
assert | numpy.allclose(ans1, ans2) | numpy.allclose |
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/..")
import unittest
import numpy as np
from scipy.signal import convolve2d
from MyConvolution import convolve
class TestMyConvolution(unittest.TestCase):
def test_shape(self):
im = np.ones((5,5))
k = np.ones((3,3))
conv = convolve(im, k)
in_shape = im.shape
out_shape = conv.shape
np.testing.assert_equal(out_shape, in_shape)
def test_result(self):
im = np.ones((5,5))
k = np.ones((3,3))
conv = convolve(im, k)
exp = np.array([
[4., 6., 6., 6., 4.],
[6., 9., 9., 9., 6.],
[6., 9., 9., 9., 6.],
[6., 9., 9., 9., 6.],
[4., 6., 6., 6., 4.]
])
np.testing.assert_array_equal(conv, exp)
def test_scipy_shape(self):
im = np.ones((5,5))
k = np.ones((3,3))
conv = convolve2d(im, k, mode='same')
in_shape = im.shape
out_shape = conv.shape
np.testing.assert_equal(out_shape, in_shape)
def test_scipy_result(self):
im = np.ones((5,5))
k = np.ones((3,3))
conv = convolve2d(im, k, mode='same')
exp = np.array([
[4., 6., 6., 6., 4.],
[6., 9., 9., 9., 6.],
[6., 9., 9., 9., 6.],
[6., 9., 9., 9., 6.],
[4., 6., 6., 6., 4.]
])
np.testing.assert_array_equal(conv, exp)
def test_invalid_k_dim_x(self):
im = np.ones((5,5))
kernel = np.ones((2,3))
np.testing.assert_raises(ValueError, convolve, im, kernel)
def test_invalid_k_dim_y(self):
im = np.ones((5,5))
kernel = np.ones((3,2))
np.testing.assert_raises(ValueError, convolve, im, kernel)
def test_zero_padding(self):
im = np.ones((5,5))
k = np.ones((3,3))
conv = convolve(im, k)
corners = conv[[0, 0, -1, -1], [0, -1, -1, 0]]
exp = np.array([4., 4., 4., 4.])
np.testing.assert_array_equal(corners, exp)
def test_colour_shape(self):
im = np.ones((5,5,3))
k = | np.ones((3,3)) | numpy.ones |
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 27 15:45:56 2020
@author: ZongSing_NB
Main reference:
http://www.ejournal.org.cn/EN/10.3969/j.issn.0372-2112.2019.10.020
"""
import numpy as np
import matplotlib.pyplot as plt
class EGolden_SWOA():
def __init__(self, fitness, D=30, P=20, G=500, ub=1, lb=0,
b=1, a_max=2, a_min=0, a2_max=-1, a2_min=-2, l_max=1, l_min=-1):
self.fitness = fitness
self.D = D
self.P = P
self.G = G
self.ub = ub
self.lb = lb
self.a_max = a_max
self.a_min = a_min
self.a2_max = a2_max
self.a2_min = a2_min
self.l_max = l_max
self.l_min = l_min
self.b = b
self.gbest_X = np.zeros([self.D])
self.gbest_F = np.inf
self.loss_curve = np.zeros(self.G)
def opt(self):
        # Initialization
self.X = np.random.uniform(low=self.lb, high=self.ub, size=[self.P, self.D])
tao = (np.sqrt(5)-1)/2
        x1 = -np.pi+(1-tao)*2*np.pi
x2 = -np.pi+tao*2*np.pi
        # Iteration loop
for g in range(self.G):
# OBL
self.X, F = self.OBL()
            # Update the best solution
if np.min(F) < self.gbest_F:
idx = F.argmin()
self.gbest_X = self.X[idx].copy()
self.gbest_F = F.min()
            # Convergence curve
self.loss_curve[g] = self.gbest_F
            # Update positions
a = self.a_max - (self.a_max-self.a_min)*(g/self.G)
for i in range(self.P):
p = np.random.uniform()
r1 = np.random.uniform()
r2 = np.random.uniform()
A = 2*a*r1 - a
C = 2*r2
if np.abs(A)>=1:
X_rand = self.X[np.random.randint(low=0, high=self.P, size=self.D), :]
X_rand = np.diag(X_rand).copy()
D = | np.abs(C*X_rand - self.X[i, :]) | numpy.abs |
# Original license for TestTargetFinder:
# The Leginon software is Copyright under
# Apache License, Version 2.0
# For terms of the license agreement
# see http://leginon.org
#
# New modifications made by <NAME>
# Also licensed under Apache License, Version 2.0
# see readme.md in the github repo for more information
import os.path
import threading
from leginon import leginondata
from pyami import mrc
import targetfinder
import gui.wx.TestTargetFinder
#new imports
import numpy as np
import tensorflow as tf
from scipy.signal import convolve
def find_best(inp):
# finds the best combination of exposures and focus points using a Monte Carlo search
# designed with the Arctica Talos in mind, with a fixed beam size
# modifications required to use on a Titan Krios more effectively
res = inp[0] # matrix containing classifier output
indices = inp[1] # index of carbon areas
inv_scores = inp[2] # how close to the center the carbon is. Using 1/R to calculate value (R of 0 is impossible due to shape of the matrix, but keep that in mind if changing search grid size)
iter_size = inp[3] # number of iterations to try
seed = inp[4] # fixed seed. Leftover from multithreading variant
# original version used multithreading, hence a packed input rather than passing them individually
local_rng = np.random.RandomState(seed)
best = 0 # variable to store best value
best_points = [] # variable to store the best set of points
l = [[-2,0],[0,2],[2,0],[0,-2]] # determines how big the beam burn should be. Uses a star shape. Can be removed for the Titan
for n in range(iter_size):
points = []
total = 0
cur_ver = res.copy() # makes a copy of the matrix to make changes too
padded = np.pad(cur_ver, 2, mode='constant')
car_area = indices[local_rng.choice(len(indices), p=inv_scores)] # randomly picks a carbon focus point, weighted by how close it is to the center
padded[car_area[0]+1:car_area[0]+4,car_area[1]+1:car_area[1]+4] = -100 # simulated beam burn to the general area
        for j in l: # burns the surrounding area
padded[car_area[0]+j[0]+2,car_area[1]+j[1]+2] = -100
cur_ver = padded[2:16,2:16] # updates cur_ver to contain the burn
points.append(car_area) # adds focus point to the list. Focus point will always be first
for i in range(16): # tries to add up to 16 points, but will usually stop before then.
conv = convolve(cur_ver, np.ones((3,3)), mode='same', method='direct') # convolves the burn to make locations near it less attractive to the algorithm
maxes = np.argsort(conv,axis=None,)[::-1][0:5] # finds the locations of the max values
maxes = [np.unravel_index(i,(14,14)) for i in maxes]
max_sums = np.array([conv[i] for i in maxes])
t_val = max_sums > 0 # only keeps the values if value is above 0 (not burned), sets to false otherwise and will be skipped in the next step
maxes = [i[0] for i in zip(maxes,t_val) if i[1]]
max_sums = max_sums[t_val]
if len(max_sums) == 0:
break
max_sums = max_sums/sum(max_sums) # normalized to 1 to work for Monte Carlo search
ind = maxes[local_rng.choice(len(max_sums),p=max_sums)] # chooses a point weighted by its goodness score
if conv[ind] > 0:
# tmp_sum = total + conv[ind]
# if conv[ind]/tmp_sum < 0.1:
# break
# points.append(ind)
# total = tmp_sum
# above is an alternative code that used the conv values. Using non conv works better
tmp_sum = total + cur_ver[ind]
if cur_ver[ind]/tmp_sum < 0.1:
break
points.append(ind)
total = tmp_sum
padded[ind[0]+1:ind[0]+4,ind[1]+1:ind[1]+4] = -100 # simulates new burn
for j in l:
padded[ind[0]+j[0]+2,ind[1]+j[1]+2] = -100
cur_ver = padded[2:16,2:16]
else:
break
if total > best: # updates best score if better
best = total
best_points = points
return (best, best_points)
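# A minimal usage sketch of find_best, assuming a 14x14 classifier grid and
# hypothetical carbon squares/weights (the names below are illustrative only):
def _demo_find_best():
    rng = np.random.RandomState(0)
    scores = rng.rand(14, 14)              # stand-in for classifier output
    carbon = [(3, 3), (7, 7), (10, 4)]     # assumed carbon grid squares
    inv_r = np.array([1.0, 2.0, 1.5])
    inv_r = inv_r / inv_r.sum()            # rng.choice needs probabilities
    return find_best((scores, carbon, inv_r, 50, 0))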
class TestTargetFinder(targetfinder.TargetFinder):
# most code left unchanged from the Leginon version. Only "your_targetfinder" is new.
# make sure to rename the class name if installing with multiple plug-ins
panelclass = gui.wx.TestTargetFinder.Panel
settingsclass = leginondata.TestTargetFinderSettingsData
defaultsettings = dict(targetfinder.TargetFinder.defaultsettings)
defaultsettings.update({
'test image': '',
})
def __init__(self, *args, **kwargs):
self.userpause = threading.Event()
targetfinder.TargetFinder.__init__(self, *args, **kwargs)
self.image = None
self.model = tf.keras.models.load_model("full_model_all_filters_v3") # imports tensorflow model
self.start()
def readImage(self, filename=''):
if filename:
self.image = mrc.read(filename)
self.setImage(self.image, 'Image')
def testFindTargets(self):
focus_targets_on_image = []
acquisition_targets_on_image = []
focus_targets_on_image, acquisition_targets_on_image = self.your_targetfinder(self.image)
self.setTargets(acquisition_targets_on_image, 'acquisition')
self.setTargets(focus_targets_on_image, 'focus')
import time
time.sleep(1)
if self.settings['user check']:
self.panel.foundTargets()
def your_targetfinder(self, image):
image_org = np.array(image)
image_norm = (image_org - np.mean(image_org))/np.std(image_org)
image_norm = (image_norm + 7.5)/15
image_norm[image_norm < 0] = 0
image_norm[image_norm > 1] = 1
# above code normalizes the image the same way as during training
ten_arr = np.zeros((14*14,64,64), dtype='float32')
for i in range(14):
for j in range(14):
n = i*14 + j
ten_arr[n] = image_norm[64*j+14:64*(j+1)+14,64*i+14:64*(i+1)+14]
ten_arr = ten_arr.reshape(-1, 64, 64, 1)
# image is split up into segments
pred = self.model.predict(ten_arr) # uses model to predict classification scores
pred = pred.reshape(14,14,4)
tmp = | np.moveaxis(pred, 2, 0) | numpy.moveaxis |
import numpy as np
from matplotlib.patches import Ellipse
def generate_bounding_box_from_mask(mask):
"""
Function for generating a bounding box around a mask.
Bounding box covers the extremes of the mask inclusively such that the far left box aligns
with the far left of the mask.
:param mask: 2D mask image (zero and non-zero pixels). Non-zero pixels counted as wanted (True) pixels
    :return: List of inclusive bounding box coordinates. Format [<left>, <top>, <right>, <bottom>]
"""
flat_x = np.any(mask, axis=0)
flat_y = np.any(mask, axis=1)
if not np.any(flat_x) and not np.any(flat_y):
raise ValueError("No positive pixels found, cannot compute bounding box")
xmin = np.argmax(flat_x)
ymin = np.argmax(flat_y)
xmax = len(flat_x) - 1 - np.argmax(flat_x[::-1])
ymax = len(flat_y) - 1 - np.argmax(flat_y[::-1])
return [xmin, ymin, xmax, ymax]
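# A minimal sketch of the inclusive-box convention (illustrative helper only):
# a 2x3 block of ones spanning rows 1-2 and columns 1-3 gives [1, 1, 3, 2].
def _example_bounding_box():
    mask = np.zeros((5, 5))
    mask[1:3, 1:4] = 1
    return generate_bounding_box_from_mask(mask)  # -> [xmin, ymin, xmax, ymax] = [1, 1, 3, 2]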
def draw_cov(box, covs, ax, colour='k', mode='ellipse'):
"""
Function for drawing covariances around a bounding box
:param box: list of box corner coordinates. Format: [<left>, <up>, <right>, <bottom>]
    :param covs: covariance matrices for both corners. Format: [<top_left_covariance>, <bottom_right_covariance>].
Individual covariances format: [[<x_variance>, <xy_correlation>], [<xy_correlation>, <y_variance>]]
:param ax: Matplotlib axis where covariances shall be drawn.
:param colour: Colour for the covariance to be drawn in. Default: Black
:param mode: Mode of covariance drawing. Options: ['arrow', 'ellipse']. Default: 'ellipse'
:return: None
"""
# Calculate the eigenvalues and eigenvectors for both top-left (tl) and bottom-right (br) corner covariances
tl_vals, tl_vecs = np.linalg.eig(covs[0])
br_vals, br_vecs = np.linalg.eig(covs[1])
# flip the vectors along the y axis (for plotting as y is inverted)
tl_vecs[:, 1] *= -1
br_vecs[:, 1] *= -1
# Determine which eigenvalue/vector is largest and which covariance that corresponds to
tl_val_max_idx = np.argmax(tl_vals)
br_val_max_idx = np.argmax(br_vals)
argmax_cov1 = np.argmax(np.diag(covs[0]))
argmax_cov2 = np.argmax(np.diag(covs[1]))
# Calculate the magnitudes along each eigenvector used for visualization (2 * std dev)
magnitude_tl1 = | np.sqrt(covs[0][argmax_cov1][argmax_cov1]) | numpy.sqrt |
import numpy as np
from rlberry.envs.benchmarks.ball_exploration import PBall2D
p = 5
A = np.array([
[1.0, 0.1],
[-0.1, 1.0]
]
)
reward_amplitudes = np.array([1.0, 0.5, 0.5])
reward_smoothness = np.array([0.25, 0.25, 0.25])
reward_centers = [
np.array([0.75 * np.cos(np.pi / 2), 0.75 * np.sin(np.pi / 2)]),
np.array([0.75 * np.cos(np.pi / 6), 0.75 * | np.sin(np.pi / 6) | numpy.sin |
import numpy as np
# import os
import matplotlib.pyplot as plt
import scipy.misc
from neuralnetwork import NeuralNetwork
import pickle
layers = (784, 200, 10)
epochs = 4
learning_rate = 0.2
network_score = []
n = NeuralNetwork(layers, learning_rate)
def mnist_train():
with open('./mnist_train.csv', 'r') as f:
        training_data = f.readlines()
print("Starting Training...")
for i in range(epochs):
print("Epoch:", i)
        for record in training_data:
all_values = record.split(',')
inputs = (np.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
# Show current image being tested.
# inputs_shaped = np.asfarray(all_values[1:]).reshape((28, 28))
# plt.imshow(inputs_shaped, cmap='Greys', interpolation='None')
# if c < 3:
# plt.imsave('test{}.png'.format(
# c), inputs_shaped, cmap='Greys')
# c += 1
# Creates the desired output layer based on
# the amount of output nodes used in the network.
targets = np.zeros(layers[-1]) + 0.01
targets[int(all_values[0])] = 0.99
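            # e.g. with 10 output nodes and label 3: targets = [0.01]*10, then targets[3] = 0.99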
n.train(inputs, targets)
with open('./trained-network', 'wb') as f:
weights = (n.whi, n.who)
pickle.dump(weights, f)
def mnist_test():
with open('./mnist_test.csv', 'r') as f:
test_data = f.readlines()
for record in test_data:
all_values = record.split(',')
correct_label = int(all_values[0])
inputs = (np.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
outputs = n.query(inputs)
label = np.argmax(outputs)
if label == correct_label:
network_score.append(1)
else:
network_score.append(0)
# print("Correct:", correct_label, "| Answer:", label)
def mnist_test_with_weights():
with open('./trained-network', 'rb') as f:
weights = pickle.load(f)
with open('./mnist_test.csv', 'r') as f:
test_data = f.readlines()
for record in test_data:
all_values = record.split(',')
correct_label = int(all_values[0])
inputs = (np.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
outputs = n.query_with_weights(inputs, weights)
label = | np.argmax(outputs) | numpy.argmax |
import os
import copy
import torch
import numpy as np
from torch import optim
from torch.nn import functional as F
from torch.distributions.categorical import Categorical
from .networks import ACUnet
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class PPO:
def __init__(self, action_dims, args):
self.clip = args.clip
self.epoch = args.epoch
self.ent_coef = args.ent_coef
self.batch_size = args.batch_size
self.vloss_coef = args.vloss_coef
self.max_grad_norm = args.max_grad_norm
# start to build the network.
if args.actor_net_type == 'unet':
self.actor = ACUnet(action_dims, None, args).to(device)
else:
raise NotImplementedError
self.old_actor = copy.deepcopy(self.actor).to(device)
# define the optimizer...
self.optimizer = optim.Adam(self.actor.parameters(), args.lr, eps=args.eps)
def predict(self, obs, is_training=False, training_mask=False):
if is_training:
self.actor.train()
else:
self.actor.eval()
obs = np.expand_dims(obs, axis=0)
with torch.no_grad():
# get tensors
obs_tensor = torch.tensor(obs, dtype=torch.float32).to(device)
values, acts_logit = self.actor(obs_tensor)
acts_softmax = F.softmax(acts_logit, dim=1)
# select actions
actions = Categorical(acts_softmax).sample()
if training_mask:
return acts_softmax.detach().cpu().numpy().squeeze(), actions.detach().cpu().numpy().squeeze()
else:
return values.detach().cpu().numpy().squeeze(), actions.detach().cpu().numpy().squeeze()
# update the network
def _update_network(self, obs, actions, returns, advantages):
# before update the network, the old network will try to load the weights
self.old_actor.load_state_dict(self.actor.state_dict())
inds = | np.arange(obs.shape[0]) | numpy.arange |
from __future__ import print_function
import functools
import numpy as np
from scipy.stats import norm as ndist
import regreg.api as rr
from .query import gaussian_query
from .randomization import randomization
class screening(gaussian_query):
def __init__(self,
observed_data,
covariance,
randomizer,
perturb=None):
self.observed_score_state = -observed_data # -Z if Z \sim N(\mu,\Sigma), X^Ty in regression setting
self.nfeature = p = self.observed_score_state.shape[0]
self.covariance = covariance
self.randomizer = randomizer
self._initial_omega = perturb
def fit(self, perturb=None):
gaussian_query.fit(self, perturb=perturb)
self._randomized_score = self.observed_score_state - self._initial_omega
return self._randomized_score, self._randomized_score.shape[0]
def multivariate_targets(self, features, dispersion=1.):
"""
Entries of the mean of \Sigma[E,E]^{-1}Z_E
"""
score_linear = self.covariance[:, features].copy() / dispersion
Q = score_linear[features]
cov_target = np.linalg.inv(Q)
observed_target = -np.linalg.inv(Q).dot(self.observed_score_state[features])
crosscov_target_score = -score_linear.dot(cov_target)
alternatives = ['twosided'] * features.sum()
return observed_target, cov_target * dispersion, crosscov_target_score.T * dispersion, alternatives
def full_targets(self, features, dispersion=1.):
"""
Entries of the mean of \Sigma[E,E]^{-1}Z_E
"""
score_linear = self.covariance[:, features].copy() / dispersion
Q = self.covariance / dispersion
cov_target = (np.linalg.inv(Q)[features])[:, features]
observed_target = -np.linalg.inv(Q).dot(self.observed_score_state)[features]
crosscov_target_score = -np.identity(Q.shape[0])[:, features]
alternatives = ['twosided'] * features.sum()
return observed_target, cov_target * dispersion, crosscov_target_score.T * dispersion, alternatives
def marginal_targets(self, features):
"""
Entries of the mean of Z_E
"""
score_linear = self.covariance[:, features]
Q = score_linear[features]
cov_target = Q
observed_target = -self.observed_score_state[features]
crosscov_target_score = -score_linear
alternatives = ['twosided'] * features.sum()
return observed_target, cov_target, crosscov_target_score.T, alternatives
class marginal_screening(screening):
def __init__(self,
observed_data,
covariance,
randomizer,
threshold,
perturb=None):
threshold = np.asarray(threshold)
if threshold.shape == ():
threshold = np.ones_like(observed_data) * threshold
self.threshold = threshold
screening.__init__(self,
observed_data,
covariance,
randomizer,
perturb=None)
def fit(self, perturb=None):
_randomized_score, p = screening.fit(self, perturb=perturb)
active = np.fabs(_randomized_score) >= self.threshold
self._selected = active
self._not_selected = ~self._selected
sign = np.sign(-_randomized_score)
active_signs = sign[self._selected]
sign[self._not_selected] = 0
self.selection_variable = {'sign': sign,
'variables': self._selected.copy()}
self.observed_opt_state = ( | np.fabs(_randomized_score) | numpy.fabs |
# -*- coding: utf-8 -*-
"""
Generic utilities for working with images.
Note that images are read in by cv2utils to be numpy arrays.
Change log:
2015/10/07 -- repo_dir, image_dir, dir_by_ext, test_image written;
<EMAIL>
2015/10/10 -- added channel management methods; nloomis@
2016/01/24 -- added __authors__ variable; fixed order of imports; nloomis@
2017/02/05 -- added image resize/scaling functions; nloomis@
"""
__authors__ = ('<EMAIL>',)
import cv2
import cv2utils
import matplotlib.pyplot as plt
import numpy
import os
import scipy.ndimage
#try:
# from skimage import filters
#except ImportError:
# # handles the case where skimage is v0.10 (the filter module was renamed
# # to filters in v0.11)
# from skimage import filter as filters
#
# file handling
#
#constants
#extensions for images recognized by cv2
IMAGE_EXT = ('jpg', 'jpeg', 'jp2', 'tif', 'tiff', 'png', 'bmp',
'ppm', 'pbm', 'pgm', 'sr', 'ras')
def repo_dir():
"""Path to the base of the repository."""
#nb: this function assumes that the file is in <repo>/python
file_path = os.path.dirname(os.path.abspath(__file__))
return os.path.split(file_path)[0]
def image_dir():
"""Path to the directory of repo test images."""
return os.path.join(repo_dir(), 'images')
def dir_by_ext(srcdir, ext):
"""Finds files in a source directory that end with the given extensions.
The ext variable can either be a string with a single extension, or a tuple
of extension strings to try matching; any match will succeed. The extension
should NOT contain a period ('.'), only the extension letters, and the
extensions should be in lower case.
Examples:
dir_by_ext('some/path', ('jpg', 'jpeg'))
dir_by_ext('some/path', 'txt')
"""
#convert the ext to a tuple if it isn't already
    if isinstance(ext, str):
        ext = (ext,)
    elif not isinstance(ext, tuple):
        ext = tuple(ext)
    files = os.listdir(srcdir)
    matches = [file_name for file_name in files
               if len(file_name.split('.')) > 1 and \
               file_name.split('.')[-1].lower().endswith(ext)]
#the logical test pulls out the extension, converts it to lowercase, and
#checks to see if it matches any of the supplied extensions. note that if
#there is no extension, the test will fail before it bothers checking for
#the match.
return matches
def test_image(image_name=None):
"""Loads a test image with the closest match to the supplied name."""
test_image_dir = image_dir()
available_images = dir_by_ext(image_dir(), IMAGE_EXT)
if image_name is None:
print('Available images are: %s' % available_images)
return None
#find the matching image names
assert(isinstance(image_name, str))
matches = [file_name for file_name in available_images
if file_name.startswith(image_name)]
if len(matches) == 0:
print('No name match found for %s.' % image_name)
return None
elif len(matches) > 1:
print('Multiple matches found for %s: %s.' % (image_name, matches))
return None
else:
#load the matching image
filename = os.path.join(test_image_dir, matches[0])
print('loading: %s' % filename)
return cv2utils.imread(filename)
#
# image channels
#
def flip_channels(img):
"""Flips the order of channels in an image; eg, BGR <-> RGB.
This function assumes the image is a numpy.array (what's returned by cv2
function calls) and uses the numpy re-ordering methods. The number of
channels does not matter.
If the image array is strictly 2D, no re-ordering is possible and the
original data is returned untouched.
"""
if len(img.shape) == 2:
        return img
return img[:,:,::-1]
def cat3(*channels):
"""Concatenate channels in the supplied order.
Convenience function."""
#numpy.dstack() is 40% faster than cv2.merge()
return numpy.dstack(channels)
def split2(img):
"""Splits a 2-channel image into its constituent channels.
Convenience function using numpy slices, ~300x faster than cv2.split()."""
assert(isinstance(img, numpy.ndarray))
assert(nchannels(img) == 2)
return img[:, :, 0], img[:, :, 1]
#TODO: split into column vectors if a 2D array
def split3(img):
"""Splits a 3-channel image into its constituent channels.
Convenience function using numpy slices, ~300x faster than cv2.split()."""
assert(isinstance(img, numpy.ndarray))
assert(nchannels(img) == 3)
return img[:, :, 0], img[:, :, 1], img[:, :, 2]
#TODO: split into column vectors if a 2D array
def nchannels(img):
"""Returns the number of channels in an image."""
assert(isinstance(img, numpy.ndarray))
if img.ndim < 3:
return 1
else:
return img.shape[2]
#
# data types
#
def datatype(img):
"""The type of the data used in the image: img.dtype."""
assert(isinstance(img, numpy.ndarray))
return img.dtype
def float2uint8(img):
"""Converts a float array to a uint8 type.
The float values are expected to be in the range [0..1]. uint8 values are
returned in the range [0..255]."""
assert(isinstance(img, numpy.ndarray))
#TODO: img.view() may be faster, but isn't giving the right conversions?
return numpy.rint(img * 255).astype('uint8')
#
# display
#
def imshow(img, figure_name='image'):
"""Wrapper for matplotlib.pyplot.imshow()."""
#using matplotlib for now: (which expects RGB channel ordering)
plt.imshow(flip_channels(img))
#note that flip_channels() is MUCH faster than
#cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
#rem: when using cv2.imshow(), the waitkey needs to get masked! use
#waitKey(0) & 0xFF in 64-bit
#def mydisp(img, clim)
#see plt.imshow() for helpful args; vmin and vmax for min/max values, norm for
#normalizing the values; check for int vs float, though!
#
# resize
#
def scale_to_width(image, width_pixels):
"""Rescales an image so that its width has the specified pixel count."""
image_size = image.shape
scale_factor = float(width_pixels) / image_size[1]
target_height = round(scale_factor * image_size[0])
    target_size = (int(width_pixels), int(target_height))  # cv2.resize expects (width, height)
interp_method = scaling_interpolation_method(image_size, target_size)
return cv2.resize(image, target_size, interpolation=interp_method)
def scale_to_height(image, height_pixels):
"""Rescales an image so that its height has the specified pixel count."""
image_size = image.shape
scale_factor = float(height_pixels) / image_size[0]
target_width = round(scale_factor * image_size[1])
target_size = (int(target_width), int(height_pixels))
interp_method = scaling_interpolation_method(image_size, target_size)
return cv2.resize(image, target_size, interpolation=interp_method)
def scale_to_min_size(image, min_pixels):
"""Rescale image so that its minimum dimension is a given pixel count."""
image_size = image.shape
if image_size[0] < image_size[1]:
# The height is the smaller of the two dimensions.
return scale_to_height(image, min_pixels)
return scale_to_width(image, min_pixels)
def scaling_interpolation_method(original_size, destination_size):
"""Returns a preferred high-quality interpolation method for resizing.
The preferred method depends on whether the image is decreasing in size or
expanding in size.
"""
height_scale = float(destination_size[0]) / original_size[0]
width_scale = float(destination_size[1]) / original_size[1]
mean_scale = 0.5 * (height_scale + width_scale)
if mean_scale <= 1:
return cv2.INTER_AREA
return cv2.INTER_CUBIC
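# For instance, scaling_interpolation_method((100, 100), (50, 50)) returns
# cv2.INTER_AREA (shrinking), while (100, 100) -> (200, 200) returns cv2.INTER_CUBIC.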
#
# filters
#
def imfilter():
"""Filters an image using a kernel."""
pass
# TODO(nloomis): make sure that the number of channels in the image and
# the kernel match! May need to repmat the filter. If the filter has too
# many channels or can't be repmat'd without uncertainty about what is
# happening, throw an error.
def steerable_deriv(n_pix=None, sigma=1.5):
"""Builds a steerable Gaussian derivative filter in the x direction.
Transpose the output array to get the filter in the y direction.
Based on 'Design and use of steerable filters', Freeman and Adelson, PAMI,
1991.
Inputs:
n_pix: number of pixels in each side of the output filter. if n_pix is
not specified, it defaults to 3*sigma. n_pix can be even or odd.
sigma: amount of smoothing used for the filter; for a wider filter and
more smoothing, use a large sigma. the sigma value is approximately
the half-width of the filter in pixels. experiment with different
values between 1 and 10 for most image processing applications.
if sigma is not specified, it defaults to 1.5.
Returns:
a numpy array of size (n_pix x n_pix) with the weights of the x-direction
steerable derivative filter.
"""
if not n_pix:
n_pix = int(numpy.ceil(3 * sigma))
x = numpy.linspace(-n_pix / 2., n_pix / 2., int(n_pix))
X, Y = | numpy.meshgrid(x, x) | numpy.meshgrid |
import os
from collections import OrderedDict, namedtuple
from itertools import product
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
from torch import nn
def create_dict_combination(dict):
# get combinations
ordered_dict = OrderedDict(dict)
combinations = product(*ordered_dict.values())
# create named tuple to save combinations
combination_tuple = namedtuple('run', ordered_dict.keys())
# create array to save namedtupled and iterate over them
run_configs = []
# add combinations as configs
for combination in combinations:
run_configs.append(combination_tuple(*combination))
# return value
return run_configs
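# A small usage sketch (hypothetical parameter names): each returned namedtuple
# exposes one combination by field name, e.g.
#   params = {'lr': [0.01, 0.001], 'batch_size': [32, 64]}
#   for run in create_dict_combination(params):
#       print(run.lr, run.batch_size)   # 4 combinations in total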
def dict_to_torch(dict_inp, feature_count):
features = [value.tolist() for value in list(dict_inp.values())[1:]]
labels = [value.tolist() for value in list(dict_inp.values())[0]]
features = np.array(features).T # to set dimensions from feature_size x batch_size to its transpose
labels = | np.array(labels) | numpy.array |
# Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import numpy as np
import theano.tensor as tt
from scipy.special import logsumexp
from scipy.stats import multivariate_normal
from scipy.optimize import approx_fprime
from theano import function as theano_function
import arviz as az
from pymc3.backends.ndarray import NDArray
from pymc3.model import Point, modelcontext
from pymc3.sampling import sample_prior_predictive
from pymc3.theanof import (
floatX,
inputvars,
join_nonshared_inputs,
make_shared_replacements,
gradient,
)
from pymc3.sinf.GIS import GIS
import torch
class NF_SMC:
"""Sequential Monte Carlo with normalizing flow based sampling."""
def __init__(
self,
draws=2000,
start=None,
threshold=0.5,
model=None,
random_seed=-1,
chain=0,
frac_validate=0.1,
iteration=None,
alpha=(0,0),
k_trunc=0.5,
pareto=False,
epsilon=1e-3,
local_thresh=3,
local_step_size=0.1,
local_grad=True,
nf_local_iter=0,
max_line_search=2,
verbose=False,
n_component=None,
interp_nbin=None,
KDE=True,
bw_factor=0.5,
edge_bins=None,
ndata_wT=None,
MSWD_max_iter=None,
NBfirstlayer=True,
logit=False,
Whiten=False,
batchsize=None,
nocuda=False,
patch=False,
shape=[28,28,1],
):
self.draws = draws
self.start = start
self.threshold = threshold
self.model = model
self.random_seed = random_seed
self.chain = chain
self.frac_validate = frac_validate
self.iteration = iteration
self.alpha = alpha
self.k_trunc = k_trunc
self.pareto = pareto
self.epsilon = epsilon
self.local_thresh = local_thresh
self.local_step_size = local_step_size
self.local_grad = local_grad
self.nf_local_iter = nf_local_iter
self.max_line_search = max_line_search
self.verbose = verbose
self.n_component = n_component
self.interp_nbin = interp_nbin
self.KDE = KDE
self.bw_factor = bw_factor
self.edge_bins = edge_bins
self.ndata_wT = ndata_wT
self.MSWD_max_iter = MSWD_max_iter
self.NBfirstlayer = NBfirstlayer
self.logit = logit
self.Whiten = Whiten
self.batchsize = batchsize
self.nocuda = nocuda
self.patch = patch
self.shape = shape
self.model = modelcontext(model)
if self.random_seed != -1:
np.random.seed(self.random_seed)
self.beta = 0
self.variables = inputvars(self.model.vars)
self.weights = np.ones(self.draws) / self.draws
#self.sinf_logq = np.array([])
self.log_marginal_likelihood = 0
def initialize_population(self):
"""Create an initial population from the prior distribution."""
population = []
var_info = OrderedDict()
if self.start is None:
init_rnd = sample_prior_predictive(
self.draws,
var_names=[v.name for v in self.model.unobserved_RVs],
model=self.model,
)
else:
init_rnd = self.start
init = self.model.test_point
for v in self.variables:
var_info[v.name] = (init[v.name].shape, init[v.name].size)
for i in range(self.draws):
point = Point({v.name: init_rnd[v.name][i] for v in self.variables}, model=self.model)
population.append(self.model.dict_to_array(point))
self.nf_samples = np.array(floatX(population))
#self.posterior = np.copy(self.nf_samples)
self.var_info = var_info
def setup_logp(self):
"""Set up the likelihood logp function based on the chosen kernel."""
shared = make_shared_replacements(self.variables, self.model)
self.prior_logp_func = logp_forw([self.model.varlogpt], self.variables, shared)
self.likelihood_logp_func = logp_forw([self.model.datalogpt], self.variables, shared)
self.posterior_logp_func = logp_forw([self.model.logpt], self.variables, shared)
self.posterior_dlogp_func = logp_forw([gradient(self.model.logpt, self.variables)], self.variables, shared)
self.prior_dlogp_func = logp_forw([gradient(self.model.varlogpt, self.variables)], self.variables, shared)
self.likelihood_dlogp_func = logp_forw([gradient(self.model.datalogpt, self.variables)], self.variables, shared)
def get_nf_logp(self):
"""Get the prior, likelihood and tempered posterior log probabilities, for the current NF samples."""
priors = [self.prior_logp_func(sample) for sample in self.nf_samples]
likelihoods = [self.likelihood_logp_func(sample) for sample in self.nf_samples]
self.nf_prior_logp = np.array(priors).squeeze()
self.nf_likelihood_logp = np.array(likelihoods).squeeze()
self.nf_posterior_logp = self.nf_prior_logp + self.nf_likelihood_logp * self.beta
def get_full_logp(self):
"""Get the prior, likelihood and tempered posterior log probabilities, for the full sample set."""
priors = [self.prior_logp_func(sample) for sample in self.posterior]
likelihoods = [self.likelihood_logp_func(sample) for sample in self.posterior]
self.prior_logp = np.array(priors).squeeze()
self.likelihood_logp = np.array(likelihoods).squeeze()
self.posterior_logp = self.prior_logp + self.likelihood_logp * self.beta
def eval_prior_logp(self, param_vals):
"""Evaluates the prior logp for given parameter values."""
prior_logps = [self.prior_logp_func(val) for val in param_vals]
return np.array(prior_logps).squeeze()
def eval_prior_dlogp(self, param_vals):
"""Evaluates the gradient of the prior logp for given parameter values."""
prior_dlogps = [self.prior_dlogp_func(val) for val in param_vals]
return np.array(prior_dlogps).squeeze()
def sinf_logq(self, param_vals):
"""Function for evaluating the SINF gradient."""
sinf_logq = self.nf_model.evaluate_density(torch.from_numpy(param_vals.astype(np.float32))).numpy().astype(np.float64)
return sinf_logq.item()
def target_logp(self, param_vals):
"""Evaluates logp of the target distribution for given parameter values."""
logps = [self.posterior_logp_func(val) for val in param_vals]
return np.array(logps).squeeze()
def tempered_logp(self, param_vals):
"""Evaluates the tempered logp of the target distribution for given parameter values."""
logps = [self.prior_logp_func(val) + self.beta * self.likelihood_logp_func(val) for val in param_vals]
return np.array(logps).squeeze()
def target_dlogp(self, param_vals):
"""Evaluates the gradient of the target distribution logp for given parameter values."""
dlogps = [self.posterior_dlogp_func(val) for val in param_vals]
return np.array(dlogps).squeeze()
def tempered_dlogp(self, param_vals):
"""Evaluates the gradient of the temepered target distribution for given parameter values."""
dlogps = [self.prior_dlogp_func(val) + self.beta * self.likelihood_dlogp_func(val) for val in param_vals]
return np.array(dlogps).squeeze()
def regularize_weights(self):
"""Either performs Pareto-smoothing of the IW, or applies clipping."""
if self.pareto:
psiw = az.psislw(self.log_sinf_weights)
self.log_sinf_weights = psiw[0]
self.sinf_weights = np.exp(self.log_sinf_weights)
elif not self.pareto:
self.log_sinf_weights = np.clip(self.log_sinf_weights, a_min=None,
a_max=logsumexp(self.log_sinf_weights) + (self.k_trunc - 1) * np.log(len(self.log_sinf_weights)))
self.log_sinf_weights = self.log_sinf_weights - logsumexp(self.log_sinf_weights)
self.sinf_weights = np.exp(self.log_sinf_weights)
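    # With the default k_trunc = 0.5 the clipping branch caps each log-weight at
    # logsumexp(w) + (k_trunc - 1)*log(n) = logsumexp(w) - 0.5*log(n), where
    # n = len(self.log_sinf_weights), before the weights are renormalised.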
def local_exploration(self, logq_func=None, dlogq_func=None):
"""Perform local exploration."""
self.high_iw_idx = np.where(self.log_sinf_weights >= np.log(self.local_thresh) - np.log(self.draws))[0]
self.num_local = len(self.high_iw_idx)
self.high_iw_samples = self.nf_samples[self.high_iw_idx, ...]
self.high_log_weight = self.log_sinf_weights[self.high_iw_idx]
self.high_weights = self.sinf_weights[self.high_iw_idx]
print(f'Number of points we perform additional local exploration around = {self.num_local}')
self.local_samples = np.empty((0, np.shape(self.high_iw_samples)[1]))
self.local_log_weight = np.array([])
self.modified_log_weight = np.array([])
self.local_weights = np.array([])
self.modified_weights = np.array([])
for i, sample in enumerate(self.high_iw_samples):
sample = sample.reshape(-1, len(sample))
if self.local_grad:
if dlogq_func is None:
raise Exception('Using gradient-based exploration requires you to supply dlogq_func.')
self.log_weight_grad = self.tempered_dlogp(sample.astype(np.float64)) - dlogq_func(sample.astype(np.float64))
elif not self.local_grad:
if logq_func is None:
raise Exception('Gradient-free approximates gradients with finite difference. Requires you to supply logq_func.')
self.log_weight_grad = approx_fprime(sample, self.tempered_logp, np.finfo(float).eps) - approx_fprime(sample, logq_func, np.finfo(float).eps)
self.log_weight_grad = np.asarray(self.log_weight_grad).astype(np.float64)
delta = 1.0 * self.local_step_size
proposed_step = sample + delta * self.log_weight_grad
line_search_iter = 0
while self.tempered_logp(proposed_step) < self.tempered_logp(sample):
delta = delta / 2.0
proposed_step = sample + delta * self.log_weight_grad
line_search_iter += 1
if line_search_iter >= self.max_line_search:
break
local_log_w = self.high_log_weight[i] + self.tempered_logp(proposed_step) - np.log(np.exp(self.tempered_logp(proposed_step)) + np.exp(self.tempered_logp(sample)))
modif_log_w = self.high_log_weight[i] + self.tempered_logp(sample) - np.log(np.exp(self.tempered_logp(proposed_step)) + np.exp(self.tempered_logp(sample)))
self.local_log_weight = np.append(self.local_log_weight, local_log_w)
self.modified_log_weight = np.append(self.modified_log_weight, modif_log_w)
self.local_weights = np.append(self.local_weights, np.exp(local_log_w))
self.modified_weights = np.append(self.modified_weights, np.exp(modif_log_w))
self.local_samples = np.append(self.local_samples, proposed_step, axis=0)
self.log_sinf_weights[self.high_iw_idx] = self.modified_log_weight
self.sinf_weights[self.high_iw_idx] = self.modified_weights
self.log_sinf_weights = np.append(self.log_sinf_weights, self.local_log_weight)
self.sinf_weights = | np.append(self.sinf_weights, self.local_weights) | numpy.append |
#from Python
import time
import csv
import os
import math
import numpy as np
import sys
from shutil import copyfile
#from Pytorch
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets
from torchvision import transforms
from torchvision.utils import save_image
version = '5.4-Freq'
eps = 0.000001
def Laplacian(input,ksize):
if ksize==1:
        kernel = torch.tensor([[0., 1., 0.],
                               [1., -4., 1.],
                               [0., 1., 0.]])
else:
kernel = torch.ones(ksize,ksize)
kernel[int(ksize/2),int(ksize/2)] = 1 - ksize**2
kernel = Variable(kernel.view(1,1,1,ksize,ksize))
output = F.conv3d(input.view(input.size()[0],1,3,input.size()[2],input.size()[3]),kernel,padding = [0, int(ksize/2), int(ksize/2)]).view(input.size()[0],-1,input.size()[2],input.size()[3])
return output
def Gaussian(input,ksize,sigma):
ax = np.arange(-ksize // 2 + 1., ksize // 2 + 1.)
xx, yy = np.meshgrid(ax, ax)
kernel = np.exp(-(xx**2 + yy**2) / (2. * sigma**2))
kernel = kernel / np.sum(kernel)
kernel = Variable((torch.from_numpy(kernel).float()).cuda()).view(1,1,1,ksize,ksize)
output = F.conv3d(input.view(input.size()[0],1,3,input.size()[2],input.size()[3]),kernel,padding = [0, int(ksize/2), int(ksize/2)]).view(input.size()[0],-1,input.size()[2],input.size()[3])
return output
def Gaussiankernel(b,c,h,w,sigma):
ax = np.arange(-w // 2 + 1., w // 2 + 1.)
ay = np.arange(-h // 2 + 1., h // 2 + 1.)
xx, yy = np.meshgrid(ax, ay)
kernel = np.exp(-(xx**2 + yy**2) / (2. * sigma**2))
kernel = kernel / np.max(kernel)
kernel = | np.fft.fftshift(kernel) | numpy.fft.fftshift |
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
import numpy as np
from numpy import cos, sin, tan
def load_mesh_file(filename):
"""Load GiD .msh file.
Parameters
----------
filename: str
the name of the GiD mesh file in meshfile folder without extension.
Returns
-------
array_nodes:
an array of coordinates of all nodes.
array_elements:
an array of numbers of nodes of all elements.
number_of_nodes:
the number of nodes.
number_of_elements:
the number of elements.
"""
with open("meshfile\\" + filename + ".msh") as file:
lines = file.readlines()
for i, line in enumerate(lines):
if line.strip() == "Coordinates":
index_node_start = i
if line.strip() == "End Coordinates":
index_node_end = i
if line.strip() == "Elements":
index_element_start = i
if line.strip() == "End Elements":
index_element_end = i
number_of_elements = index_element_end - index_element_start - 1
number_of_nodes = index_node_end - index_node_start - 1
array_nodes = np.empty((3, number_of_nodes))
for i in range(index_node_start + 1, index_node_end):
line = lines[i].split()
for j in range(3):
array_nodes[j, i - index_node_start - 1] = line[j + 1]
array_elements = np.empty((2, number_of_elements), dtype=int)
for i in range(index_element_start + 1, index_element_end):
line = lines[i].split()
for j in range(2):
array_elements[j, i - index_element_start - 1] = line[j + 1]
return array_nodes, array_elements, number_of_nodes, number_of_elements
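# Sketch of the .msh layout assumed by load_mesh_file (inferred from the parser
# above; real GiD files may carry extra header fields that are simply ignored):
#
#   Coordinates
#   1  0.0  0.0  0.0
#   2  1.0  0.0  0.0
#   End Coordinates
#   Elements
#   1  1  2
#   End Elements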
def get_eta_and_mu(alpha):
"""Get the value of eta and mu. See (4.46) of the PhD thesis of <NAME>.
Parameters
----------
alpha: float
the angle of the rotation.
Returns
-------
The first coefficient eta: float.
The second coefficient mu: float.
"""
if alpha == 0.:
eta = 1 / 12
mu = 1 / 360
else:
eta = (2 * sin(alpha) - alpha * (1 + cos(alpha))) / \
(2 * alpha ** 2 * sin(alpha))
mu = (alpha * (alpha + sin(alpha)) - 8 * | sin(alpha / 2) | numpy.sin |
import time
from collections import deque
import numpy as np
from stable_baselines import SAC
from stable_baselines import logger
from stable_baselines.common.vec_env import VecEnv
from stable_baselines.a2c.utils import total_episode_reward_logger
from stable_baselines.ppo2.ppo2 import safe_mean, get_schedule_fn
from stable_baselines.common import TensorboardWriter
import scipy.stats
from statistics import mean
from utils.joystick import JoyStick
class SACWithVAE(SAC):
"""
Custom version of Soft Actor-Critic (SAC) to use it with donkey car env.
It is adapted from the stable-baselines version.
Notable changes:
- optimization is done after each episode and not at every step
- this version is integrated with teleoperation
"""
def _train_step(self, step, writer, learning_rate):
# Sample a batch from the replay buffer
batch = self.replay_buffer.sample(self.batch_size)
batch_obs, batch_actions, batch_rewards, batch_next_obs, batch_dones = batch
feed_dict = {
self.observations_ph: batch_obs,
self.actions_ph: batch_actions,
self.next_observations_ph: batch_next_obs,
self.rewards_ph: batch_rewards.reshape(self.batch_size, -1),
self.terminals_ph: batch_dones.reshape(self.batch_size, -1),
self.learning_rate_ph: learning_rate
}
# out = [policy_loss, qf1_loss, qf2_loss,
# value_loss, qf1, qf2, value_fn, logp_pi,
# self.entropy, policy_train_op, train_values_op]
# Do one gradient step
# and optionally compute log for tensorboard
if writer is not None:
out = self.sess.run([self.summary] + self.step_ops, feed_dict)
summary = out.pop(0)
writer.add_summary(summary, step)
else:
out = self.sess.run(self.step_ops, feed_dict)
# Unpack to monitor losses and entropy
policy_loss, qf1_loss, qf2_loss, value_loss, *values = out
# qf1, qf2, value_fn, logp_pi, entropy, *_ = values
entropy = values[4]
if self.log_ent_coef is not None:
ent_coef_loss, ent_coef = values[-2:]
return policy_loss, qf1_loss, qf2_loss, value_loss, entropy, ent_coef_loss, ent_coef
return policy_loss, qf1_loss, qf2_loss, value_loss, entropy
def optimize(self, step, writer, current_lr):
"""
Do several optimization steps to update the different networks.
:param step: (int) current timestep
:param writer: (TensorboardWriter object)
:param current_lr: (float) Current learning rate
:return: ([np.ndarray]) values used for monitoring
"""
train_start = time.time()
mb_infos_vals = []
for grad_step in range(self.gradient_steps):
if step < self.batch_size or step < self.learning_starts:
break
if len(self.replay_buffer) < self.batch_size:
break
self.n_updates += 1
# Update policy and critics (q functions)
mb_infos_vals.append(self._train_step(step, writer, current_lr))
if (step + grad_step) % self.target_update_interval == 0:
# Update target network
self.sess.run(self.target_update_op)
if self.n_updates > 0:
# print("SAC training duration: {:.2f}s".format(time.time() - train_start))
pass
return mb_infos_vals
def importance_sampling_ratio(self, proba_behavior_policy, proba_target_policy):
EPS = 1e-10
if proba_behavior_policy == 0:
ratio = (proba_target_policy + EPS)/(proba_behavior_policy + EPS)
else:
ratio = proba_target_policy/proba_behavior_policy
return ratio
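    # For example, importance_sampling_ratio(1.0, 0.4) is simply 0.4; when the
    # behaviour policy assigns zero probability, EPS keeps the ratio finite
    # rather than dividing by zero.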
def learn_jirl(self, total_timesteps, joystick=None, callback=None,
seed=None, log_interval=1, tb_log_name="SAC",
print_freq=100, base_policy=None, stochastic_actor=True,
expert_guidance_steps=50000, save_path=None):
with TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name) as writer:
# Add path to model in this function
self._setup_learn(seed)
# Joystick object
js = JoyStick()
# Transform to callable if needed
self.learning_rate = get_schedule_fn(self.learning_rate)
episode_rewards = [0.0]
# Reset the environment
obs = self.env.reset()
# Book keeping
self.episode_reward = np.zeros((1,))
ep_info_buf = deque(maxlen=100)
ep_len = 0
self.n_updates = 0
n_crashes = 0
infos_values = []
mb_infos_vals = []
pred_action_info = deque(maxlen=20)
mean_info = deque(maxlen=50)
std_info = deque(maxlen=50)
throttle_info = deque(maxlen=1000)
is_action_expert = False
is_action_actor = True
was_last_action_actor = False
last_action_actor = None
last_obs = None
# steps in which expert takes control
expert_control_steps = []
state = {} # for the imitation learning agent
MAX_LEN = 10
is_ratios_target_expert = deque(maxlen=MAX_LEN) # IS ratios over the last few steps
is_ratios_target_actor = deque(maxlen=MAX_LEN)
EPS = 1e-10
# Stats for plotting
rew_per_step = []
rew_per_step_rl = []
rl_control = []
# Buffer to control the threshold dynamically
thresh_buffer = deque(maxlen=1000)
std_buffer = deque(maxlen=10000)
mean_buffer = deque(maxlen=10000)
import time
start_time = time.time()
try:
for step in range(total_timesteps):
# Compute current learning_rate
frac = 1.0 - step / total_timesteps
current_lr = self.learning_rate(frac)
if callback is not None:
# Only stop training if return value is False, not when it is None. This is for backwards
# compatibility with callbacks that have no return statement.
if callback(locals(), globals()) is False:
break
# Get prediction from base policy
steerCmd = float(base_policy.predict(obs)[0][0])
# print("Steering from IL: ", steerCmd)
throttleCmd = - 1
action_expert = [steerCmd, throttleCmd]
# mean_exp, std_exp = il_model.get_proba_actions(state)
# print(scipy.stats.multivariate_normal(mean = mean, cov = std).pdf(action_expert))
# Test with hard coded variance
# std_exp = [0.1, 0.1]
# proba_expert_policy = scipy.stats.norm(mean_exp[0], std_exp[0]).pdf(action_expert[0])
# proba_expert_policy = scipy.stats.norm(mean_exp[0], std_exp[0]).cdf(action_expert[0] + EPS) - scipy.stats.norm(mean_exp[0], std_exp[0]).cdf(action_expert[0] - EPS)
# if 2*np.pi*np.prod(std) <= 1:
# proba_expert_policy = 2*np.pi*np.prod(std)*scipy.stats.multivariate_normal(mean = mean, cov = std).pdf(action_expert)
# else:
# proba_expert_policy = scipy.stats.multivariate_normal(mean = mean, cov = std).pdf(action_expert)
## ====== Test code snippet ======
# action_expert, _ = model.predict(obs, deterministic=True)
# new_obs, reward, done, info = self.env.step(action_expert)
## ===============================
if not stochastic_actor:
action_actor = self.policy_tf.step(obs[None], deterministic=True).flatten()
else:
action_actor = self.policy_tf.step(obs[None], deterministic=False).flatten()
if step >= expert_guidance_steps:
action_actor = self.policy_tf.step(obs[None], deterministic=True).flatten()
mean_act, std_act = self.policy_tf.proba_step(obs[None])
# print(scipy.stats.multivariate_normal(mean = mean.flatten(), cov = std.flatten()).pdf(action_actor))
proba_actor_policy = scipy.stats.norm(mean_act.flatten()[0], std_act.flatten()[0]).pdf(action_actor[0])
proba_expert_policy = scipy.stats.norm(mean_act.flatten()[0], std_act.flatten()[0]).pdf(action_expert[0])
# proba_actor_policy = scipy.stats.norm(mean_act.flatten()[0], std_act.flatten()[0]).cdf(action_actor[0] + EPS) - scipy.stats.norm(mean_act.flatten()[0], std_act.flatten()[0]).cdf(action_actor[0] - EPS)
# if 2*np.pi*np.prod(std) <= 1:
# proba_actor_policy = 2*np.pi*np.prod(std.flatten())*scipy.stats.multivariate_normal(mean = mean.flatten(), cov = std.flatten()).pdf(action_actor)
# else:
# proba_actor_policy = scipy.stats.multivariate_normal(mean = mean.flatten(), cov = std.flatten()).pdf(action_actor)
# Update entropy buffer
std_buffer.append(std_act)
# Update mean difference buffer
mean_buffer.append(np.linalg.norm(mean_act - action_expert))
# mean_buffer.append(np.linalg.norm(action_actor - action_expert))
rho = round(float(step)/expert_guidance_steps, 2)
# THRESH = (1 - rho) * (scipy.stats.norm(0, 0.1).pdf(0) - 1.0)**MAX_LEN
# _THRESH = (1 - rho) * (scipy.stats.norm(0, 0.1).pdf(0) - 2.0)
_THRESH = (np.mean(std_buffer) + np.mean(mean_buffer)) * (1 - rho)
THRESH = _THRESH**MAX_LEN
if step >= expert_guidance_steps:
# Only let the RL control the car
# If this doesn't work, tune MAX_LEN
THRESH = _THRESH = 0
if js.is_on():
## =====================================
## MANUAL CONTROL
## =====================================
# Execute commands from the joystick in the environment
action_js = [js.get_steer(), -1]
new_obs, reward, done, info = self.env.step(action_js)
# Store transition in the replay buffer.
self.replay_buffer.add(obs, action_js, reward, new_obs, float(done))
## ==========================================
sigma_p = 0.01
reward_hat = reward*np.exp(-np.linalg.norm(action_actor - action_js)/sigma_p)
self.replay_buffer.add(obs, action_actor, reward_hat, new_obs, float(done))
## ==========================================
if was_last_action_actor:
# Train the actor when the expert's actions are executed
# mb_infos_vals = self.optimize(step, writer, current_lr)
penalty = -1 #-10
self.replay_buffer.add(last_obs, last_action_actor, penalty, obs, float(done))
is_ratios_target_expert = deque(maxlen=MAX_LEN)
was_last_action_actor = False
last_action_actor = None
last_obs = None
is_action_actor = False
# print("Actor IS ratio: ", is_ratio)
# if ep_len > 700:
# print("Expert: ", np.prod(is_ratios_target_actor))
if (len(is_ratios_target_actor) == MAX_LEN) and np.all([(p > _THRESH) for p in is_ratios_target_actor]):
# Switch control to actor in the next step
is_action_actor = True
rew_per_step_rl.append(0.0)
rl_control.append(0)
# else:
elif is_action_actor:
## =====================================
## RL CONTROL
## =====================================
# Execute actor's actions in the environment
new_obs, reward, done, info = self.env.step(action_actor)
                        # Update IS ratio
is_ratio = self.importance_sampling_ratio(1.0, proba_expert_policy)
is_ratios_target_expert.append(is_ratio)
# Store transition in the replay buffer.
self.replay_buffer.add(obs, action_actor, reward, new_obs, float(done))
if not was_last_action_actor:
is_ratios_target_actor = deque(maxlen=MAX_LEN)
is_action_actor = True
# print("Actor: ", np.prod(is_ratios_target_expert))
# Per step safety check
if is_ratio < _THRESH:
# Switch control to the expert
is_action_actor = False
                        # Safety check for a sequence of states
if (len(is_ratios_target_actor) == MAX_LEN) and np.all([(p > _THRESH) for p in is_ratios_target_actor]):
#if (len(is_ratios_target_expert) == MAX_LEN) and (np.prod(is_ratios_target_expert) <= THRESH):
# Switch control to expert in the next step
is_action_actor = False
was_last_action_actor = True
last_action_actor = action_actor
last_obs = obs
rew_per_step_rl.append(reward)
rl_control.append(1)
else:
## =======================================
## EXPERT CONTROL
## =======================================
# Execute expert action in the environment
new_obs, reward, done, info = self.env.step(action_expert)
# Update IS ratio
# is_ratio = self.importance_sampling_ratio(1.0, proba_actor_policy)
is_ratio = self.importance_sampling_ratio(1.0, proba_expert_policy)
is_ratios_target_actor.append(is_ratio)
# print("Expert ", is_ratio)
# Store transition in the replay buffer.
self.replay_buffer.add(obs, action_expert, reward, new_obs, float(done))
## ==========================================
# # NOTE: Figure out what's going wrong here
# # Without the penalized reward the policy diverges (mean doesn't go towards 0
# # Also test with stochastic actions from the RL policy
# # # Add penalized reward to actor's action
# # r_hat: penalized reward
sigma_p = 0.01
reward_hat = reward*np.exp(-np.linalg.norm(action_actor - action_expert)/sigma_p)
self.replay_buffer.add(obs, action_actor, reward_hat, new_obs, float(done))
## ==========================================
if was_last_action_actor:
# Train the actor when the expert's actions are executed
# mb_infos_vals = self.optimize(step, writer, current_lr)
penalty = -1 #-10
self.replay_buffer.add(last_obs, last_action_actor, penalty, obs, float(done))
is_ratios_target_expert = deque(maxlen=MAX_LEN)
was_last_action_actor = False
last_action_actor = None
last_obs = None
is_action_actor = False
# print("Actor IS ratio: ", is_ratio)
# if ep_len > 700:
# print("Expert: ", np.prod(is_ratios_target_actor))
# if (len(is_ratios_target_actor) == MAX_LEN) and (np.prod(is_ratios_target_actor) > THRESH):
if (len(is_ratios_target_actor) == MAX_LEN) and np.all([(p > _THRESH) for p in is_ratios_target_actor]):
# Switch control to actor in the next step
is_action_actor = True
rew_per_step_rl.append(0.0)
rl_control.append(0)
throttle_info.append(float(self.env.last_throttle))
rew_per_step.append(reward)
pred_action_info.append(np.abs(action_actor[0] - action_expert[0]))
# mean_info.append([mean_exp[0], mean_act.flatten()[0]])
# std_info.append([std_exp[0], std_act.flatten()[0]])
ep_len += 1
obs = new_obs
if ep_len % 400 == 0:
print("Mean error pred actions: {}".format(np.mean(pred_action_info)))
print("Mean difference: {}".format(np.mean(mean_buffer)))
print("Mean std: {}".format(np.mean(std_buffer)))
# print("Mean: ", [np.mean([x[0] for x in mean_info]), np.mean([x[1] for x in mean_info])])
# print("Std: ", [np.mean([x[0] for x in std_info]), np.mean([x[1] for x in std_info])])
# print(np.prod(is_ratios_target_actor))
# Train every step ---under consideratioon
if (ep_len % 400) == 0:
self.env.jet.apply_throttle(0)
mb_infos_vals = self.optimize(step, writer, current_lr)
# if print_freq > 0 and ep_len % print_freq == 0 and ep_len > 0:
# print("{} steps".format(ep_len))
# Retrieve reward and episode length if using Monitor wrapper
maybe_ep_info = info.get('episode')
if maybe_ep_info is not None:
ep_info_buf.extend([maybe_ep_info])
if writer is not None:
# Write reward per episode to tensorboard
ep_reward = np.array([reward]).reshape((1, -1))
ep_done = np.array([done]).reshape((1, -1))
self.episode_reward = total_episode_reward_logger(self.episode_reward, ep_reward,
ep_done, writer, step)
episode_rewards[-1] += reward
# Log losses and entropy, useful for monitor training
if len(mb_infos_vals) > 0:
infos_values = np.mean(mb_infos_vals, axis=0)
if len(episode_rewards[-101:-1]) == 0:
mean_reward = -np.inf
else:
mean_reward = round(float(np.mean(episode_rewards[-101:-1])), 1)
if len(rl_control) < 1000:
mean_rl_control = round(100 * float(np.mean(rl_control)), 3)
else:
mean_rl_control = round(100 * float(np.mean(rl_control[-1001:-1])), 3)
num_episodes = len(episode_rewards)
if self.verbose >= 1 and (ep_len % 400) == 0:
logger.logkv("episodes", num_episodes)
logger.logkv("mean 100 episode reward", mean_reward)
logger.logkv('ep_rewmean', safe_mean([ep_info['r'] for ep_info in ep_info_buf]))
logger.logkv('eplenmean', safe_mean([ep_info['l'] for ep_info in ep_info_buf]))
logger.logkv("n_updates", self.n_updates)
logger.logkv("current_lr", current_lr)
logger.logkv("mean RL control percent", mean_rl_control)
logger.logkv("mean of throttle values", mean(throttle_info))
logger.logkv("time elapsed", int(time.time() - start_time))
#logger.logkv("n_crashes", n_crashes)
if len(infos_values) > 0:
for (name, val) in zip(self.infos_names, infos_values):
logger.logkv(name, val)
logger.logkv("total timesteps", step)
logger.dumpkvs()
# Reset infos:
infos_values = []
except KeyboardInterrupt:
print("Exiting")
self.env.reset()
import sys
sys.exit(0)
# Use last batch
print("Final optimization before saving")
self.env.reset()
mb_infos_vals = self.optimize(step, writer, current_lr)
# save stats
np.save(save_path + '/episode_reward', episode_rewards)
np.save(save_path + '/stepwise_reward', rew_per_step)
np.save(save_path + '/stepwise_reward_rl', rew_per_step_rl)
print("Saving complete. Give a keyboard interrupt to end")
return self
def learn(self, total_timesteps, callback=None, seed=None,
log_interval=10, tb_log_name="SAC", reset_num_timesteps=True,
prioritized_replay=False, stochastic_actor=False, save_path=None):
new_tb_log = self._init_num_timesteps(reset_num_timesteps)
with TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) as writer:
self._setup_learn(seed)
# Transform to callable if needed
self.learning_rate = get_schedule_fn(self.learning_rate)
# Initial learning rate
current_lr = self.learning_rate(1)
episode_rewards = [0.0]
obs = self.env.reset()
self.episode_reward = np.zeros((1,))
ep_info_buf = deque(maxlen=100)
n_updates = 0
infos_values = []
# Stats for plotting
rew_per_step = []
for step in range(total_timesteps):
if callback is not None:
# Only stop training if return value is False, not when it is None. This is for backwards
# compatibility with callbacks that have no return statement.
if callback(locals(), globals()) is False:
break
# Before training starts, randomly sample actions
# from a uniform distribution for better exploration.
# Afterwards, use the learned policy.
if self.num_timesteps < self.learning_starts:
action = self.env.action_space.sample()
# No need to rescale when sampling random action
rescaled_action = action
else:
action = self.policy_tf.step(obs[None], deterministic=False).flatten()
# Rescale from [-1, 1] to the correct bounds
rescaled_action = action * np.abs(self.action_space.low)
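# Illustrative note (not from the original source): this rescaling assumes a
# symmetric action space; e.g. if action_space.low == [-2.0], a policy output
# of 0.5 maps to a rescaled action of 1.0.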
assert action.shape == self.env.action_space.shape
new_obs, reward, done, info = self.env.step(rescaled_action)
# Store transition in the replay buffer.
self.replay_buffer.add(obs, action, reward, new_obs, float(done))
obs = new_obs
rew_per_step.append(reward)
# Retrieve reward and episode length if using Monitor wrapper
maybe_ep_info = info.get('episode')
if maybe_ep_info is not None:
ep_info_buf.extend([maybe_ep_info])
if writer is not None:
# Write reward per episode to tensorboard
ep_reward = np.array([reward]).reshape((1, -1))
ep_done = np.array([done]).reshape((1, -1))
self.episode_reward = total_episode_reward_logger(self.episode_reward, ep_reward,
ep_done, writer, self.num_timesteps)
if step % self.train_freq == 0:
mb_infos_vals = []
# Update policy, critics and target networks
for grad_step in range(self.gradient_steps):
if self.num_timesteps < self.batch_size or self.num_timesteps < self.learning_starts:
break
n_updates += 1
# Compute current learning_rate
frac = 1.0 - step / total_timesteps
current_lr = self.learning_rate(frac)
# Update policy and critics (q functions)
mb_infos_vals.append(self._train_step(step, writer, current_lr))
# Update target network
if (step + grad_step) % self.target_update_interval == 0:
# Update target network
self.sess.run(self.target_update_op)
# Log losses and entropy, useful for monitoring training
if len(mb_infos_vals) > 0:
infos_values = np.mean(mb_infos_vals, axis=0)
episode_rewards[-1] += reward
if done:
if not isinstance(self.env, VecEnv):
obs = self.env.reset()
episode_rewards.append(0.0)
if len(episode_rewards[-101:-1]) == 0:
mean_reward = -np.inf
else:
mean_reward = round(float(np.mean(episode_rewards[-101:-1])), 1)
num_episodes = len(episode_rewards)
self.num_timesteps += 1
# Display training infos
if self.verbose >= 1 and done and log_interval is not None and len(episode_rewards) % log_interval == 0:
logger.logkv("episodes", num_episodes)
logger.logkv("mean 100 episode reward", mean_reward)
logger.logkv('ep_rewmean', safe_mean([ep_info['r'] for ep_info in ep_info_buf]))
logger.logkv('eplenmean', safe_mean([ep_info['l'] for ep_info in ep_info_buf]))
logger.logkv("n_updates", n_updates)
logger.logkv("current_lr", current_lr)
logger.logkv("fps", fps)
if len(infos_values) > 0:
for (name, val) in zip(self.infos_names, infos_values):
logger.logkv(name, val)
logger.logkv("total timesteps", self.num_timesteps)
logger.dumpkvs()
# Reset infos:
infos_values = []
# Save book keeping stats
np.save(save_path + '/episode_reward', episode_rewards)
np.save(save_path + '/stepwise_reward', rew_per_step)
"""
Asset selling driver script
"""
from collections import namedtuple
import pandas as pd
import numpy as np
from AssetSellingModel_Q3 import AssetSellingModel
from AssetSellingPolicy_Q3 import AssetSellingPolicy
import matplotlib.pyplot as plt
from copy import copy
import math
import time
plt.rcParams["figure.figsize"] = (15,8)
if __name__ == "__main__":
# read in policy parameters from an Excel spreadsheet, "asset_selling_policy_parameters.xlsx"
sheet1 = pd.read_excel("asset_selling_policy_parameters.xlsx", sheet_name="Sheet1")
params = zip(sheet1['param1'], sheet1['param2'])
param_list = list(params)
sheet2 = pd.read_excel("asset_selling_policy_parameters.xlsx", sheet_name="Sheet2")
sheet3 = pd.read_excel("asset_selling_policy_parameters.xlsx", sheet_name="Sheet3")
biasdf = pd.read_excel("asset_selling_policy_parameters.xlsx", sheet_name="Sheet4")
policy_selected = sheet3['Policy'][0]
T = sheet3['TimeHorizon'][0]
gamma = sheet3['DiscountFactor'][0]
initPrice = sheet3['InitialPrice'][0]
initBias = sheet3['InitialBias'][0]
exog_params = {'UpStep':sheet3['UpStep'][0],'DownStep':sheet3['DownStep'][0],'Variance':sheet3['Variance'][0],'biasdf':biasdf}
nIterations = sheet3['Iterations'][0]
printStep = sheet3['PrintStep'][0]
printIterations = [0]
printIterations.extend(list(reversed(range(nIterations-1,0,-printStep))))
print("exog_params ",exog_params)
# initialize the model and the policy
policy_names = ['sell_low', 'high_low', 'track']
#####
state_names = ['price', 'resource','bias', 'prev_price', 'prev_price2']
init_state = {'price': initPrice, 'resource': 1,'bias':initBias, \
'prev_price':initPrice, 'prev_price2':initPrice}
#####
decision_names = ['sell', 'hold']
M = AssetSellingModel(state_names, decision_names, init_state,exog_params,T,gamma)
P = AssetSellingPolicy(M, policy_names)
t = 0
prev_price = init_state['price']
# make a policy_info dict object
policy_info = {'sell_low': param_list[0],
'high_low': param_list[1],
'track': param_list[2] + (prev_price, prev_price)}
print("Parameters track!!!!!!!!!!!! ",policy_info['track'])
start = time.time()
#####
if not policy_selected in ['full_grid','track']:
#####
#print("Selected policy {}, time horizon {}, initial price {} and number of iterations {}".format(policy_selected,T,initPrice,
# ))
contribution_iterations=[P.run_policy(param_list, policy_info, policy_selected, t) for ite in list(range(nIterations))]
contribution_iterations = pd.Series(contribution_iterations)
print("Contribution per iteration: ")
print(contribution_iterations)
cum_avg_contrib = contribution_iterations.expanding().mean()
print("Cumulative average contribution per iteration: ")
print(cum_avg_contrib)
#plotting the results
fig, axsubs = plt.subplots(1,2,sharex=True,sharey=True)
fig.suptitle("Asset selling using policy {} with parameters {} and T {}".format(policy_selected,policy_info[policy_selected],T) )
i = np.arange(0, nIterations, 1)
axsubs[0].plot(i, cum_avg_contrib, 'g')
axsubs[0].set_title('Cumulative average contribution')
axsubs[1].plot(i, contribution_iterations, 'g')
axsubs[1].set_title('Contribution per iteration')
# Create a big subplot
ax = fig.add_subplot(111, frameon=False)
# hide tick and tick label of the big axes
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
ax.set_ylabel('USD', labelpad=0) # Use argument `labelpad` to move label downwards.
ax.set_xlabel('Iterations', labelpad=10)
plt.show()
#####
elif policy_selected == 'track':
#print("Selected policy {}, time horizon {}, initial price {} and number of iterations {}".format(policy_selected,T,initPrice,
# ))
theta_range = np.linspace(policy_info['track'][0], policy_info['track'][1], printStep)
from typing import Optional, List
import matplotlib.pyplot as plt
import numpy as np
plt.style.use("seaborn-colorblind")
from .. import log
_log = log.getLogger("utils-viz")
def show_convergence(center_points: List, outfile: Optional[str] = None) -> None:
npoints = len(center_points)
convergence = np.empty((npoints - 1))
previous_point = None
xticks = []
for idx, cp in enumerate(center_points):
if previous_point is not None:
convergence[idx - 1] = np.linalg.norm(previous_point - cp)
from __future__ import absolute_import
import cv2
import numpy as np
import random
import copy
import threading
import itertools
import pdb
import math
import time
def format_img_size(img, C):
""" formats the image size based on config """
img_min_side = float(C.im_size)
(height, width, _) = img.shape
if width <= height:
ratio = img_min_side / width
new_height = int(ratio * height)
new_width = int(img_min_side)
else:
ratio = img_min_side / height
new_width = int(ratio * width)
new_height = int(img_min_side)
img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)
fx = width/float(new_width)
fy = height/float(new_height)
return img, ratio, fx, fy
def format_img_channels(img, C):
""" formats the image channels based on config """
img = img[:, :, (2, 1, 0)]
img = img.astype(np.float32)
img[:, :, 0] -= C.img_channel_mean[0]
img[:, :, 1] -= C.img_channel_mean[1]
img[:, :, 2] -= C.img_channel_mean[2]
img /= C.img_scaling_factor
img = np.transpose(img, (2, 0, 1))
img = np.expand_dims(img, axis=0)
return img
# Method to transform the coordinates of the bounding box to its original size
def get_real_coordinates(ratio, x1, y1, x2, y2):
real_x1 = int(round(x1 // ratio))
real_y1 = int(round(y1 // ratio))
real_x2 = int(round(x2 // ratio))
real_y2 = int(round(y2 // ratio))
return (real_x1, real_y1, real_x2, real_y2)
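# Illustrative example (assumed values, not from the original source): with a
# resize ratio of 0.5, a box (10, 20, 30, 40) detected on the resized image
# maps back to (20, 40, 60, 80) on the original image:
# >>> get_real_coordinates(0.5, 10, 20, 30, 40)
# (20, 40, 60, 80)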
def augment(img_data, config, augment=True):
assert 'filepath' in img_data
assert 'bboxes' in img_data
assert 'width' in img_data
assert 'height' in img_data
img_data_aug = copy.deepcopy(img_data)
img = cv2.imread(img_data_aug['filepath'])
if augment:
rows, cols = img.shape[:2]
if config.use_horizontal_flips and np.random.randint(0, 2) == 0:
img = cv2.flip(img, 1)
for bbox in img_data_aug['bboxes']:
x1 = bbox['x1']
x2 = bbox['x2']
bbox['x2'] = cols - x1
bbox['x1'] = cols - x2
if config.use_vertical_flips and np.random.randint(0, 2) == 0:
img = cv2.flip(img, 0)
for bbox in img_data_aug['bboxes']:
y1 = bbox['y1']
y2 = bbox['y2']
bbox['y2'] = rows - y1
bbox['y1'] = rows - y2
if config.rot_90:
angle = np.random.choice([0,90,180,270],1)[0]
if angle == 270:
img = np.transpose(img, (1,0,2))
img = cv2.flip(img, 0)
elif angle == 180:
img = cv2.flip(img, -1)
elif angle == 90:
img = np.transpose(img, (1,0,2))
img = cv2.flip(img, 1)
elif angle == 0:
pass
for bbox in img_data_aug['bboxes']:
x1 = bbox['x1']
x2 = bbox['x2']
y1 = bbox['y1']
y2 = bbox['y2']
if angle == 270:
bbox['x1'] = y1
bbox['x2'] = y2
bbox['y1'] = cols - x2
bbox['y2'] = cols - x1
elif angle == 180:
bbox['x2'] = cols - x1
bbox['x1'] = cols - x2
bbox['y2'] = rows - y1
bbox['y1'] = rows - y2
elif angle == 90:
bbox['x1'] = rows - y2
bbox['x2'] = rows - y1
bbox['y1'] = x1
bbox['y2'] = x2
elif angle == 0:
pass
img_data_aug['width'] = img.shape[1]
img_data_aug['height'] = img.shape[0]
return img_data_aug, img
def union(au, bu, area_intersection):
area_a = (au[2] - au[0]) * (au[3] - au[1])
area_b = (bu[2] - bu[0]) * (bu[3] - bu[1])
area_union = area_a + area_b - area_intersection
return area_union
def intersection(ai, bi):
x = max(ai[0], bi[0])
y = max(ai[1], bi[1])
w = min(ai[2], bi[2]) - x
h = min(ai[3], bi[3]) - y
if w < 0 or h < 0:
return 0
return w*h
def iou(a, b):
# a and b should be (x1,y1,x2,y2)
if a[0] >= a[2] or a[1] >= a[3] or b[0] >= b[2] or b[1] >= b[3]:
return 0.0
area_i = intersection(a, b)
area_u = union(a, b, area_i)
return float(area_i) / float(area_u + 1e-6)
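# Illustrative example (assumed boxes, not from the original source): the 2x2
# boxes (0, 0, 2, 2) and (1, 1, 3, 3) overlap in a 1x1 square, so
# intersection = 1, union = 4 + 4 - 1 = 7, and iou is roughly 1/7:
# >>> round(iou((0, 0, 2, 2), (1, 1, 3, 3)), 3)
# 0.143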
def get_new_img_size(width, height, img_min_side=600):
if width <= height:
f = float(img_min_side) / width
resized_height = int(f * height)
resized_width = img_min_side
else:
f = float(img_min_side) / height
resized_width = int(f * width)
resized_height = img_min_side
return resized_width, resized_height
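# Illustrative example (assumed sizes, not from the original source): a
# 400x800 (width x height) image with img_min_side=600 is scaled by 1.5 so the
# shorter side becomes 600:
# >>> get_new_img_size(400, 800)
# (600, 1200)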
class SampleSelector:
def __init__(self, class_count):
# ignore classes that have zero samples
self.classes = [b for b in class_count.keys() if class_count[b] > 0]
self.class_cycle = itertools.cycle(self.classes)
self.curr_class = next(self.class_cycle)
def skip_sample_for_balanced_class(self, img_data):
class_in_img = False
for bbox in img_data['bboxes']:
cls_name = bbox['class']
if cls_name == self.curr_class:
class_in_img = True
self.curr_class = next(self.class_cycle)
break
if class_in_img:
return False
else:
return True
def calc_rpn(C, img_data, width, height, resized_width, resized_height, img_length_calc_function):
downscale = float(C.rpn_stride)
anchor_sizes = C.anchor_box_scales
anchor_ratios = C.anchor_box_ratios
num_anchors = len(anchor_sizes) * len(anchor_ratios)
# calculate the output map size based on the network architecture
(output_width, output_height) = img_length_calc_function(resized_width, resized_height)
n_anchratios = len(anchor_ratios)
# initialise empty output objectives
y_rpn_overlap = np.zeros((output_height, output_width, num_anchors))
y_is_box_valid = np.zeros((output_height, output_width, num_anchors))
y_rpn_regr = np.zeros((output_height, output_width, num_anchors * 4))
num_bboxes = len(img_data['bboxes'])
num_anchors_for_bbox = np.zeros(num_bboxes).astype(int)
best_anchor_for_bbox = -1*np.ones((num_bboxes, 4))
from . import Image
import matplotlib.pyplot as plt
import numpy as np
import re
from astropy.time import Time
from astropy import units as u
from astropy.coordinates import SkyCoord
from .fluxes import ApertureFluxes
from . import viz
from astropy.io import fits
from .telescope import Telescope
from . import utils
from astroquery.mast import Catalogs
from astropy.wcs import WCS, utils as wcsutils
import pandas as pd
from scipy.stats import binned_statistic
from .blocks.psf import Gaussian2D, Moffat2D,cutouts
from .console_utils import INFO_LABEL
from astropy.stats import sigma_clipped_stats
from astropy.io.fits.verify import VerifyWarning
from datetime import datetime
import warnings
from .blocks.registration import distances
import requests
import shutil
from pathlib import Path
from . import twirl
import io
from .utils import fast_binning, z_scale
from .console_utils import info
warnings.simplefilter('ignore', category=VerifyWarning)
class Observation(ApertureFluxes):
"""
Class to load and analyze photometry products
Parameters
----------
photfile : str
path of the `.phot` file to load
"""
def __init__(self, photfile, ignore_time=False):
super().__init__(photfile)
utils.remove_sip(self.xarray.attrs)
self.phot = photfile
self.telescope = Telescope.from_name(self.telescope)
self.gaia_data = None
self.tic_data = None
self.wcs = WCS(utils.remove_arrays(self.xarray.attrs))
self._meridian_flip = None
has_bjd = hasattr(self.xarray, "bjd_tdb")
if has_bjd:
has_bjd = ~np.all(self.xarray.bjd_tdb.isnull().values)
if not has_bjd:
try:
self.compute_bjd()
if not ignore_time:
print(f"{INFO_LABEL} Time converted to BJD TDB")
except:
if not ignore_time:
print(f"{INFO_LABEL} Could not convert time to BJD TDB")
def _check_stack(self):
assert 'stack' in self.xarray, "No stack found"
# Loaders and savers (files and data)
# ------------------------------------
def __copy__(self):
copied = Observation(self.xarray.copy(), ignore_time=True)
copied.phot = self.phot
copied.telescope = self.telescope
copied.gaia_data = self.gaia_data
copied.tic_data = self.tic_data
copied.wcs = self.wcs
return copied
def copy(self):
return self.__copy__()
def to_csv(self, destination, sep=" "):
"""Export a typical csv of the observation's data
Parameters
----------
destination : str
Path of the csv file to save
sep : str, optional
separation string within csv, by default " "
"""
df = pd.DataFrame(
{
"BJD-TDB" if self.time_format == "bjd_tdb" else "JD-UTC": self.time,
"DIFF_FLUX": self.diff_flux,
"ERROR": self.diff_error,
"dx_MOVE": self.dx,
"dy_MOVE": self.dy,
"FWHM": self.fwhm,
"FWHMx": self.fwhm,
"FWHMy": self.fwhm,
"SKYLEVEL": self.sky,
"AIRMASS": self.airmass,
"EXPOSURE": self.exptime,
}
)
df.to_csv(destination, sep=sep, index=False)
def save(self, destination=None):
"""Save current observation
Parameters
----------
destination : str, optional
path to phot file, by default None
"""
self.xarray.to_netcdf(self.phot if destination is None else destination)
info(f"saved {self.phot}")
def export_stack(self, destination, **kwargs):
"""Export stack to FITS file
Parameters
----------
destination : str
path of FITS to export
"""
header = {name: value for name, value in self.xarray.attrs.items() if name.isupper()}
data = self.stack
hdul = fits.HDUList([fits.PrimaryHDU(data=data, header=fits.Header(header))])
hdul.writeto(destination, **kwargs)
def import_stack(self, fitsfile):
"""Import FITS as stack to current obs (including WCS) - do not forget to save to keep it
Parameters
----------
fitsfile : str
path of FITS stack to import
"""
data = fits.getdata(fitsfile)
header = fits.getheader(fitsfile)
self.wcs = WCS(header)
self.xarray.attrs.update(utils.header_to_cdf4_dict(header))
self.xarray["stack"] = (('w', 'h'), data)
# Convenience
# -----------
@property
def skycoord(self):
"""astropy SkyCoord object for the target
"""
return SkyCoord(self.RA, self.DEC, frame='icrs', unit=(self.telescope.ra_unit, self.telescope.dec_unit))
@property
def simbad_url(self):
"""
[notebook feature] clickable simbad query url for specified target
"""
from IPython.core.display import display, HTML
display(HTML('<a href="{}">{}</a>'.format(self.simbad, self.simbad)))
@property
def simbad(self):
"""
simbad query url for specified target
"""
return f"http://simbad.u-strasbg.fr/simbad/sim-coo?Coord={self.RA}+{self.DEC}&CooFrame=FK5&CooEpoch=2000&CooEqui=" \
"2000&CooDefinedFrames=none&Radius=2&Radius.unit=arcmin&submit=submit+query&CoordList="
@property
def denominator(self):
"""A conveniant name for the observation: {telescope}_{date}_{name}_{filter}
Returns
-------
[type]
[description]
"""
return f"{self.telescope.name}_{self.date}_{self.name}_{self.filter}"
@property
def meridian_flip(self):
"""Meridian flip time. Supposing EAST and WEST encode orientation
"""
if self._meridian_flip is not None:
return self._meridian_flip
else:
has_flip = hasattr(self.xarray, "flip")
if has_flip:
try:
np.all(np.isnan(self.flip))
return None
except TypeError:
pass
if has_flip:
if "WEST" in self.flip:
flip = (self.flip.copy() == "WEST").astype(int)
diffs = np.abs(np.diff(flip))
if np.any(diffs):
self._meridian_flip = self.time[np.argmax(diffs).flatten()]
else:
self._meridian_flip = None
return self._meridian_flip
else:
return None
else:
return None
# TESS specific methods
# --------------------
@property
def tic_id(self):
"""TIC id from digits found in target name
"""
try:
nb = re.findall(r'\d*\.?\d+', self.name)
df = pd.read_csv("https://exofop.ipac.caltech.edu/tess/download_toi?toi=%s&output=csv" % nb[0])
tic = df["TIC ID"][0]
return f"{tic}"
except KeyError:
print('TIC ID not found')
return None
@property
def gaia_from_toi(self):
"""Gaia id from TOI id if TOI is in target name
"""
if self.tic_id is not None:
tic_id = ("TIC " + self.tic_id)
catalog_data = Catalogs.query_object(tic_id, radius=.001, catalog="TIC")
return f"{catalog_data['GAIA'][0]}"
else:
return None
@property
def tfop_prefix(self):
return f"TIC{self.tic_id}_{self.date}_{self.telescope.name}_{self.filter}"
# Methods
# -------
def compute_bjd(self, version="prose"):
"""Compute BJD_tdb based on current time
Once this is done self.time is BJD tdb and time format can be checked in self.time_format. Note that half the
exposure time is added to the JD times before conversion. The precision of the returned time is not
guaranteed, especially with "prose" method (~30ms). "eastman" option accuracy is 20ms. See
http://astroutils.astronomy.ohio-state.edu/time/utc2bjd.html for more details.
Parameters
----------
version : str, optional
- "prose": uses an astropy method
- "eastman": uses the web applet http://astroutils.astronomy.ohio-state.edu (Eastman et al. 2010) [requires
an internet connection]
by default "prose"
"""
assert self.telescope is not None
assert self.skycoord is not None
exposure_days = self.xarray.exposure.values/60/60/24
# For backward compatibility
# --------------------------
if "time_format" not in self.xarray.attrs:
self.xarray.attrs["time_format"] = "jd_utc"
self.xarray["jd_utc"] = ("time", self.time)
if "jd_utc" not in self:
self.xarray["jd_utc"] = ("time", self.jd)
self.xarray.drop("jd")
# -------------------------
if version == "prose":
time = Time(self.jd_utc + exposure_days/2, format="jd", scale="utc", location=self.telescope.earth_location).tdb
light_travel_tbd = time.light_travel_time(self.skycoord, location=self.telescope.earth_location)
bjd_time = (time + light_travel_tbd).value
elif version == "eastman":
bjd_time = utils.jd_to_bjd(self.jd_utc + exposure_days/2, self.skycoord.ra.deg, self.skycoord.dec.deg)
self.xarray = self.xarray.assign_coords(time=bjd_time)
self.xarray["bjd_tdb"] = ("time", bjd_time)
self.xarray.attrs["time_format"] = "bjd_tdb"
# Catalog queries
# ---------------
def query_gaia(self, limit=-1, cone_radius=None):
"""Query gaia catalog for stars in the field
"""
from astroquery.gaia import Gaia
Gaia.ROW_LIMIT = limit
header = self.xarray.attrs
shape = self.stack.shape
if cone_radius is None:
cone_radius = np.sqrt(2) * np.max(shape) * self.telescope.pixel_scale / 120
coord = self.skycoord
radius = u.Quantity(cone_radius, u.arcminute)
gaia_query = Gaia.cone_search_async(coord, radius, verbose=False, )
self.gaia_data = gaia_query.get_results()
self.gaia_data.sort("phot_g_mean_flux", reverse=True)
delta_years = (utils.datetime_to_years(datetime.strptime(self.date, "%Y%m%d")) - \
self.gaia_data["ref_epoch"].data.data) * u.year
dra = delta_years * self.gaia_data["pmra"].to(u.deg / u.year)
ddec = delta_years * self.gaia_data["pmdec"].to(u.deg / u.year)
skycoords = SkyCoord(
ra=self.gaia_data['ra'].quantity + dra,
dec=self.gaia_data['dec'].quantity + ddec,
pm_ra_cosdec=self.gaia_data['pmra'],
pm_dec=self.gaia_data['pmdec'],
radial_velocity=self.gaia_data['radial_velocity'],
obstime=Time(2015.0, format='decimalyear'))
gaias = np.array(wcsutils.skycoord_to_pixel(skycoords, self.wcs)).T
gaias[np.any(np.isnan(gaias), 1), :] = [0, 0]
self.gaia_data["x"], self.gaia_data["y"] = gaias.T
inside = np.all((np.array([0, 0]) < gaias) & (gaias < np.array(self.stack.shape)), 1)
self.gaia_data = self.gaia_data[np.argwhere(inside).squeeze()]
w, h = self.stack.shape
if np.abs(np.mean(self.gaia_data["x"])) > w or np.abs(np.mean(self.gaia_data["y"])) > h:
warnings.warn("Catalog stars seem out of the field. Check that your stack is solved and that telescope "
"'ra_unit' and 'dec_unit' are well set")
def query_tic(self,cone_radius=None):
"""Query TIC catalog (through MAST) for stars in the field
"""
from astroquery.mast import Catalogs
header = self.xarray.attrs
shape = self.stack.shape
if cone_radius is None:
cone_radius = np.sqrt(2) * np.max(shape) * self.telescope.pixel_scale / 120
coord = self.skycoord
radius = u.Quantity(cone_radius, u.arcminute)
self.tic_data = Catalogs.query_region(coord, radius, "TIC", verbose=False)
self.tic_data.sort("Jmag")
skycoords = SkyCoord(
ra=self.tic_data['ra'],
dec=self.tic_data['dec'], unit="deg")
self.tic_data["x"], self.tic_data["y"] = np.array(wcsutils.skycoord_to_pixel(skycoords, self.wcs))
w, h = self.stack.shape
if np.abs(np.mean(self.tic_data["x"])) > w or np.abs(np.mean(self.tic_data["y"])) > h:
warnings.warn("Catalog stars seem out of the field. Check that your stack is solved and that telescope "
"'ra_unit' and 'dec_unit' are well set")
@property
def gaia_target(self):
return None
@gaia_target.setter
def gaia_target(self, gaia_id):
"""Set target with a gaia id
Parameters
----------
gaia_id : int
gaia id
"""
if self.gaia_data is None:
self.query_gaia()
_ = self.gaia_data.to_pandas()[["source_id", "x", "y"]].to_numpy()
ids = _[:, 0]
positions = _[:, 1:3]
gaia_i = np.argmin(np.abs(gaia_id - ids))
self.target = np.argmin(np.power(positions[gaia_i, :] - self.stars[:, ::-1], 2).sum(1))
# Plot
# ----
def show(self, size=10, flip=False, zoom=False, contrast=0.05, wcs=False, cmap="Greys_r", sigclip=None,vmin=None,vmax=None):
"""Show stack image
Parameters
----------
size : int, optional
size of the square figure, by default 10
flip : bool, optional
whether to flip the image, by default False
zoom : bool, optional
whether to include a zoom inlay in the image, by default False
contrast : float, optional
contrast for the Zscale of image, by default 0.05
wcs : bool, optional
whether to show the grid and axes in world coordinates
"""
if self.target == -1:
zoom = False
self._check_stack()
fig = plt.figure(figsize=(size, size))
fig.patch.set_facecolor('white')
image = self.stack.copy()
if flip:
image = image[::-1, ::-1]
if sigclip is not None:
mean, median, std = sigma_clipped_stats(image)
image[image - median < 2 * std] = median
if wcs:
ax = plt.subplot(projection=self.wcs, label='overlays')
else:
ax = fig.add_subplot(111)
if all([vmin, vmax]) is False:
_ = ax.imshow(utils.z_scale(image,c=contrast), cmap=cmap, origin="lower")
else:
_ = ax.imshow(image, cmap=cmap, origin="lower",vmin=vmin,vmax=vmax)
if wcs:
ax.coords.grid(True, color='white', ls='solid', alpha=0.3)
ax.coords[0].set_axislabel('Galactic Longitude')
ax.coords[1].set_axislabel('Galactic Latitude')
overlay = ax.get_coords_overlay('fk5')
overlay.grid(color='white', ls='--', alpha=0.3)
overlay[0].set_axislabel('Right Ascension (J2000)')
overlay[1].set_axislabel('Declination (J2000)')
def _check_show(self, **kwargs):
axes = plt.gcf().axes
if len(axes) == 0:
self.show(**kwargs)
def show_stars(self, size=10, view=None, n=None, flip=False,
comp_color="yellow", color=[0.51, 0.86, 1.], stars=None, legend=True, **kwargs):
"""Show detected stars over stack image
Parameters
----------
size : int, optional
size of the square figure, by default 10
flip : bool, optional
whether to flip image, by default False
view : str, optional
"all" to see all stars OR "reference" to have target and comparison stars hilighted, by default None
n : int, optional
max number of stars to show, by default None,
Raises
------
AssertionError
[description]
"""
self._check_show(flip=flip, size=size, **kwargs)
if stars is None:
stars = self.stars
if n is not None:
if view == "reference":
raise AssertionError("'n_stars' kwargs is incompatible with 'reference' view that will display all stars")
else:
n = len(stars)
stars = stars[0:n]
if view is None:
view = "reference" if 'comps' in self else "all"
image_size = np.array(np.shape(self.stack))[::-1]
if flip:
stars = np.array(image_size) - stars
if view == "all":
viz.plot_marks(*stars.T, np.arange(len(stars)), color=color)
if "stars" in self.xarray:
others = np.arange(n, len(self.stars))
others = np.setdiff1d(others, self.target)
viz.plot_marks(*self.stars[others].T, alpha=0.4, color=color)
elif view == "reference":
x = self.xarray.isel(apertures=self.aperture)
assert 'comps' in self, "No differential photometry"
comps = x.comps.values
others = np.setdiff1d(np.arange(len(stars)), x.comps.values)
others = np.setdiff1d(others, self.target)
_ = viz.plot_marks(*stars[self.target], self.target, color=color)
_ = viz.plot_marks(*stars[comps].T, comps, color=comp_color)
_ = viz.plot_marks(*stars[others].T, alpha=0.4, color=color)
if legend:
colors = [comp_color, color]
texts = ["Comparison stars", "Target"]
viz.circles_legend(colors, texts)
def show_gaia(self, color="yellow", alpha=1, n=None, idxs=True, limit=-1, fontsize=8, align=False):
"""Overlay Gaia objects on stack image
Parameters
----------
color : str, optional
color of marks and font, by default "yellow"
alpha : int, optional
opacity of marks and font, by default 1
n : int, optional
max number of stars to show; by default None (all stars)
idxs : bool, optional
whether to show gaia ids, by default True
"""
self._check_show()
if self.gaia_data is None:
self.query_gaia(limit=limit)
gaias = np.vstack([self.gaia_data["x"].data, self.gaia_data["y"].data]).T
defined = ~np.any(np.isnan(gaias), 1)
gaias = gaias[defined]
labels = self.gaia_data["source_id"].data.astype(str)[defined]
if align:
X = twirl.find_transform(gaias[0:30], self.stars, n=15)
gaias = twirl.affine_transform(X)(gaias)
labels = [f"{_id[0:len(_id) // 2]}\n{_id[len(_id) // 2::]}" for _id in labels]
_ = viz.plot_marks(*gaias.T, labels if idxs else None, color=color, alpha=alpha, n=n, position="top",
fontsize=fontsize)
def show_tic(self, color="white", alpha=1, n=None, idxs=True, align=True):
"""Overlay TIC objects on stack image
Parameters
----------
color : str, optional
color of marks and font, by default "white"
alpha : int, optional
opacity of marks and font, by default 1
n : int, optional
max number of stars to show; by default None (all stars)
idxs : bool, optional
whether to show TIC ids, by default True
"""
self._check_show()
if self.tic_data is None:
self.query_tic()
x = self.tic_data["x"].data
y = self.tic_data["y"].data
tics = np.vstack([x, y]).T
ID = self.tic_data["ID"].data
if align:
X = twirl.find_transform(tics[0:30], self.stars, n=15)
tics = twirl.affine_transform(X)(tics)
_ = viz.plot_marks(*tics.T, ID if idxs else None, color=color, alpha=alpha, n=n, position="top", fontsize=9, offset=10)
def show_cutout(self, star=None, size=200, marks=True,**kwargs):
"""
Show a zoomed cutout around a detected star or coordinates
Parameters
----------
star : int or array-like, optional
detected star id or (x, y) coordinate, by default None
size : int, optional
side size of square cutout in pixel, by default 200
"""
if star is None:
x, y = self.stars[self.target]
elif isinstance(star, int):
x, y = self.stars[star]
elif isinstance(star, (tuple, list, np.ndarray)):
x, y = star
else:
raise ValueError("star type not understood")
self.show(**kwargs)
plt.xlim(np.array([-size / 2, size / 2]) + x)
plt.ylim(np.array([-size / 2, size / 2]) + y)
if marks:
idxs = np.argwhere(np.max(np.abs(self.stars - [x, y]), axis=1) < size).squeeze()
viz.plot_marks(*self.stars[idxs].T, label=idxs)
def plot_comps_lcs(self, n=15, ylim=(0.98, 1.02)):
"""Plot comparison stars light curves along target star light curve
Parameters
----------
n : int, optional
Maximum number of comparison stars to show, by default 15
ylim : tuple, optional
ylim of the plot, by default (0.98, 1.02)
"""
idxs = [self.target, *self.xarray.comps.isel(apertures=self.aperture).values[0:n]]
lcs = [self.xarray.diff_fluxes.isel(star=i, apertures=self.aperture).values for i in idxs]
if ylim is None:
ylim = (self.diff_flux.min() * 0.99, self.diff_flux.max() * 1.01)
offset = ylim[1] - ylim[0]
if len(plt.gcf().axes) == 0:
plt.figure(figsize=(5, 10))
for i, lc in enumerate(lcs):
color = "grey" if i != 0 else "black"
viz.plot(self.time, lc - i * offset, bincolor=color)
plt.annotate(idxs[i], (self.time.min() + 0.005, 1 - i * offset + offset / 3))
plt.ylim(1 - (i + 0.5) * offset, ylim[1])
plt.title("Comparison stars", loc="left")
plt.grid(color="whitesmoke")
plt.tight_layout()
def plot_psf_fit(self, size=21, cmap="inferno", c="blueviolet", model=Gaussian2D):
"""Plot a 2D gaussian fit of the global psf (extracted from stack fits)
Parameters
----------
size : int, optional
square size of extracted PSF, by default 21
cmap : str, optional
color map of psf image, by default "inferno"
c : str, optional
color of model plot line, by default "blueviolet"
model : prose.blocks, optional
a PsfFit block, by default Gaussian2D
Returns
-------
dict
PSF fit info (theta, std_x, std_y, fwhm_x, fwhm_y)
"""
psf_fit = model()
image = Image(data=self.stack, stars_coords=self.stars, header=self.xarray.attrs)
psf_fit.run(image)
if len(plt.gcf().get_axes()) == 0:
plt.figure(figsize=(12, 4))
viz.plot_marginal_model(psf_fit.epsf, psf_fit.optimized_model, cmap=cmap, c=c)
return {"theta": image.theta,
"std_x": image.psf_sigma_x,
"std_y": image.psf_sigma_y,
"fwhm_x": image.fwhmx,
"fwhm_y": image.fwhmy }
def plot_star_psf(self,star=None,cutout_size=21,print_values=True,plot=True):
if star is None:
star = self.target
cutout = cutouts(self.stack, [self.stars[star]], size=cutout_size)
psf_fit = Moffat2D(cutout_size=cutout_size)
params = ['fwhmx =', 'fwhmy =', 'theta =']
values = []
for i in range(len(params)):
if print_values is True:
print(params[i], psf_fit(cutout.data[0])[i])
values.append(psf_fit(cutout.data[0])[i])
if plot is True:
viz.plot_marginal_model(psf_fit.epsf, psf_fit.optimized_model)
return values
def plot_rms(self, bins=0.005):
"""Plot binned rms of lightcurves vs the CCD equation
Parameters
----------
bins : float, optional
bin size used to compute error, by default 0.005 (in days)
"""
self._check_diff()
viz.plot_rms(
self.diff_fluxes,
self.lcs,
bins=bins,
target=self.target["id"],
highlights=self.comparison_stars)
def plot_systematics(self, fields=None, ylim=(0.98, 1.02)):
"""Plot systematics measurements along target light curve
Parameters
----------
fields : list of str, optional
list of systematic to include (must be in self), by default None
ylim : tuple, optional
plot ylim, by default (0.98, 1.02)
"""
if fields is None:
fields = ["dx", "dy", "fwhm", "airmass", "sky"]
flux = self.diff_flux.copy()
flux /= np.nanmean(flux)
if ylim is None:
ylim = (np.nanmin(flux) * 0.99, np.nanmax(flux) * 1.01)
offset = ylim[1] - ylim[0]
if len(plt.gcf().axes) == 0:
plt.figure(figsize=(5 ,10))
viz.plot(self.time, flux, bincolor="black")
for i, field in enumerate(fields):
if field in self:
scaled_data = self.xarray[field].values.copy()
scaled_data = np.nan_to_num(scaled_data, -1)
scaled_data[scaled_data - np.nanmean(scaled_data)
"""
Notes on functionals in pyquante2.
1. I would like for all operations here to be array-scale operations.
- This means e.g. np.power rather than pow
2. I would like as few memory copies as possible.
3. Need to decide how to handle redundant information, i.e.
- rhoa and rhob when non-spin-polarized
This might make tracking total density and the zeta (spin polarization)
worthwhile; the latter could just be zero (or False or None or whatever)
- return values from non-spin-polarized calculations.
"""
import numpy as np
def zero_low_density(rho,cut=1e-10):
rho[rho<cut]=0
return rho
def xs(rho,alpha=2/3.):
"Xalpha X functional. alpha is the X-alpha scaling factor"
fac=-2.25*alpha*np.power(0.75/np.pi,1./3.)
rho3 = np.power(rho,1./3.)
fx = fac*rho*rho3
dfxdna = (4./3.)*fac*rho3
return fx,dfxdna
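# Illustrative note (not from the original source): the X-alpha exchange above
# is fx = fac * rho**(4/3) with fac = -(9/4) * alpha * (3/(4*pi))**(1/3), so the
# derivative returned is dfx/drho = (4/3) * fac * rho**(1/3).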
def xb88_array(rho,gam,tol=1e-6):
# Still doesn't work
rho = zero_low_density(rho)
rho13 = np.power(rho,1./3.)
x = np.zeros(rho.shape,dtype=float)
g = np.zeros(rho.shape,dtype=float)
dg = np.zeros(rho.shape,dtype=float)
x[rho>tol] = np.sqrt(gam)/rho13/rho
g[rho>tol] = b88_g(x[rho>tol])
dg[rho>tol] = b88_dg(x[rho>tol])
dfxdrho = (4./3.)*rho13*(g-x*dg)
dfxdgam = 0.5*dg/np.sqrt(gam)
fx = rho*rho13*g
return fx,dfxdrho,dfxdgam
def xb88(rho,gam,tol=1e-10):
rho = zero_low_density(rho)
fxs = []
dfxdrhos = []
dfxdgams = []
for na,gama in zip(rho,gam):
fx = dfxdrho = dfxdgam = 0
if na > tol:
rho13 = np.power(na,1./3.)
x = np.sqrt(gama)/rho13/na
g = b88_g(x)
dg = b88_dg(x)
dfxdrho = (4./3.)*rho13*(g-x*dg)
dfxdgam = 0.5*dg/np.sqrt(gama)
fx = na*rho13*g
fxs.append(fx)
dfxdrhos.append(dfxdrho)
dfxdgams.append(dfxdgam)
return np.array(fxs),np.array(dfxdrhos),np.array(dfxdgams)
def xpbe(rho,gam,tol=1e-10):
rho = zero_low_density(rho)
fxs = []
dfxdrhos = []
dfxdgams = []
for na,gama in zip(rho,gam):
fx = dfxdrho = dfxdgam = 0
if na > tol:
kap = 0.804
mu = 0.449276922095889E-2
fx0,vx0 = xs(na)
rho13 = na**(1.0/3.0)
rho43 = rho13*na
den = 1+mu*gama/rho43/rho43
F = 1+kap-kap/den
fx = fx0*F
dFdr = -(8./3.)*kap*mu*gama/den/den*na**(-11./3.)
dfxdrho = vx0*F+fx0*dFdr
dFdg = -kap*mu/rho43/rho43/den/den
dfxdgam = fx0*dFdg
fxs.append(fx)
dfxdrhos.append(dfxdrho)
dfxdgams.append(dfxdgam)
return np.array(fxs),np.array(dfxdrhos),np.array(dfxdgams)
def cvwn5(rhoa,rhob,tol=1e-10):
rhoa = zero_low_density(rhoa)
rhob = zero_low_density(rhob)
ecs = []
vcrhoas = []
vcrhobs = []
for na,nb in zip(rhoa,rhob):
rho = na+nb
ec = vcrhoa = vcrhob = 0
if rho>tol:
zeta=(na-nb)/rho
x = pow(3./4./np.pi/rho,1/6.)
epsp = vwn_epsp(x)
epsf = vwn_epsf(x)
g = vwn_g(zeta)
eps = epsp + g*(epsf-epsp)
ec = eps*rho
depsp = vwn_depsp(x)
depsf = vwn_depsf(x)
dg = vwn_dg(zeta)
deps_dx = depsp + g*(depsf-depsp)
deps_dg = (epsf-epsp)*dg
vcrhoa = eps - (x/6.)*deps_dx + deps_dg*(1-zeta)
vcrhob = eps - (x/6.)*deps_dx - deps_dg*(1+zeta)
ecs.append(ec)
vcrhoas.append(vcrhoa)
vcrhobs.append(vcrhob)
return np.array(ecs),np.array(vcrhoas),np.array(vcrhobs)
def clyp(rhoas,rhobs,gaas,gabs,gbbs,tol=1e-10):
fcs = []
fcnas = []
fcnbs = []
fcgaas = []
fcgabs = []
fcgbbs = []
for na,nb,gaa,gab,gbb in zip(rhoas,rhobs,gaas,gabs,gbbs):
fc,fcna,fcnb,fcgaa,fcgab,fcgbb = clyp_point(na,nb,gaa,gab,gbb,tol)
fcs.append(fc)
fcnas.append(fcna)
fcnbs.append(fcnb)
fcgaas.append(fcgaa)
fcgabs.append(fcgab)
fcgbbs.append(fcgbb)
return np.array(fcs),np.array(fcnas),np.array(fcnbs),np.array(fcgaas),np.array(fcgabs),np.array(fcgbbs)
def clyp_point(rhoa,rhob,gamaa,gamab,gambb,tol=1e-10):
# Modified and corrected by AEM in June 2006.
a = 0.04918 # Parameters from the LYP papers
b = 0.132
c = 0.2533
d = 0.349
rho = rhoa+rhob
fc=fcrhoa=fcrhob=fcgamaa=fcgamab=fcgambb=0
assert rhoa >= 0.0
assert rhob >= 0.0
if rho > tol:
rhom3 = np.power(rho,-1./3.)
w = np.exp(-c*rhom3)/(1+d*rhom3)*np.power(rho,-11./3.)
dl = c*rhom3+d*rhom3/(1+d*rhom3)
fcgamaa = -a*b*w*((1./9.)*rhoa*rhob*(1-3*dl-(dl-11)*rhoa/rho)-rhob*rhob)
fcgamab = -a*b*w*((1./9.)*rhoa*rhob*(47-7*dl)-(4./3.)*rho*rho)
fcgambb = -a*b*w*((1./9.)*rhoa*rhob*(1-3*dl-(dl-11)*rhob/rho)-rhoa*rhoa)
fc = -4*a/(1+d*rhom3)*rhoa*rhob/rho \
-np.power(2,11./3.)*0.3*np.power(3*np.pi*np.pi,2./3.)*a*b*w \
*rhoa*rhob*(np.power(rhoa,8./3.)+np.power(rhob,8./3.)) \
+ fcgamaa*gamaa + fcgamab*gamab + fcgambb*gambb
dw = -(1./3.)*np.power(rho,-4./3.)*w*(11*np.power(rho,1./3.)-c-d/(1+d*rhom3))
ddl = (1./3.)*(d*d*np.power(rho,-5./3.)/np.power(1+d*rhom3,2)-dl/rho)
d2f_dradgaa = dw/w*fcgamaa - a*b*w*(
(1./9.)*rhob*(1-3*dl-(dl-11)*rhoa/rho)
-(1./9.)*rhoa*rhob*((3+rhoa/rho)*ddl+(dl-11)*rhob/rho/rho))
d2f_dradgbb = dw/w*fcgambb - a*b*w*(
(1./9.)*rhob*(1-3*dl-(dl-11)*rhob/rho)
-(1./9.)*rhoa*rhob*((3+rhob/rho)*ddl-(dl-11)*rhob/rho/rho)
-2*rhoa)
d2f_dradgab = dw/w*fcgamab-a*b*w*(
(1./9)*rhob*(47-7*dl)-(7./9.)*rhoa*rhob*ddl-(8./3.)*rho)
d2f_drbdgaa = dw/w*fcgamaa - a*b*w*(
(1./9.)*rhoa*(1-3*dl-(dl-11)*rhoa/rho)
-(1./9.)*rhoa*rhob*((3+rhoa/rho)*ddl-(dl-11)*rhoa/rho/rho)
-2*rhob)
d2f_drbdgbb = dw/w*fcgambb - a*b*w*(
(1./9.)*rhoa*(1-3*dl-(dl-11)*rhob/rho)
-(1./9.)*rhoa*rhob*((3+rhob/rho)*ddl+(dl-11)*rhoa/rho/rho))
d2f_drbdgab = dw/w*fcgamab-a*b*w*(
(1./9)*rhoa*(47-7*dl)-(7./9.)*rhoa*rhob*ddl-(8./3.)*rho)
fcrhoa = fcrhob = 0
if rhoa > tol:
fcrhoa = -4*a/(1+d*rhom3)*rhoa*rhob/rho*(
(1./3.)*d*np.power(rho,-4./3.)/(1+d*rhom3)+1/rhoa-1/rho)\
-np.power(2,11./3.)*0.3*np.power(3*np.pi*np.pi,2./3.)*a*b*(
dw*rhoa*rhob*(np.power(rhoa,8./3.)+np.power(rhob,8./3.))
+w*rhob*((11./3.)*np.power(rhoa,8./3.)+np.power(rhob,8./3.))) \
+d2f_dradgaa*gamaa + d2f_dradgbb*gambb + d2f_dradgab*gamab
if rhob > tol:
fcrhob = -4*a/(1+d*rhom3)*rhoa*rhob/rho*(
(1./3.)*d*np.power(rho,-4./3.)/(1+d*rhom3)+1/rhob-1/rho)\
-np.power(2,11./3.)*0.3*np.power(3*np.pi*np.pi,2./3.)*a*b*(
dw*rhoa*rhob*(np.power(rhob,8./3.)+np.power(rhoa,8./3.))
+w*rhoa*((11./3.)*np.power(rhob,8./3.)+np.power(rhoa,8./3.))) \
+d2f_drbdgaa*gamaa + d2f_drbdgbb*gambb + d2f_drbdgab*gamab
return fc,fcrhoa,fcrhob,fcgamaa,fcgamab,fcgambb
def cpbe(na,nb,ga,gab,gb):
"PBE Correlation Functional"
npts = len(na)
ec = np.zeros(npts,'d')
vca = np.zeros(npts,'d')
vcb = np.zeros(npts,'d')
vcga = np.zeros(npts,'d')
vcgab = np.zeros(npts,'d')
vcgb = np.zeros(npts,'d')
for i in range(npts):
ec[i],vca[i],vcb[i],vcga[i],vcgab[i],vcgb[i] = \
cpbe_point(na[i],nb[i],ga[i],gab[i],gb[i])
return ec,vca,vcb,vcga,vcgab,vcgb
def cpbe_point(rhoa,rhob,gama,gamb,gamab,tol=1e-10):
rho = rhoa+rhob
ec = vca = vcb = vcgama = vcgamb = vcgamab = 0
gam = 0.031091
ohm = 0.046644
bet = 0.066725
if rho > tol:
Rs = np.power(3./(4.*np.pi*rho),1./3.)
Zeta = (rhoa-rhob)/rho
Kf = np.power(3*np.pi*np.pi*rho,1./3.)
Ks = np.sqrt(4*Kf/np.pi)
Phi = 0.5*(np.power(1+Zeta,2./3.) + np.power(1-Zeta,2./3.))
Phi3 = Phi*Phi*Phi
gradrho = np.sqrt(gama+gamb+2.*gamab)
T = gradrho/(2*Phi*Ks*rho)
T2 = T*T
T4 = T2*T2
eps,vc0a,vc0b = cpbe_lsd(rhoa,rhob)
expo = (np.exp(-eps/(gam*Phi3))-1.)
A = bet/gam/expo
N = T2+A*T4
D = 1.+A*T2+A*A*T4
H = gam*Phi3*np.log(1.+(bet/gam)*N/D)
ec = rho*(eps+H)
# Derivative stuff
dZ_drhoa = (1.-Zeta)/rho
dZ_drhob = -(1.+Zeta)/rho
dPhi_dZ = np.power(1.+Zeta,-1./3.)/3.-np.power(1.-Zeta,-1./3.)/3.
dPhi_drhoa = dPhi_dZ*dZ_drhoa
dPhi_drhob = dPhi_dZ*dZ_drhob
dKs_drho = Ks/(6*rho)
dT_dPhi = -T/Phi
dT_dKs = -T/Ks
dT_drhoa = -T/rho + dT_dPhi*dPhi_drhoa + dT_dKs*dKs_drho
dT_drhob = -T/rho + dT_dPhi*dPhi_drhob + dT_dKs*dKs_drho
dA_dPhi = -A/expo*np.exp(-eps/(gam*Phi3))*(3*eps/(gam*Phi3*Phi))
dA_deps = -A/expo*np.exp(-eps/(gam*Phi3))*(-1/(gam*Phi3))
deps_drhoa = (vc0a-eps)/rho
deps_drhob = (vc0b-eps)/rho
dA_drhoa = dA_dPhi*dPhi_drhoa + dA_deps*deps_drhoa
dA_drhob = dA_dPhi*dPhi_drhob + dA_deps*deps_drhob
dN_dT = 2*T+4*A*T2*T
dD_dT = 2*A*T + 4*A*A*T*T2
dN_dA = T4
dD_dA = T2+2*A*T4
dH_dPhi = 3*H/Phi
dH_dT = bet*Phi3/(1.+bet/gam*N/D)*(D*dN_dT-N*dD_dT)/D/D
dH_dA = bet*Phi3/(1.+bet/gam*N/D)*(D*dN_dA-N*dD_dA)/D/D
dH_drhoa = dH_dPhi*dPhi_drhoa + dH_dT*dT_drhoa + dH_dA*dA_drhoa
dH_drhob = dH_dPhi*dPhi_drhob + dH_dT*dT_drhob + dH_dA*dA_drhob
vca = vc0a + H + rho*dH_drhoa
vcb = vc0b + H + rho*dH_drhob
# Haven't done the dE_dgamma derivatives yet
return ec,vca,vcb,vcgama,vcgamab,vcgamb
def vwn_xx(x,b,c): return x*x+b*x+c
def vwn_epsp(x): return vwn_eps(x,0.0310907,-0.10498,3.72744,12.9352)
#def vwn_epsf(x): return vwn_eps(x,0.01554535,-0.32500,7.06042,13.0045)
def vwn_epsf(x): return vwn_eps(x,0.01554535,-0.32500,7.06042,18.0578)
def vwn_eps(x,a,x0,b,c):
Q = np.sqrt(4*c-b*b)
eps = a*(np.log(x*x/vwn_xx(x,b,c))
- b*(x0/vwn_xx(x0,b,c))*np.log(np.power(x-x0,2)/vwn_xx(x,b,c))
+ (2*b/Q)*(1-(x0*(2*x0+b)/vwn_xx(x0,b,c))) * np.arctan(Q/(2*x+b)))
#eps = a*(np.log(x*x/vwn_xx(x,b,c)) + (2*b/Q)*np.arctan(Q/(2*x+b))
# - (b*x0/vwn_xx(x0,b,c))*np.log(np.power(x-x0,2)/vwn_xx(x,b,c))
# + (2*(b+2*x0)/Q)*np.arctan(Q/(2*x+b)))
return eps
def vwn_eps0(x,a,x0,b,c):
def X(x): return x*x+b*x+c
Q = np.sqrt(4*c-b*b)
eps = a*(np.log(x*x/X(x)) + (2*b/Q)*np.arctan(Q/(2*x+b))
- (b*x0/X(x0))*np.log(np.power(x-x0,2)/X(x))
+ (2*(b+2*x0)/Q)*np.arctan(Q/(2*x+b)))
return eps
def vwn_depsp(x): return vwn_deps(x,0.0310907,-0.10498,3.72744,12.9352)
#def vwn_depsf(x): return vwn_deps(x,0.01554535,-0.32500,7.06042,13.0045)
def vwn_depsf(x): return vwn_deps(x,0.01554535,-0.32500,7.06042,18.0578)
def vwn_deps(x,a,x0,b,c):
q = np.sqrt(4*c-b*b)
deps = a*(2/x - (2*x+b)/vwn_xx(x,b,c)
- 4*b/(np.power(2*x+b,2)+q*q) - (b*x0/vwn_xx(x0,b,c))
* (2/(x-x0)-(2*x+b)/vwn_xx(x,b,c)-4*(2*x0+b)/(np.power(2*x+b,2)+q*q)))
return deps
def vwn_g(z): return 1.125*(np.power(1+z,4./3.)+np.power(1-z,4./3.)-2)
def vwn_dg(z): return 1.5*(np.power(1+z,1./3.)-np.power(1-z,1./3.))
def b88_g(x,b=0.0042):
return -1.5*np.power(3./4./np.pi,1./3.)-b*x*x/(1.+6.*b*x*np.arcsinh(x))
def b88_dg(x,b=0.0042):
num = 6*b*b*x*x*(x/np.sqrt(x*x+1)
from math import radians
import pytest
from numpy import array, cos, sin, exp
from numpy.testing import assert_allclose
from touvlo.utils import (numerical_grad, g_grad, BGD, SGD, MBGD)
class TestLogisticRegression:
@pytest.fixture(scope="module")
def err(self):
return 0.0001
def test_numeric_grad_1(self, err):
def J(x):
return sum(3 * (x ** 2))
theta = array([[0], [4], [10]])
assert_allclose(array([[0], [24], [60]]),
numerical_grad(J, theta, err),
rtol=0, atol=0.001, equal_nan=False)
def test_numeric_grad_2(self, err):
def J(x):
return sum(1 / x)
theta = array([[5], [8], [20]])
assert_allclose(array([[-0.04], [-0.015625], [-0.0025]]),
numerical_grad(J, theta, err),
rtol=0, atol=0.001, equal_nan=False)
def test_numeric_grad_3(self, err):
def J(x):
return sum(cos(x))
theta = array([[radians(30)],
[radians(45)],
[radians(60)],
[radians(90)]])
assert_allclose(array([[-sin(radians(30))],
[-sin(radians(45))],
[-sin(radians(60))],
[-sin(radians(90))]]),
numerical_grad(J, theta, err),
rtol=0, atol=0.001, equal_nan=False)
def test_numeric_grad_4(self, err):
def J(x):
return sum(exp(x))
# -*- coding: utf-8 -*-
"""GEModelClass
Solves an Aiygari model
"""
##############
# 1. imports #
##############
import time
import numpy as np
from numba import njit, prange
# consav
from consav import ModelClass, jit # baseline model class and jit
from consav import linear_interp # linear interpolation
from consav.grids import equilogspace # grids
from consav.markov import log_rouwenhorst # markov processes
from consav.misc import elapsed
############
# 2. model #
############
class GEModelClass(ModelClass):
#########
# setup #
#########
def settings(self):
""" fundamental settings """
# for safe type inference
self.not_floats = ['Ne','Na','max_iter_solve','max_iter_simulate','path_T']
def setup(self):
""" set baseline parameters """
par = self.par
# a. steady state values
par.r_ss = np.nan
par.w_ss = np.nan
par.K_ss = np.nan
par.Y_ss = np.nan
par.C_ss = np.nan
par.kd_ss = np.nan
par.ks_ss = np.nan
# b. preferences
par.sigma = 1.0 # CRRA coefficient
par.beta = 0.982 # discount factor
# c. production
par.Z = 1.0 # technology level in steady state
par.Z_sigma = 0.01 # shock
par.Z_rho = 0.90 # persistence
par.alpha = 0.11 # Cobb-Douglas coefficient
par.delta = 0.025 # depreciation rate
# d. income parameters
par.rho = 0.966 # AR(1) parameter
par.sigma_e = 0.10 # std. of persistent shock
par.Ne = 7 # number of states
# e. grids
par.a_max = 200.0 # maximum point in grid for a
par.Na = 500 # number of grid points
# f. misc.
par.path_T = 500 # length of path
par.max_iter_solve = 5000 # maximum number of iterations when solving
par.max_iter_simulate = 5000 # maximum number of iterations when simulating
par.solve_tol = 1e-10 # tolerance when solving
par.simulate_tol = 1e-10 # tolerance when simulating
def allocate(self):
""" allocate model, i.e. create grids and allocate solution and simluation arrays """
par = self.par
sol = self.sol
sim = self.sim
# a. grids
par.a_grid = np.zeros(par.Na)
par.e_grid = np.zeros(par.Ne)
par.e_trans = np.zeros((par.Ne,par.Ne))
par.e_ergodic = np.zeros(par.Ne)
par.e_trans_cumsum = np.zeros((par.Ne,par.Ne))
par.e_ergodic_cumsum = np.zeros(par.Ne)
self.create_grids()
# b. solution
sol_shape = (par.Ne,par.Na)
sol.a = np.zeros(sol_shape)
sol.m = np.zeros(sol_shape)
sol.c = np.zeros(sol_shape)
sol.Va = np.zeros(sol_shape)
sol.i = np.zeros(sol_shape,dtype=np.int_)
sol.w = np.zeros(sol_shape)
# path
path_sol_shape = (par.path_T,par.Ne,par.Na)
sol.path_a = np.zeros(path_sol_shape)
sol.path_m = np.zeros(path_sol_shape)
sol.path_c = np.zeros(path_sol_shape)
sol.path_Va = np.zeros(path_sol_shape)
sol.path_i = np.zeros(path_sol_shape,dtype=np.int_)
sol.path_w = np.zeros(path_sol_shape)
# c. simulation
sim_shape = sol_shape
sim.D = np.zeros(sim_shape)
# path
path_sim_shape = path_sol_shape
sim.path_D = np.zeros(path_sim_shape)
sim.path_K = np.zeros(par.path_T)
sim.path_C = np.zeros(par.path_T)
sim.path_Klag = np.zeros(par.path_T)
# jacobians
jac_shape = (par.path_T,par.path_T)
sol.jac_K = np.zeros(jac_shape)
sol.jac_C = np.zeros(jac_shape)
sol.jac_curlyK_r = np.zeros(jac_shape)
sol.jac_curlyK_w = np.zeros(jac_shape)
sol.jac_C_r = np.zeros(jac_shape)
sol.jac_C_w = np.zeros(jac_shape)
sol.jac_r_K = np.zeros(jac_shape)
sol.jac_w_K = np.zeros(jac_shape)
sol.jac_r_Z = np.zeros(jac_shape)
sol.jac_w_Z = np.zeros(jac_shape)
sol.H_K = np.zeros(jac_shape)
sol.H_Z = np.zeros(jac_shape)
sol.G = np.zeros(jac_shape)
def create_grids(self):
""" construct grids for states and shocks """
par = self.par
# a. assets
par.a_grid[:] = equilogspace(0,par.a_max,par.Na)
# b. productivity
e_objs = log_rouwenhorst(par.rho,par.sigma_e,par.Ne)
par.e_grid[:] = e_objs[0]
par.e_trans[:,:] = e_objs[1]
par.e_ergodic[:] = e_objs[2]
par.e_trans_cumsum[:,:] = e_objs[3]
par.e_ergodic_cumsum[:] = e_objs[4]
#########
# solve #
#########
def get_path_Z(self):
""" calculate Z path """
par = self.par
path_Z = np.ones(par.path_T)
path_Z[0] = par.Z*(1+par.Z_sigma)
for t in range(1,par.path_T):
path_Z[t] = (1-par.Z_rho)*par.Z + par.Z_rho*path_Z[t-1]
return path_Z
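# Illustrative example (using the baseline parameters set in setup(), not from
# the original source): with Z = 1.0, Z_sigma = 0.01 and Z_rho = 0.90 the
# productivity path starts at 1.01 and decays geometrically back towards Z:
# 1.01, 1.009, 1.0081, ...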
def implied_r(self,k,Z):
""" implied r given k = K/L and optimal firm behavior """
par = self.par
r = Z*par.alpha*k**(par.alpha-1)-par.delta
return r
def implied_w(self,r,Z):
""" implied w given r and optimal firm behavior """
par = self.par
w = Z*(1.0-par.alpha)*((r+par.delta)/(Z*par.alpha))**(par.alpha/(par.alpha-1))
return w
def firm_demand(self,r,Z):
""" firm demand for k = K/L given r and optimal firm behavior """
par = self.par
k = ((r+par.delta)/(Z*par.alpha))**(1/(par.alpha-1))
return k
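# Consistency note (not from the original source): firm_demand inverts
# implied_r, so for any k > 0 and Z > 0 the round trip recovers k:
# firm_demand(implied_r(k, Z), Z) == k, up to floating point error.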
def firm_production(self,k,Z):
""" firm production """
par = self.par
return Z*k**par.alpha
def steady_state(self,do_print=True):
""" computate steady state statistics """
par = self.par
sol = self.sol
sim = self.sim
# a. firm
par.w_ss = self.implied_w(par.r_ss,par.Z)
par.kd_ss = self.firm_demand(par.r_ss,par.Z)
par.Y_ss = self.firm_production(par.kd_ss,par.Z)
# b. solve household problem
self.solve_household_ss(par.r_ss,do_print=do_print)
self.simulate_household_ss(do_print=do_print)
# implied supply of capital and consumption
par.ks_ss = np.sum(sim.D*sol.a)
par.C_ss = np.sum(sim.D*sol.c)
import numpy as np
import torch.nn as nn
import torch
from torch.nn import functional as F
from scipy.spatial.transform import Rotation as R
from scipy.ndimage import gaussian_filter
import scipy.io as sio
def OnehotEncoding(arr, val, c):
# val = np.repeat(np.array(val).reshape(2, 2).T, 3).reshape(-1, 1)
ind = (arr - val[0]) // ((val[1] - val[0]) / (c - 1))
ind = ind.type(dtype=torch.long)
out = torch.zeros((c, 1))
out[ind, :] = 1
return out
def OnehotDecoding(arr, val, c):
# val = np.repeat(np.array(val).reshape(2, 2).T, 3).reshape(-1, 6)
out = (arr * ((val[1] - val[0]) / (c - 1))) + val[0]
return out
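# Illustrative example (assumed values, not from the original source): with
# val = (0.0, 10.0) and c = 11 the grid step is 1.0, so
# OnehotEncoding(torch.tensor([3.0]), (0.0, 10.0), 11) returns an (11, 1)
# one-hot column with the 1 at index 3, and
# OnehotDecoding(torch.tensor(3.0), (0.0, 10.0), 11) maps the index back to 3.0.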
def ReadText(vis):
testwindow = vis.text("Hello World!")
return 0
def PlotImage(vis, img, win, env, title=""):
# img = img.detach.cpu().numpy()
win = vis.images(img, win=win, opts=dict(title=title), env=env)
return win
def PlotLoss(vis, x, y, win, env, legend, title=""):
if win == None:
win = vis.line(Y=y, X=x, win=win, opts=dict(title=title, legend=legend, showlegend=True), env=env)
else:
win = vis.line(Y=y, X=x, win=win, opts=dict(title=title, legend=legend, showlegend=True), env=env,
update='append')
# win = vis.line(Y=y, X=x, win=win, opts=dict(title=title, legend=['Train', 'Validation'], showlegend=True), update='append')
return win
def crop_image(image, label, j):
sz = image.size()
x = [x for x in range(sz[2] //128)]
y = [y for y in range(sz[3] //128)]
x = np.repeat(np.tile(x, (1, sz[2] //128)).reshape((-1)), image.size()[-1]//32 + 1)
y = np.repeat(y, sz[3] //128 * (image.size()[-1]//32 + 1))
z = [z for z in range(image.size()[-1]//32 + 1)]
z = np.tile(z, (1, sz[2] //128 * sz[3] //128)).reshape((-1))
if j % (image.size()[-1]//32 + 1) == image.size()[-1]//32:
img = image[:, :, x[j] * 128:(x[j] + 1) * 128, y[j] * 128:(y[j] + 1) * 128, -32:]
lb = label[:, :, x[j] * 128:(x[j] + 1) * 128, y[j] * 128:(y[j] + 1) * 128, -32:]
else:
img = image[:, :, x[j] * 128:(x[j] + 1) * 128, y[j] * 128:(y[j] + 1) * 128, z[j] * 32:(z[j] + 1) * 32]
lb = label[:, :, x[j] * 128:(x[j] + 1) * 128, y[j] * 128:(y[j] + 1) * 128, z[j] * 32:(z[j] + 1) * 32]
return img, lb
def normalization(input):
min = input.min()
input = input - min
max = input.max()
output = input / max
return output
def standardization(input):
mean = input.mean()
std = torch.std(input)
input = input - mean
output = input/std
return output
def CE(output, target, weights):
nll = nn.NLLLoss(weight=torch.Tensor([1, 7500]).float())
return nll(output, target)
def dice_loss(true, logits, eps=1e-7):
"""Computes the SørensenDice loss.
Note that PyTorch optimizers minimize a loss. In this
case, we would like to maximize the dice loss so we
return the negated dice loss.
Args:
true: a tensor of shape [B, 1, H, W].
logits: a tensor of shape [B, C, H, W]. Corresponds to
the raw output or logits of the model.
eps: added to the denominator for numerical stability.
Returns:
dice_loss: the SørensenDice loss.
"""
num_classes = logits.shape[1]
if num_classes == 1:
true_1_hot = torch.eye(num_classes + 1)[torch.tensor(true.squeeze(1), dtype=torch.long)]
true_1_hot = true_1_hot.permute(0, 3, 1, 2).float()
true_1_hot_f = true_1_hot[:, 0:1, :, :]
true_1_hot_s = true_1_hot[:, 1:2, :, :]
true_1_hot = torch.cat([true_1_hot_s, true_1_hot_f], dim=1)
pos_prob = torch.sigmoid(logits)
neg_prob = 1 - pos_prob
probas = torch.cat([pos_prob, neg_prob], dim=1)
else:
true_1_hot = torch.eye(num_classes)[true.squeeze(1)]
true_1_hot = true_1_hot.permute(0, 3, 1, 2).float()
probas = F.softmax(logits, dim=1)
true_1_hot = true_1_hot.type(logits.type())
dims = (0,) + tuple(range(2, true.ndimension()))
intersection = torch.sum(probas * true_1_hot, dims)
cardinality = torch.sum(probas + true_1_hot, dims)
dice_loss = (2. * intersection / (cardinality + eps)).mean()
return (1 - dice_loss)
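# Illustrative note (not from the original source): the Dice coefficient is
# 2*|A intersect B| / (|A| + |B|). For a perfect prediction the soft
# intersection equals half the cardinality, so the coefficient is 1 and the
# returned loss is 0; a prediction with no overlap gives a loss close to 1.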
def cartesian_product(*arrays):
la = len(arrays)
dtype = np.result_type(*arrays)
arr = np.empty([len(a) for a in arrays] + [la], dtype=dtype)
for i, a in enumerate(np.ix_(*arrays)):
arr[..., i] = a
return arr.reshape(-1, la)
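# Illustrative example (assumed inputs, not from the original source):
# >>> cartesian_product(np.array([1, 2]), np.array([3, 4]))
# array([[1, 3],
#        [1, 4],
#        [2, 3],
#        [2, 4]])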
# intersection function
def isect_line_plane_v3(p0, p1, p_co, p_no, epsilon=1e-6):
"""
p0, p1: Define the line.
p_co, p_no: define the plane:
p_co Is a point on the plane (plane coordinate).
p_no Is a normal vector defining the plane direction;
(does not need to be normalized).
Return a Vector or None (when the intersection can't be found).
"""
# # Test
# p0 = torch.tensor([[0, 0, 0], [0, 0, 0]], dtype=torch.float32).view(2, 3).T
# p1 = torch.tensor([[0, 0, 1], [1, 2, 3]], dtype=torch.float32).view(2, 3).T
#
# p_co = torch.tensor([20, 10, 30], dtype=torch.float32).view(3, 1)
# p_no = torch.tensor([0, 0, 10], dtype=torch.float32).view(3, 1)
# Normalize the normal vector of the plane
n = torch.norm(p_no, dim=0)
p_no = p_no / n
# Normalize the direction vector of the line and take its dot product with the plane normal
u = p1 - p0
n = torch.norm(u, dim=0)
u = u / n
dot = torch.mm(u.T, p_no)
# idx = np.where(abs(dot.cpu()) > torch.tensor(epsilon))[0]
# p0 = p0[:, idx]
# p1 = p1[:, idx]
# u = p1 - p0
# n = torch.norm(u, dim=0)
# u = u / n
# dot = torch.mm(u.T, p_no)
# The factor of the point between p0 -> p1 (0 - 1)
# if 'fac' is between (0 - 1) the point intersects with the segment.
# Otherwise:
# < 0.0: behind p0.
# > 1.0: infront of p1.
w = p0 - p_co
fac = -torch.mm(w.T, p_no) / dot
u = u * fac.T
vec = p0 + u
# tt = vec.cpu().numpy()
return vec
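# Illustrative example (assumed values, not from the original source): for the
# plane through p_co = (20, 10, 30) with normal p_no = (0, 0, 10) and a line
# from p0 = (0, 0, 0) towards p1 = (0, 0, 1), the returned intersection is
# (0, 0, 30), matching the first column of the commented test above.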
# ----------------------
# generic math functions
def dot_v3v3(v0, v1):
return (
(v0[:, 0] * v1[:, 0]) +
(v0[:, 1] * v1[:, 1]) +
(v0[:, 2] * v1[:, 2])
)
def len_squared_v3(v0):
return dot_v3v3(v0, v0)
def mul_v3_fl(v0, f):
return (
v0[0] * f,
v0[1] * f,
v0[2] * f,
)
def create_ranges_nd(start, stop, N, endpoint=True):
if endpoint==1:
divisor = N-1
else:
divisor = N
steps = (1.0/divisor) * (stop - start)
return start[...,None] + steps[...,None]*np.arange(N)
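# Illustrative example (assumed inputs, not from the original source): a
# vectorised linspace over several start/stop pairs at once:
# >>> create_ranges_nd(np.array([0.0, 10.0]), np.array([1.0, 20.0]), 3)
# array([[ 0. ,  0.5,  1. ],
#        [10. , 15. , 20. ]])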
def DRR_generation(CT, R_pred, num, proj_pix):
"""
:param CT:
:param R_pred:
:param num:
:param R_:
:return:
"""
ct_pix = [512, 512]
min_v = torch.tensor(np.array([-(ct_pix[0]-1)/2, -(ct_pix[1]-1)/2, -(CT.size(1)-1)/2]), dtype=torch.float32).cuda(1)
max_v = torch.tensor(np.array([(ct_pix[0]-1)/2, (ct_pix[1]-1)/2, (CT.size(1)-1)/2]), dtype=torch.float32).cuda(1)
# Camera matrix
R_pred = R_pred.cpu().detach().numpy()
# R_pred = np.array([[15, -15, 0, 0, 0, 0]], dtype=np.float32)
# R_pred = R_.cpu().numpy()
Rx = R.from_euler('x', -R_pred[:, 0], degrees=True)
Ry = R.from_euler('y', -R_pred[:, 1], degrees=True)
Rz = R.from_euler('z', -R_pred[:, 2], degrees=True)
r = Rx * Ry * Rz
O = torch.tensor([0, 0, -160], dtype=torch.float32).view(3, 1, 1).cuda(1)
t = -O - torch.tensor(np.array([[R_pred[:, 3]], [R_pred[:, 4]], [R_pred[:, 5]]])).cuda(1)
# t = (t - (min_v.reshape(3, 1, 1) + max_v.reshape(3, 1, 1))/2) / ((max_v.reshape(3, 1, 1) - min_v.reshape(3, 1, 1))/2)
f = 256
n = 200
K = torch.tensor([[f, 0, proj_pix[0]/2], [0, f, proj_pix[1]/2], [0, 0, 1]], dtype=torch.float32).cuda(1)
rot = torch.tensor(r.as_dcm(), dtype=torch.float32).cuda(1)
## For visualization (1)
# s_min, s_max = 0, 200
# ss = 1
# img_pts = np.array([np.mgrid[1:proj_pix[1]+1, 1:proj_pix[0]+1].T.reshape(-1, 2)] * int(((s_max-s_min)/ss)))
# img_pts = torch.tensor(img_pts, dtype=torch.float32).view((-1, 2))
# s = torch.tensor(np.mgrid[s_min:s_max:ss].repeat(proj_pix[0] * proj_pix[1]), dtype=torch.float32)
# s = s.view((-1, 1))
# img_pts = torch.cat([img_pts*s, s], dim=-1).numpy()
# img_pts = img_pts.reshape((int((s_max - s_min) / ss), proj_pix[0], proj_pix[1], 3)).transpose((3, 0, 1, 2)).reshape(
# 3, -1, 1)
# img_pts = torch.tensor(np.tile(img_pts, (1, 1, num)).transpose((2, 0, 1))).cuda(1)
# backp = torch.matmul(torch.matmul(torch.inverse(rot), torch.inverse(K)),
# img_pts - torch.matmul(K, t.view((3, num))).T.reshape((num, 3, 1)))
# backp = backp.view((num, 3, int((s_max - s_min) / ss), -1)).permute((0, 3, 2, 1)) # num, -1, 200, 3
## Original Code (2)
img_pts = np.array([np.mgrid[1:proj_pix[1] + 1, 1:proj_pix[0] + 1].T.reshape(-1, 2)] * 2)
img_pts = torch.tensor(img_pts, dtype=torch.float32).view((-1, 2))
s = torch.tensor(np.mgrid[0:2:1].repeat(proj_pix[0] * proj_pix[1]), dtype=torch.float32)
s = s.view((-1, 1))
img_pts = torch.cat([img_pts*s, s], dim=-1).numpy()
img_pts = img_pts.reshape((2, proj_pix[0], proj_pix[1], 3)).transpose((3, 0, 1, 2)).reshape(3, -1, 1)
img_pts = torch.tensor(np.tile(img_pts, (1, 1, num)).transpose((2, 0, 1))).cuda(1)
# -*- coding: utf-8 -*-
"""
CESSIPy: Civil Engineer Stochastic System Identification for Python
Author: <NAME>
Support email: <EMAIL>
Site: https://github.com/MatheusCarini/CESSIPy
MIT License
Federal University of Rio Grande do Sul, Porto Alegre, Brazil
Version: 1.1
Date: 20211012
"""
#=============================================================================
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from MRPy import MRPy
from scipy import signal
from scipy.optimize import curve_fit
from matplotlib.gridspec import GridSpec
plt.rcParams["font.family"] = "Times New Roman"
mpl.rcParams['mathtext.fontset'] = 'cm'
#=============================================================================
# Naked Class
#=============================================================================
class auxclass(np.ndarray):
"""
Create a simple class to improve code readability
"""
def __new__(cls, np_array):
return np.asarray(np_array).view(cls)
#=============================================================================
# Time-Domain
#=============================================================================
def rearrange_data(self,ref):
"""
Rearrange the l outputs by positioning the r reference outputs in the first
rows.
Parameters
-------
self : MRPy_like
Time data MRPy object.
ref : tuple, list
List of reference sensors.
Returns
-------
yk : MRPy_like
MRPy object that contains the reference outputs in the first rows and
the attributes r and l.
.. l : MRPy attribute
Number of outputs.
.. r : MRPy attribute
Number of reference outputs.
"""
r = len(ref)
l = self.shape[0]
yk = MRPy(np.empty((l,self.N)),fs=self.fs)
yk.r = r
yk.l = l
yk[:r,:] = self[ref,:]
yk[r:,:] = np.delete(self, ref, 0)
return yk
#-----------------------------------------------------------------------------
def Toeplitz(self, i):
"""
Create the block Toeplitz matrix, which gathers the output covariance
estimates up to 2*i-1 time lags.
Parameters
-------
self : MRPy_like
MRPy object that contains the time data and the attributes r and l.
i : int
Number of time lags used to calculate the covariances length.
Note that they are estimated up to 2*i-1 time lags.
Returns
-------
T : auxclass_like
Auxclass object that contains the block Toeplitz matrix and the
attributes r, l and i.
"""
N = self.N - 2*i + 1
r = self.r
l = self.l
Ypref = np.zeros((r*i,N))
Yf = np.zeros((l*i,N))
for k in range(i):
Ypref[k*r:k*r+r,:] = self[:r,k:k+N]
Yf [k*l:k*l+l,:] = self[: ,k+i:k+i+N]
Ypref = Ypref/N**0.5
Yf = Yf /N**0.5
T = auxclass(Yf @ Ypref.T)
T.fs, T.r, T.l, T.i = self.fs, r, l, i
return T
#-----------------------------------------------------------------------------
def SSI_COV(T, no):
"""
Covariance-Driven Stochastic Subspace Identification Method
Estimate the eigenfrequencies, damping ratios and mode shapes of the block
Toeplitz matrix.
Parameters
-------
T : auxclass_like
Auxclass object that contains the block Toeplitz matrix and the
attributes SVD, r, l and i.
no : int
State space model order.
Returns
-------
fn : ndarray
Eigenfrequencies array.
zt : ndarray
Damping ratios array.
V : ndarray
Mode shapes array as columns.
See also
-------
Toeplitz, SSI_COV_iterator
"""
l = T.l
i = T.i
U, S, VT = T.SVD
U1 = U[:,:no]
S1 = np.eye(no)*S[:no]
Oi = U1 @ S1**0.5
C = Oi[:l,:]
A = np.linalg.pinv(Oi[:l*(i-1),:]) @ Oi[l:l*i+1,:]
Λd, Ψ = np.linalg.eig(A)
λ = np.log(Λd)*T.fs
fn = np.abs(λ)/(2*np.pi)
zt = -np.real(λ)/np.abs(λ)
V = C @ Ψ
return fn, zt, V
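# Minimal call sketch (values are illustrative): build the Toeplitz matrix,
# attach its SVD and pick a model order, e.g.
# T = Toeplitz(yk, i=30); T.SVD = np.linalg.svd(T); fn, zt, V = SSI_COV(T, no=10)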
#-----------------------------------------------------------------------------
def SSI_COV_iterator(yk, i, nmin, nmax, incr=2, plot=False):
"""
Iterate the SSI_COV function for model orders from nmin to nmax and step
equal incr.
Estimate the eigenfrequencies, damping ratios and mode shapes using
SSI COV algorithm for increasing state space orders.
Parameters
-------
yk : MRPy_like
MRPy object returned by rearrange_data function.
i : int
Number of time lags used to calculate the covariances length.
Note that they are estimated up to 2*i-1 time lags.
nmin : int
The starting order number of the state space model.
nmax : int
The end order number of the state space model.
incr : int, optional
Step, spacing between model orders. The default step size is 2.
plot : bool, optional
If true, plots the singular values graph of the Toeplitz matrix.
Default is false.
Returns
-------
FN : ndarray
Eigenfrequencies 2D array. Each row originates from the same state
space model.
ZT : ndarray
Damping ratios 2D array. Each row originates from the same state
space model.
VV : ndarray
Mode shapes 3D array. The first index selects the state space order.
Notes
-------
The modal parameters of the first nmin state space model are FN[0,:],
ZT[0,:] and VV[0,:,:].
"""
T = Toeplitz(yk, i)
T.method = 'SSI COV'
if plot: plot_singular_values(T)
T.SVD = np.linalg.svd(T)
n = np.arange(nmin,nmax+incr,incr)
FN = np.zeros((n.shape[0],nmax))
ZT = np.zeros((n.shape[0],nmax))
VV = np.zeros((n.shape[0],T.l,nmax),dtype=np.complex_)
for ii, no in np.ndenumerate(n):
FN[ii,:no], ZT[ii,:no], VV[ii,:,:no] = SSI_COV(T,no)
return FN, ZT, VV
#-----------------------------------------------------------------------------
def projection(yk, i):
"""
Compute the QR factorization of the Hankel matrix and calculate the
matrices Piref, Pi1ref and Yii.
Parameters
-------
yk : MRPy_like
MRPy object returned by rearrange_data function.
i : int
Number of time lags used to calculate the covariances length.
Note that they are estimated up to 2*i-1 time lags.
Returns
-------
Pi : auxclass_like
Auxclass object that contains the projection of the row space of the
future outputs into the rows space of the past reference outputs and
the attributes r, l and i.
Pi1 : array_like
Projection array changing the separation between past and future
outputs one row below.
Yii : array_like
Subset of the block Hankel matrix.
"""
N = yk.N - 2*i + 1
r = yk.r
l = yk.l
Ypref = np.zeros((r*i,N))
Yf = np.zeros((l*i,N))
for k in range(i):
Ypref[k*r:k*r+r,:] = yk[:r,k:k+N]
Yf [k*l:k*l+l,:] = yk[: ,k+i:k+i+N]
Ypref = Ypref/N**0.5
Yf = Yf /N**0.5
Href = np.vstack([Ypref,Yf])
R = np.linalg.qr(Href.T, mode='r').T
Pi = auxclass(R[r*i:,:r*i] @ np.eye(r*i,N))
Pi1 = R[r*i+l:,:r*i+r] @ np.eye(r*i+r,N)
Yii = R[r*i:r*i+l,:r*i+l] @ np.eye(r*i+l,N)
Pi.fs, Pi.r, Pi.l, Pi.i = yk.fs, r, l, i
return Pi, Pi1, Yii
#-----------------------------------------------------------------------------
def SSI_DATA(Pi, Pi1, Yii, no):
"""
Data-Driven Stochastic Subspace Identification Method
Estimate the eigenfrequencies, damping ratios and mode shapes of the
Piref, Pi1ref and Yii matrices.
Parameters
-------
Pi, Pi1, Yii
See projection.
no : int
State space model order.
Returns
-------
fn : ndarray
Eigenfrequencies array.
zt : ndarray
Damping ratios array.
V : ndarray
Mode shapes array as columns.
"""
U, S, VT = Pi.SVD
U1 = U[:,:no]
S1 = np.eye(no)*S[:no]
Oi = U1 @ S1**0.5
Oi1 = Oi[:-Pi.l,:]
Xi = np.linalg.pinv(Oi) @ Pi
Xi1 = np.linalg.pinv(Oi1) @ Pi1
AC = np.vstack([Xi1,Yii]) @ np.linalg.pinv(Xi)
A = AC[:no,:]
C = AC[no:,:]
Λd, Ψ = np.linalg.eig(A)
λ = np.log(Λd)*Pi.fs
fn = np.abs(λ)/(2*np.pi)
zt = -np.real(λ)/np.abs(λ)
V = C @ Ψ
return fn, zt, V
#-----------------------------------------------------------------------------
def SSI_DATA_iterator(yk, i, nmin, nmax, incr=2, plot=False):
"""
Iterate the SSI_DATA function for model orders from nmin to nmax and step
equal incr.
Estimate the eigenfrequencies, damping ratios and mode shapes using
SSI DATA algorithm for increasing state space orders.
Parameters
-------
yk : MRPy_like
MRPy object returned by rearrange_data function.
i : int
Number of time lags used to calculate the covariances length.
Note that they are estimated up to 2*i-1 time lags.
nmin : int
The starting order number of the state space model.
nmax : int
The end order number of the state space model.
incr : int, optional
Step, spacing between model orders. The default step size is 2.
plot : bool, optional
If true, plots the singular values graph of the Pi matrix.
Default is false.
Returns
-------
FN : ndarray
Eigenfrequencies 2D array. Each row originates from the same state
space model.
ZT : ndarray
Damping ratios 2D array. Each row originates from the same state
space model.
VV : ndarray
Mode shapes 3D array. The first index selects the state space order.
Notes
-------
The modal parameters of the first nmin state space model are FN[0,:],
ZT[0,:] and VV[0,:,:].
"""
Pi, Pi1, Yii = projection(yk, i)
Pi.method = 'SSI DATA'
if plot: plot_singular_values(Pi)
Pi.SVD = np.linalg.svd(Pi)
n = np.arange(nmin,nmax+incr,incr)
FN = np.zeros((n.shape[0],nmax))
ZT = np.zeros((n.shape[0],nmax))
VV = np.zeros((n.shape[0],Pi.l,nmax),dtype=np.complex_)
for ii, no in np.ndenumerate(n):
FN[ii,:no],ZT[ii,:no],VV[ii,:,:no] = SSI_DATA(Pi,Pi1,Yii,no)
return FN, ZT, VV
#-----------------------------------------------------------------------------
def Fast_SSI(yk, i, nmin, nmax, incr=2, plot=False, based='COV'):
"""
Estimate the eigenfrequencies, damping ratios and mode shapes using Fast
Subspace-Based System Identification algorithm 2 from [1] for increasing
state space orders.
Parameters
-------
yk : MRPy_like
MRPy object returned by rearrange_data function.
i : int
Number of time lags used to calculate the covariances length.
Note that they are estimated up to 2*i-1 time lags.
nmin : int
The starting order number of the state space model.
nmax : int
The end order number of the state space model.
incr : int, optional
Step, spacing between model orders. The default step size is 2.
plot : bool, optional
If true, plots the singular values graph. Default is false.
based : string, optional
SSI based method. If 'COV', it uses the covariance-driven SSI. If
'DATA', it uses the data-driven SSI. Default is 'COV'.
Returns
-------
FN : ndarray
Eigenfrequencies 2D array. Each row originates from the same state
space model.
ZT : ndarray
Damping ratios 2D array. Each row originates from the same state
space model.
VV : ndarray
Mode shapes 3D array. The first index selects the state space order.
Notes
-------
The modal parameters of the first nmin state space model are FN[0,:],
ZT[0,:] and VV[0,:,:].
Reference
----------
.. [1] <NAME>; <NAME>. Fast Multi-Order Computation of System
Matrices in Subspace-Based System Identification. Control
Engineering Practice, Elsevier, 2012, 20 (9), pp.882-894.
10.1016/j.conengprac.2012.05.005. hal-00724068
"""
if based.lower() == 'cov':
T = Toeplitz(yk, i)
T.method = 'SSI COV'
if plot: plot_singular_values(T)
U, S, VT = np.linalg.svd(T)
U1 = U[:,:nmax]
S1 = np.eye(nmax)*S[:nmax]
Oi = U1 @ S1**0.5
elif based.lower() == 'data':
Pi, Pi1, Yii = projection(yk, i)
Pi.method = 'SSI DATA'
if plot: plot_singular_values(Pi)
U, S, VT = np.linalg.svd(Pi)
U1 = U[:,:nmax]
S1 = np.eye(nmax)*S[:nmax]
Oi = U1 @ S1**0.5
else:
sys.exit('based method must be COV or DATA')
l = yk.l
Oiu = Oi[:l*(i-1),:]
Oid = Oi[l:l*i+1 ,:]
C = Oi[:l,:]
Q, R = np.linalg.qr(Oiu)
St = Q.T @ Oid
n = np.arange(nmin,nmax+incr,incr)
FN = np.zeros((n.shape[0],nmax))
ZT = np.zeros((n.shape[0],nmax))
VV = np.zeros((n.shape[0],l,nmax),dtype=np.complex_)
for ii, no in np.ndenumerate(n):
A = np.linalg.inv(R[:no,:no]) @ St[:no,:no]
Cj = C[:,:no]
Λd, Ψ = np.linalg.eig(A)
λ = np.log(Λd)*yk.fs
FN[ii,:no] = np.abs(λ)/(2*np.pi)
ZT[ii,:no] = -np.real(λ)/np.abs(λ)
VV[ii,:,:no] = Cj @ Ψ
return FN, ZT, VV
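# Minimal call sketch (values are illustrative):
# FN, ZT, VV = Fast_SSI(yk, i=30, nmin=2, nmax=40, incr=2, based='COV')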
#-----------------------------------------------------------------------------
def IV(T, no):
"""
Instrumental Variable Method
Estimate the eigenfrequencies, damping ratios and mode shapes of the block
Toeplitz matrix.
Parameters
-------
T : auxclass_like
Auxclass object that contains the block Toeplitz matrix and the
attributes SVD, r, l and i.
no : int
State space model order.
Returns
-------
fn : ndarray
Eigenfrequencies array.
zt : ndarray
Damping ratios array.
V : ndarray
Mode shapes array as columns.
See also
-------
Toeplitz
"""
r = T.r
l = T.l
αb = np.linalg.lstsq(T[:,-no*r:],
-T[:,-(no+1)*r:-no*r], rcond=None)[0]
Apcomp = np.zeros((no*r,no*r))
Apcomp[:-r,r:] += np.eye((no-1)*r)
for kk in range(no):
Apcomp[-r:,r*kk:r*(kk+1)] -= αb.T[:,r*(no-kk)-r:r*(no-kk)]
Λd, Ψ = np.linalg.eig(Apcomp)
λ = np.log(Λd)*T.fs
fn = np.abs(λ)/(2*np.pi)
zt = -np.real(λ)/np.abs(λ)
Gmref = (Ψ[:r,:]).T
Γmref = np.zeros((no*r,no*r),dtype=np.complex_)
for ii in range(no):
Γmref[:,ii*r:(ii+1)*r] = np.diag(Λd**(no-ii-1)) @ Gmref
V = T[:l,-no*r:] @ np.linalg.inv(Γmref)
return fn, zt, V
#-----------------------------------------------------------------------------
def IV_iterator(yk, i, nmin, nmax, incr=2, plot=False):
"""
Iterate the IV function for model orders from nmin to nmax and step equal
incr.
Estimate the eigenfrequencies, damping ratios and mode shapes using IV
algorithm for increasing state space orders.
Parameters
-------
yk : MRPy_like
MRPy object returned by rearrange_data function.
i : int
Number of time lags used to calculate the covariances length.
Note that they are estimated up to 2*i-1 time lags.
nmin : int
The starting order number of the state space model.
nmax : int
The end order number of the state space model.
incr : int, optional
Step, spacing between model orders. The default step size is 2.
plot : bool, optional
If true, plots the singular values graph of the Toeplitz matrix.
Default is false.
Returns
-------
FN : ndarray
Eigenfrequencies 2D array. Each row originates from the same state
space model.
ZT : ndarray
Damping ratios 2D array. Each row originates from the same state
space model.
VV : ndarray
Mode shapes 3D array. The first index selects the state space order.
Notes
-------
The relation between ARMA order p and state space order n is n = p * r.
The modal parameters of the first nmin state space model are FN[0,:],
ZT[0,:] and VV[0,:,:].
"""
T = Toeplitz(yk,i)
T.method = 'IV'
if plot: plot_singular_values(T)
n = np.arange(nmin,nmax+incr,incr)
FN = np.zeros((n.shape[0],nmax*T.r))
ZT = np.zeros((n.shape[0],nmax*T.r))
VV = np.zeros((n.shape[0],T.l,nmax*T.r),dtype=np.complex_)
for ii, no in np.ndenumerate(n):
FN[ii,:no*T.r], ZT[ii,:no*T.r], VV[ii,:,:no*T.r] = IV(T,no)
return FN, ZT, VV
#-----------------------------------------------------------------------------
def stabilization_diagram(FN, ZT, VV, title,
tol = np.array(([0.01,0, 100],
[0.05,0,0.05],
[0.10,0, 1])), plot=True):
"""
Compute the stable poles and plot the stabilization diagram
Parameters
-------
FN, ZT, VV
Modal parameters returned by SSI_COV_Iterator, SSI_DATA_Iterator and
IV_Iterator functions.
title : str
Graph title.
tol : ndarray, optional
Array of stabilization criteria.
Rows: frequencies, damping ratios and MAC values respectively.
Columns: percentage tolerance, minimum and maximum values respectively.
Default is:
[0.01,0,100 ] Δf = 1%; fmin = 0 Hz; fmax = 100 Hz
[0.05,0,0.05] Δζ = 5%; ζmin = 0%; ζmax = 5%
[0.10,0,1 ] MAC >= (1 - 0.10) = 0.90
plot : bool, optional
If true, plots the stabilization diagram. Default is true.
Returns
-------
stb : array_like
Boolean array that contains True for stable poles. Each row originates
from the same state space model.
Notes
-------
First stb index refers to model order. For example, the last stable poles
row stb[-1,:] originates from nmax model order.
"""
nmin = np.count_nonzero(FN, axis=1)[0]
nmax = np.count_nonzero(FN, axis=1)[-1]
incr = (nmax-nmin)//(FN.shape[0]-1)
n = np.arange(nmin,nmax+incr,incr)
stb = np.full(FN.shape, False)
stbf = np.full(FN.shape, False)
stbz = np.full(FN.shape, False)
for ii in range(1,n.shape[0]):
no = n[ii]; ia = ii - 1
na = n[ia]
# Frequencies
b1 = (FN[ii,:no] >= tol[0,1]) & (FN[ii,:no] <= tol[0,2])
dif = FN[ia,:na] - FN[ii,:no].reshape(-1,1)
ind = np.abs(dif).argmin(axis=1)
res = np.diagonal(dif[:,ind])
b1 = (np.abs(res/FN[ii,:no]) < tol[0,0]) & b1
# Damping ratios
b2 = (ZT[ii,:no] >= tol[1,1]) & (ZT[ii,:no] <= tol[1,2])
dif = ZT[ia,:na] - ZT[ii,:no].reshape(-1,1)
res = np.diagonal(dif[:,ind])
b2 = (np.abs(res/ZT[ii,:no]) < tol[1,0]) & b2 & b1
# MAC
MCv = MAC(VV[ia,:,:na],VV[ii,:,:no])
res = np.abs(np.diag(MCv[ind,:]))
b3 = (res > 1 - tol[2,0]) & b2
stbf[ii,:no] = b1
stbz[ii,:no] = b2
stb [ii,:no] = b3
if plot:
a_for = {'fontname':'Times New Roman','size':16}
l_for = {'fontname':'Times New Roman','size':14}
t_for = {'fontname':'Times New Roman','size':12}
g_for = {'family' :'Times New Roman','size':12}
plt.figure(figsize=(10,5))
for ii in range(n.shape[0]):
yi = n[ii]*np.ones(n[ii])
ko = plt.scatter(FN[ii,:n[ii]],yi,s=2,c='k')
go = plt.scatter(FN[ii,:n[ii]][stbf[ii,:n[ii]]],
yi[stbf[ii,:n[ii]]],s=4,c='g')
bo = plt.scatter(FN[ii,:n[ii]][stbz[ii,:n[ii]]],
yi[stbz[ii,:n[ii]]],s=4,c='b')
ro = plt.scatter(FN[ii,:n[ii]][stb [ii,:n[ii]]],
yi[stb [ii,:n[ii]]],s=8,c='r')
plt.xlim((0,tol[0,2]))
plt.ylim((0,n[-1]))
plt.xticks(**t_for)
plt.yticks(n,**t_for)
plt.xlabel('f (Hz)',**l_for)
plt.ylabel('Model Order',**l_for)
plt.suptitle(title + ' Stabilization Diagram',**a_for)
plt.legend([ko, go, bo, ro],
["New pole",
"Stable frequency",
"Stable frequency and damping",
"Stable frequency, damping and mode shape"], prop=g_for)
plt.tight_layout(rect=[0, 0, 1, 0.97])
return stb
#-----------------------------------------------------------------------------
def stable_modes(FN, ZT, V, stb, tol=0.01, spo=6):
"""
Gather close stable poles into the same mode.
Parameters
-------
FN, ZT, V
Modal parameters returned by SSI_COV_Iterator, SSI_DATA_Iterator and
IV_Iterator functions.
stb : array_like
Boolean array returned by stabilization_diagram function.
tol : float
Frequency tolerance. Close poles are gathered into a single mode.
Default is 0.01 = 1%.
spo : int
Minimum number of stable poles in order to assign the mode as stable.
Default is 6.
Returns
-------
fn : ndarray
Eigenfrequencies array.
zt : ndarray
Damping ratios array.
v : ndarray
Mode shapes array as columns.
Notes
-------
Each physical mode appears as a pair of complex-conjugate stable poles.
"""
FN = FN[stb]
ZT = ZT[stb]
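# V[0,:,stb[0]].T has zero columns (stb[0,:] is all False by construction),
# so it only initializes VV with l rows; the loop below appends the stable mode shapes.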
VV = V[0,:,stb[0]].T
for j in range(stb.shape[0]):
VV = np.hstack((VV,V[j,:,stb[j]].T))
fsi = np.argsort(FN)
FNs, ZTs, VVs = FN[fsi], ZT[fsi], VV[:,fsi]
fn, zt, v = [], [], V[0,:,stb[0]].T
k = 0
for i in range(len(FN)):
b0 = (FNs > (1-tol)*FNs[k]) & (FNs < (1+tol)*FNs[k])
if b0.sum() >= spo:
fn = np.append(fn,(FNs[b0]).mean())
zt = np.append(zt,(ZTs[b0]).mean())
mv = np.argmax(np.abs(VVs[:,b0]),axis=0)[0]
nv = np.mean(VVs[:,b0]/VVs[mv,b0],axis=1).reshape(-1,1)
v = np.hstack((v,nv))
k += b0.sum()
if k > len(FN)-1: break
return fn, zt, v
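# Usage sketch of the full identification pipeline (the acceleration record
# `acc`, its sampling rate `fs` and the reference channels are illustrative):
#
# y = MRPy(acc, fs=fs)
# yk = rearrange_data(y, ref=(0, 1))
# FN, ZT, VV = SSI_COV_iterator(yk, i=30, nmin=2, nmax=40, incr=2, plot=True)
# stb = stabilization_diagram(FN, ZT, VV, 'Example')
# fn, zt, v = stable_modes(FN, ZT, VV, stb, tol=0.01, spo=6)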
#-----------------------------------------------------------------------------
def plot_singular_values(T, figsize=(14, 4), nmx=40):
"""
Compute and plot the singular values.
Parameters
-------
T : auxclass_like
Auxclass object that contains the matrix and the attribute method.
figsize : tuple, optional
Graph size. Default is (14,4).
nmx : int, optional
Number of singular values displayed in the graph.
"""
a_for = {'size':16}
l_for = {'size':16}
S = np.linalg.svd(T, compute_uv=False)[:nmx]
idx = np.argmin(S[1:]/S[:-1])
fig, ax = plt.subplots(1, 3,figsize=figsize)
fig.suptitle('%s Singular Values' %(T.method), **a_for)
label = ['\n(a) singular values',
'Model Order\n(b) normalized by the first',
'\n(c) normalized by the previous']
ax[0].plot(np.arange(1,nmx+1),S,'bo',ms=4)
ax[0].set_ylabel('Singular Values', **l_for)
ax[0].set_ylim(bottom=0)
ax[1].semilogy(np.arange(1,nmx+1),S/S[0],'b',idx+1,(S/S[0])[idx],'ro')
ax[1].annotate('%.0f' %(idx+1),(idx+1.5,(S/S[0])[idx]),**l_for)
ax[2].semilogy(np.arange(1,nmx+1), np.hstack((1,S[1:]/S[:-1])),'b',
idx+1,(S[1:]/S[:-1])[idx-1],'ro')
ax[2].annotate('%.0f' %(idx+1),(idx+1.5,(S[1:]/S[:-1])[idx-1]),**l_for)
for i in range(3):
ax[i].set_xticks(np.linspace(0,nmx,nmx//2+1))
ax[i].tick_params(labelsize=12)
ax[i].set_xlim((0,nmx))
ax[i].set_xlabel(label[i], **l_for)
fig.tight_layout(rect=[0, 0, 1, 0.97])
return
#=============================================================================
# Frequency-Domain
#=============================================================================
def SDM(self, nperseg=None, plot=False, window='hann', nfft=None,
figsize=(10,10)):
"""
Estimate the spectral density matrix.
The signals are divided into segments with nperseg values to obtain
smoothed estimates.
Parameters
-------
self : MRPy_like
MRPy object that contains the time data.
nperseg : int, optional
Length of each segment. Default is the signal length.
plot : bool, optional
If true, plots the spectral matrix. Default is false.
window : string, optional
Desired window to use. Default is 'hann'.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None, the
FFT length is nperseg. Defaults to None.
figsize : tuple, optional
Graph size. Default is (10,10).
Returns
-------
PSD : auxclass
Auxclass object that contains the spectral densities and the attributes
f and nperseg.
See also
-------
scipy.signal.csd
"""
if nperseg is None: nperseg = self.N
if nfft is None: nfft = nperseg
if nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
G = np.empty((self.NX, self.NX, nfft//2+1), dtype=np.complex_)
import gsw
import xarray as xr
import subprocess
import numpy as np
import os
import pylab as plt
# Import utils and decorators
from tcoasts.utils.utils import *
from tcoasts.utils.decorators import _file_exists
class TransportAlongCoast(object):
'''
Compute transport across transects perpendicular to a coastline.
Transects are placed at user-defined distances along the coast, velocity
(and optionally tracer) fields are interpolated onto them, and the flux
through each transect is integrated over depth and width.
'''
def __init__(self,path,initpos,contour_file,distance=np.arange(-400,400,100),length=100):
self.path = path # Data path
self.initpos = initpos # Init location lat lon coordinates.
self.dist = distance # Units kilometers
self.length = length # Units kilometers
self.n = 4 # self.length/self.n corresponds to the segments
# on the perpendicular vector.
self.contour_file = contour_file # Contour filename.
self.tmpfile= 'tmp_interp_transects.nc' # Temporal file to store interpolated fields.
# Load required data
self.extract_contour()
self.initindex=find2d(self.coastline,initpos)
def extract_contour(self):
# Load contour file.
if './' not in self.contour_file:
self.contour_file=os.path.join(self.path,self.contour_file)
if os.path.isfile(self.contour_file):
self.coastline=np.loadtxt(self.contour_file)
else:
raise ValueError('''
Make sure the file path is correct.
The path should be relative to the location of
the running script, or relative to self.path.
''')
def coords2dist(self,lonlat,p=0):
'''
This function follows the GSW computation.
'''
distance=gsw.distance(lonlat[:,0],lonlat[:,1],p)
return distance
def distancefrominit(self):
'''
The distance definition points positive to the east.
'''
if self.initindex != 0:
# Compute cumulative distance to right of index [location,location]
postinit=np.cumsum(self.coords2dist(self.coastline[self.initindex:]))
# Compute cumulative distance to left of index [location,location]
neginit=-1*np.cumsum(np.flipud(self.coords2dist(self.coastline[:self.initindex])))
# Join cumulative distances.
cumdistance=np.hstack((np.flipud(neginit),postinit))
else:
# Compute cumulative distance starting from the index [0,0]
cumdistance=np.cumsum(self.coords2dist(self.coastline))
return cumdistance
def perploc(self):
#Find the user defined locations for the perpendicular vectors.
dist_coast=self.distancefrominit()
index_perp=[find(dist_coast,dis*1000) for dis in self.dist]
return index_perp
def perp2coast(self,method='smooth',x=10):
'''
Input:
method: [ smooth ]
smooth - computes the mean over X number of slopes and
projects the perpendicular vector
byseg - computes the mean over each segment of the slope
local - computes the the perpendicular vector using the 2
adjacent locations
ext - computes the perpendicular vector using the slope at
x cells to the left and right of the desired
perpendicular location.
'''
index_perp=self.perploc()
# Method to find the location perpendicular vector.
if method == 'local' or method == 'ext':
# Compute slope from adjacent locations [loc-x,loc+x]
if method=='local':
x=1
slopes=np.array([slope(self.coastline[ii-x,0],self.coastline[ii+x,0],
self.coastline[ii-x,1],self.coastline[ii+x,1])
for ii in index_perp])
elif method == 'smooth':
# Compute average slope from all the indexes contained between locations [loc-x,loc+x]
slopes=np.array([np.mean([slope(self.coastline[ii-xx,0],self.coastline[ii+xx,0],
self.coastline[ii-xx,1],self.coastline[ii+xx,1])
for xx in range(1,x)])
for ii in index_perp])
else:
# Compute average slope from all segments from [loc-x,loc-x+(2x-1)]
slopes=np.array([np.mean([slope(self.coastline[ii-x,0],self.coastline[ii-x+xx,0],
self.coastline[ii-x,1],self.coastline[ii-x+xx,1])
for xx in range(1,(2*x-1))])
for ii in index_perp])
#Compute angles from slopes
angles=slope2angle(slopes)
#Shift angles to be perpendicular
perp_angle=angles+(np.pi/2)
#Normal vector
self.x_norm = np.squeeze(np.cos(angles))
self.y_norm = np.squeeze(np.sin(angles))
#Perpendicualar vector
self.x_perp = np.squeeze(np.cos(perp_angle))
self.y_perp = np.squeeze(np.sin(perp_angle))
# Return dictionary containing vector information
return {'Nvector':{'x':self.x_norm,'y':self.y_norm,'angle':angles,'slope':slopes},
'Pvector':{'x':self.x_perp,'y':self.y_perp,'angles':perp_angle,'slope':-1/slopes}}
def perpvecdist(self,index_perp,perp_angle):
#compute distances to scale perpendicular vectors.
### Note this will produce an error of 1e-4.
x=np.array([[self.coastline[index_perp][ii,0],
np.cos(perp_angle[ii])+self.coastline[index_perp][ii,0]]
for ii in range(len(index_perp))])
y=np.array([[self.coastline[index_perp][ii,1],
np.sin(perp_angle[ii])+self.coastline[index_perp][ii,1]]
for ii in range(len(index_perp))])
distances = gsw.distance(x,y)
return distances
# _file_exists will test if the tmporal file containing the interpolated
# data exits. If file exists it will load the contents, otherwise, it will
# interpolate the data.
@_file_exists
def inter2vector(self,ufiles='U.*.nc',vfiles='V.*.nc',tracerfile=None,dataset=None,save=True,shift=360,**kwargs):
'''
**kwargs inter2vector supports the all the kwargs of xr.open_mfdataset.
'''
# xr load parameters
xr_openmf_defaults={}
if '*' in ufiles and '*' in vfiles:
xr_openmf_defaults = {'concat_dim':'time','parallel':True,'combine':'nested'}
xr_openmf_defaults.update(kwargs)
print('Opening velocity files')
# Load data.
u = self.loaddata(file=ufiles,var='U',dataset=dataset,**xr_openmf_defaults)
v = self.loaddata(file=vfiles,var='V',dataset=dataset,**xr_openmf_defaults)
# Make sure the shape of the velocity fields are the same.
if u.shape != v.shape:
raise ValueError('The velocity fields should have the same shape.')
# Compute perpendicular vectors.
x_norm,y_norm,x_perp,y_perp,x_perp_all,y_perp_all=self.vertor_perp()
# Define locations to interpolate interpolation.
# !Important:
# x_perp,y_perp is defined in the center of the cells
x = xr.DataArray(x_perp, dims=('transect','n'))
y = xr.DataArray(y_perp, dims=('transect','n'))
# Define limits to slice data.
deltax = 2*max((abs(x_perp[:,0]-x_perp[:,1])))
slicevalx = [shift+x_perp.min()-deltax,shift+x_perp.max()+deltax]
deltay = 2*max((abs(y_perp[:,0]-y_perp[:,1])))
slicevaly = [y_perp.min()-deltay,y_perp.max()+deltay]
# Slice data to reduce memory issues.
u = u.sel({'lon':slice(slicevalx[0],slicevalx[1]),'lat':slice(slicevaly[0],slicevaly[1])})
v = v.sel({'lon':slice(slicevalx[0],slicevalx[1]),'lat':slice(slicevaly[0],slicevaly[1])})
# Interpolate data using xarray,
# Note that fields can not contain nans
# TO DO: Add support for data containing nans.
print('Interpolating velocity fields')
interp_u = u.chunk({'time':10,'depth':25,'lat':len(u.lat),'lon':len(u.lon)}).interp(lon=shift+x,lat=y).compute()
interp_u = interp_u.where(interp_u!=0,np.nan)
interp_v = v.chunk({'time':10,'depth':25,'lat':len(v.lat),'lon':len(v.lon)}).interp(lon=shift+x,lat=y).compute()
interp_v = interp_v.where(interp_v!=0,np.nan)
# Merge datasets
self.interp_data=xr.merge([interp_u.to_dataset(name='u'), interp_v.to_dataset(name='v')])
# Interpolate tracer fields to constrain transport.
if tracerfile != None:
print('Loadng and interpolating tracer')
tracer = self.loaddata(file=tracerfile,var='Tracer',dataset=dataset,**xr_openmf_defaults)
tracer = tracer.sel({'lon':slice(slicevalx[0],slicevalx[1]),'lat':slice(slicevaly[0],slicevaly[1])})
interp_tracer = tracer.interp(lon=shift+x,lat=y).compute()
interp_tracer = interp_tracer.where(interp_tracer!=0,np.nan)
self.interp_data = xr.merge([interp_u.to_dataset(name='u'), interp_v.to_dataset(name='v'),
interp_tracer.to_dataset(name='tracer')])
# Save data.
if save==True:
self.interp_data.to_netcdf('./tmp_interp_transects.nc')
return self.interp_data
def depth_profiles(self,bottom_vel):
'''
'''
# Maximum depth from interpolated field.
depth_index=self.interp_data.depth[np.isfinite(self.interp_data.u.where(abs(self.interp_data.u)>bottom_vel,np.nan).isel({'time':0})).argmin('depth')]
# xr.DataArray 2 multiply with field.
depth=(xr.zeros_like(self.interp_data.u.isel(time=0))+self.interp_data.depth)
# Mask depth to only contain values larger than index.
depth=depth.where(depth > depth_index,np.nan)
# Delta depth to compute area
delta_depth=depth.diff(dim='depth')
return delta_depth
def vel_magnitude(self):
# Magnitude of interpolated vectors.
magnitude = np.sqrt(self.interp_data.u**2+self.interp_data.v**2)
return magnitude
def dot_product(self):
# Dot product between interpolated vectors and normal vector
# from perpendicular transect to the coast.
return self.interp_data.u*self.x_norm[np.newaxis,np.newaxis,:,np.newaxis]+self.interp_data.v*self.y_norm[np.newaxis,np.newaxis,:,np.newaxis]
def compute_transport(self,bottom_vel=1e-5):
# Scalar projection of interpolated data
dotproduct = self.dot_product()
# Projected data over normal vectors to surface.
u_normal = dotproduct*self.x_norm[np.newaxis,np.newaxis,:,np.newaxis]
v_normal = dotproduct*self.y_norm[np.newaxis,np.newaxis,:,np.newaxis]
# Area of each grid cell.
dA = self.delta_area(bottom_vel)
# Multiplication of vector sum and the dA. Flux integral.
self.transport=(u_normal+v_normal)*dA
return self.transport.sum(dim={'depth','n'})
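# Usage sketch (the data path, contour file name and start position are illustrative):
#
# tac = TransportAlongCoast(path='./data/', initpos=[-70.0, 40.0],
#                           contour_file='coastline.txt')
# tac.inter2vector(ufiles='U.*.nc', vfiles='V.*.nc')
# transport = tac.compute_transport(bottom_vel=1e-5)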
def delta_area(self,bottom_vel):
# Compute perpendicular vectors.
x_norm,y_norm,x_perp,y_perp,x_perp_all,y_perp_all=self.vertor_perp()
# Depth at each section of the transect.
delta_z=abs(self.depth_profiles(bottom_vel=bottom_vel))
# Distance between lon,lat points of transect.
delta_x=gsw.distance(x_perp_all,y_perp_all)
return delta_z*delta_x
def mask_transport(self,threshold,method='greater'):
'''
threshold [ float / list ]
Threshold to scale transport with tracers used for tracer.
method [ string ]
'greater' will compute the transport for all the values larger
than the threshold in the tracer field.
'smaller' will compute the transport for all the values smaller
than the threshold in the tracer field.
'both' will compute the transport for all the values within
the threshold interval in the tracer field.
'''
if type(threshold)==list:
threshold=np.array(threshold)
# TO DO: If u vertical grid != tracer vertical grid then interpolate tracer to velocity grid.
if method=='smaller' and type(threshold)==float:
scaled_transport=self.transport.where(self.interp_data.tracer.isel(depth=slice(0,-1))<threshold)
elif method=='greater' and type(threshold)==float:
scaled_transport=self.transport.where(self.interp_data.tracer.isel(depth=slice(0,-1))>threshold)
elif method=='both' and type(threshold)==np.ndarray:
scaled_transport=self.transport.where(self.interp_data.tracer.isel(depth=slice(0,-1))>threshold.min()).where(self.interp_data.tracer<threshold.max())
else:
raise ValueError('''Threshold must be an float or list/array in which the
min and max value will define the threshold interval.''')
return scaled_transport.sum(dim={'depth','n'})
def loaddata(self,file=None,var='U',dataset=None,**kwargs):
# Check if file or dataset is defined.
if file == None and dataset==None:
raise ValueError('''file should be the path to the netCDF files or
dataset should contain a dataset with a variable
containing the string defined as var.
''')
elif file is not None and dataset is None:
results = subprocess.check_output(['find', self.path, '-name', file])
results=[s for s in results.decode('utf-8').split()]
results.sort()
data=xr.open_mfdataset(results,**kwargs)
elif dataset != None:
data=dataset
else:
raise ValueError('Only one of the arguments [file or dataset] can be defined.')
# Extract variables from dataset
varname= [key for key,items in data.data_vars.items()]
# Rename variable for easier manipulation.
if len(varname)==1:
variable=data.rename({varname[0]:var})
else:
varname=[var for varn in varname if var in varn]
variable=data.rename({varname[0]:var})
# Extract only the variable of interest.
data=variable[var]
if type(data) != xr.core.dataarray.DataArray:
raise ValueError('The provided data should be a xr.DataArray.')
else:
return data
def vector_scale(self,index_perp,perp_angle):
'''
Scale vector to desired distance self.length
'''
# Scale perpendicular vector to distance self.length
return np.squeeze((self.length*1000)/self.perpvecdist(index_perp,perp_angle))
def vertor_perp(self,shift=0):
# Nearest location of perpendicular vectors from coastline grid.
index_perp=self.perploc()
# Compute perpendicular vectors.
perp_dict=self.perp2coast()
# Scale perpendicular vector to desired distance self.length.
scale=self.vector_scale(index_perp,perp_dict['Pvector']['angles'])
# Gridded normal vector
x_norm=(np.squeeze(np.linspace(0,scale,self.length//self.n)[:,np.newaxis]*self.x_norm)
+self.coastline[index_perp][:,0]).T+shift
y_norm=(np.squeeze(np.linspace(0,scale,self.length//self.n)[:,np.newaxis]*self.y_norm)
+self.coastline[index_perp][:,1]).T
import os
import numpy as np
import pandas as pd
from math import pi
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import matplotlib.animation as animation
MAX_DAY_KEEP_RAD = 11
FPS_SAVE = 30
class VirusSpreadingSimulation:
"""
@title : VirusSpreadingSimulation (VSS)
@author : <NAME>
@version : 0.0.12
How to visualize the global impact of containment ?
To understand, I choose to code in python a **Virus Spreading Simulation (VSS)**
considering multiple factors close to the real situation:
- infection radius and probability, death rate,
incubation period, healing duration,
asymptomatic rate, containment threshold,
% of population staying confined, immunity spreading.
Each dot represents a person. A first point is infected by the virus.
Different parameters affect the propagation. Dots can propagate the virus among themselves.
Here are some points to understand before looking at the simulations:
- The aim is to see the effects of isolation on people.
- The quarantine area represents an area where sick dots cannot infect any healthy dot.
- When the containment threshold is achieved a majority of dots does not move.
"""
def __init__(self, population=100, xlim=5., ylim=5.,
radius_infection="default", p_infection=0.1, incubation_period=14,
healing_duration=14, death_rate=0.1, without_symptom_rate=0.3,
quarantine_zone=True, isolation_threshold=0.05, pct_pop_isolated=0.9,
immunity_spreading=True, speed_avg="default", mpd=5, fpm=2, random_seed=123):
"""
__init__
Parameters :
------------
- population : int ∈ N+
number of point in population
- xlim : float ∈ R+*
x width
- ylim : float ∈ R+*
y width
- radius_infection : "default", float ∈ R+*
contagion distance
if "default", radius_infection is set to : (xlim + ylim) / 50
- p_infection : float ∈ [0,1]
probability to be infected by a point in contagion distance
- incubation_period : int ∈ N+
number of days to have symptoms
- healing_duration : int ∈ N+
number of days to heal
- death_rate : float ∈ [0,1]
probability to die if infected
- without_symptom_rate : float ∈ [0,1]
probability to not show any symptoms
- quarantine_zone : bool
create a quarantine area (separation between sick and healthy points)
- isolation_threshold : float ∈ [0,1]
threshold of sick points from which movements will stop
- pct_pop_isolated : float ∈ [0,1]
pct of population who will really stop moving
- speed_avg : 'default', float ∈ R+*
average speed of a point
if 'default', speed_avg is set to : (xlim + ylim) / 10
- mpd : int ∈ N+*
number of movement per day (possible to be infect at each movement)
- fpm : int ∈ N+*
number of frame per movement (actualisation of point positions/color on DataViz)
- random_seed : int or 1-d array_like or (Default : 123)
Seed for RandomState. Must be convertible to 32 bit unsigned integers.
"""
# set up default arg
if speed_avg == "default":
speed_avg = (xlim + ylim) / 10
if radius_infection == "default":
radius_infection = (xlim + ylim) / 50
np.random.seed(random_seed)
self.initial_seed = random_seed
# Set population
self.pop_size = population
self.xlim = xlim
self.ylim = ylim
self.pop = pd.DataFrame({
"x": np.random.uniform(0, xlim, population),
"y": np.random.uniform(0, ylim, population),
"speed": np.random.chisquare(speed_avg, population),
"healthy": np.full(population, True),
"infected_day": np.full(population, -1),
"quarantine_zone": np.full(population, False),
"stay_confined": np.random.choice([True, False], population, p=[pct_pop_isolated, 1 - pct_pop_isolated]),
"recovered": np.full(population, False),
"dead": np.full(population, False),
# day of death (will not die if "inf")
"death_day": np.where(np.random.uniform(0, 1, population) > death_rate, float("inf"),
np.random.choice(
range(incubation_period, incubation_period + healing_duration), population)),
"radian": np.random.uniform(-pi, pi, population),
"days_keeping_radian": np.random.randint(1, MAX_DAY_KEEP_RAD, population),
# Possible situations ["not_infected", "infected", "sick", "recovered", "dead"]
"situation": np.full(population, "not_infected"),
"without_symptoms": np.random.choice([True, False], population,
p=[without_symptom_rate, 1 - without_symptom_rate])
})
# first sick person in middle of area => patient zero is average person
self.pop.loc[0, ["x", "y", "speed", "healthy", "infected_day", "situation", "without_symptoms"]] = \
[xlim / 2, ylim / 2, speed_avg, False, 0, "infected", False]
# parameters about how virus is spreading and how population react
self.radius_infection = radius_infection
self.p_infection = p_infection
self.incubation_period = incubation_period
self.healing_duration = healing_duration
self.death_rate = death_rate
self.without_symptom_rate = without_symptom_rate
self.quarantine_zone = quarantine_zone
self.isolation_threshold = isolation_threshold
self.pct_pop_isolated = pct_pop_isolated
self.immunity_spreading = immunity_spreading
self.speed_avg = speed_avg
# no isolation at the beginning
self.isolation = False
# counting time
self.mpd = mpd
self.fpm = fpm
self.day = 0
self.mvt = 0
self.mvt_in_day = 0
self.nb_step = 0
# init spreading counters
# MAJ each day
self.nb_not_infected = population - 1
self.nb_infected = 1
self.nb_sick = 0
self.nb_recovered = 0
self.nb_dead = 0
self.nb_immune = 0
# init spreading statistics
self.stats = pd.DataFrame({
"nb_not_infected": [self.nb_not_infected],
"nb_infected": [self.nb_infected],
"nb_sick": [self.nb_sick],
"nb_recovered": [self.nb_recovered],
"nb_dead": [self.nb_dead],
"nb_immune": [self.nb_immune]},
index=['Day_' + str(0)]
)
def __repr__(self):
"""print class"""
return self.__str__()
def __str__(self):
"""print class"""
string = "<ViruseSpredingSimulation class ("
string += "pop_size:" + str(self.pop_size) + ", "
string += "(xlim, ylim):" + str((self.xlim, self.ylim)) + ", "
string += "radius_infection:" + str(self.radius_infection) + ", "
string += "p_infection:" + str(self.p_infection) + ", "
string += "incubation_period:" + str(self.incubation_period) + ", "
string += "healing_duration:" + str(self.healing_duration) + ", "
string += "death_rate:" + str(self.death_rate) + ", "
string += "without_symptom_rate:" + str(self.without_symptom_rate) + ", "
string += "quarantine_zone:" + str(self.quarantine_zone) + ", "
string += "isolation_threshold:" + str(self.isolation_threshold) + ", "
string += "pct_pop_isolated:" + str(self.pct_pop_isolated) + ", "
string += "speed_avg:" + str(self.speed_avg) + ", "
string += "mpd:" + str(self.mpd) + ", "
string += "fpm:" + str(self.fpm) + ", "
string += "initial_random_seed:" + str(self.initial_seed) + ")>"
return string
def __len__(self):
"""number of days"""
return self.day
def __add__(self, nb):
"""increase simulation of nb day"""
try:
nb = int(nb)
except (TypeError, ValueError):
raise ValueError
if nb < 0:
raise ValueError
start = self.day
while start + nb != self.day:
self._frame(verbose=0)
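# Usage sketch (parameter values are illustrative):
#
# vss = VirusSpreadingSimulation(population=200, xlim=5., ylim=5.)
# vss + 30           # advance the simulation by 30 days
# print(vss.stats)   # daily counters (not infected, infected, sick, ...)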
def _frame(self, verbose=1):
"""
Update positions at every frame of gif
Parameters :
------------
- verbose : int (Default 1)
information display level
"""
self.nb_step += 1
# update positions
immobile_points = (self.pop["situation"].isin(["dead", "sick"])) | (self.isolation & self.pop["stay_confined"])
self.pop["x"] = \
self.pop["x"] + self.pop["speed"] * (1 - immobile_points) / self.mpd / self.fpm * np.cos(self.pop["radian"])
self.pop["y"] = \
self.pop["y"] + self.pop["speed"] * (1 - immobile_points) / self.mpd / self.fpm * np.sin(self.pop["radian"])
# check for crossing boundary
crossed_x1 = (self.pop["x"] < 0)
crossed_x2 = (self.pop["x"] > self.xlim)
crossed_y1 = (self.pop["y"] < 0)
crossed_y2 = (self.pop["y"] > self.ylim)
# Calculate new position and new radian if boundary crossed
# new position on border, new radian calculate for each point (on available directions)
# -> crossing line
self.pop.loc[crossed_x1, ["x", "radian"]] = [0, np.random.uniform(-pi / 4, pi / 4, sum(crossed_x1))]
self.pop.loc[crossed_x2, ["x", "radian"]] = [self.xlim,
np.random.uniform(3 * pi / 4, 5 * pi / 4, sum(crossed_x2))]
self.pop.loc[crossed_y1, ["y", "radian"]] = [0, np.random.uniform(pi / 4, 3 * pi / 4, sum(crossed_y1))]
self.pop.loc[crossed_y2, ["y", "radian"]] = [self.ylim,
np.random.uniform(5 * pi / 4, 7 * pi / 4, sum(crossed_y2))]
# -> crossing corner
self.pop.loc[crossed_x1 & crossed_y1, ["x", "y", "radian"]] = \
[0, 0, np.random.uniform(pi / 8, 3 * pi / 8, sum(crossed_x1 & crossed_y1))]
self.pop.loc[crossed_x1 & crossed_y2, ["x", "y", "radian"]] = \
[0, self.ylim, np.random.uniform(13 * pi / 8, 15 * pi / 8, sum(crossed_x1 & crossed_y2))]
self.pop.loc[crossed_x2 & crossed_y2, ["x", "y", "radian"]] = \
[self.xlim, self.ylim, np.random.uniform(9 * pi / 8, 11 * pi / 8, sum(crossed_x2 & crossed_y2))]
self.pop.loc[crossed_x2 & crossed_y1, ["x", "y", "radian"]] = \
[self.xlim, 0, np.random.uniform(5 * pi / 8, 7 * pi / 8, sum(crossed_x2 & crossed_y1))]
if self.nb_step == self.fpm:
self.nb_step = 0
self.movement(verbose=verbose)
def movement(self, verbose=0):
"""
Each movement is represented as one second on the plot, with fpm frames per movement.
At each movement, points can be contaminated.
Parameters :
------------
- verbose : int (Default 0)
information display level
"""
# Contaminate with probability
self.contamination()
# immunity spreading
if self.immunity_spreading:
self.immunity_spreading_function()
self.mvt += 1
self.mvt_in_day += 1
if self.mvt_in_day == self.mpd:
self.mvt_in_day = 0
self.day += 1
self.new_day(verbose=verbose)
def contamination(self):
"""Contamination depending p_infection"""
# identify caring points
if not self.quarantine_zone:
caring_points = self.pop.loc[(self.pop["situation"].isin(["infected", "sick"]) &
(self.pop["infected_day"] != self.day)), ["x", "y"]]
else:
# if quarantine, sick points can't infect
caring_points = self.pop.loc[(self.pop["situation"] == "infected") &
(self.pop["infected_day"] != self.day), ["x", "y"]]
if len(caring_points) == 0:
return
# contaminate close points with probability
for index, row in self.pop.iterrows():
if row["situation"] == "not_infected":
# probability depending of the number of points around
# each point have probability self.p_infection to infect
first_filter = (caring_points["x"].between(row["x"] - self.radius_infection,
row["x"] + self.radius_infection, inclusive=False)) & \
(caring_points["y"].between(row["y"] - self.radius_infection,
row["y"] + self.radius_infection, inclusive=False))
if sum(first_filter) == 0:
continue
p_stay_safe = (1 - self.p_infection) ** self._get_nb_caring_points_around((row["x"], row["y"]),
caring_points.loc[
first_filter])
if p_stay_safe < np.random.uniform():
self.pop.loc[index, ["healthy", "infected_day", "situation"]] = [False, self.day, "infected"]
def immunity_spreading_function(self):
"""Contamination depending p_infection"""
# identify caring points of immunity
caring_points = self.pop.loc[self.pop["situation"].isin(["immune", "recovered"]), ["x", "y"]]
if len(caring_points) == 0:
return
# contaminate close points with probability
for index, row in self.pop.iterrows():
# only not_infected can receive immunity
if row["situation"] == "not_infected":
# probability depending of the number of points around
# each point have probability self.p_infection to infect
first_filter = (caring_points["x"].between(row["x"] - self.radius_infection,
row["x"] + self.radius_infection, inclusive=False)) & \
(caring_points["y"].between(row["y"] - self.radius_infection,
row["y"] + self.radius_infection, inclusive=False))
if sum(first_filter) == 0:
continue
p_stay_usual = (1 - self.p_infection) ** self._get_nb_caring_points_around((row["x"], row["y"]),
caring_points.loc[
first_filter])
if p_stay_usual < np.random.uniform():
from gym import spaces
import numpy as np
class Building:
def __init__(self, buildingId, dhw_storage = None, cooling_storage = None, heating_storage = None, electrical_storage = None, dhw_heating_device = None, hvac_device = None, save_memory = True):
"""
Args:
buildingId (int)
dhw_storage (EnergyStorage)
cooling_storage (EnergyStorage)
heating_storage (EnergyStorage)
electrical_storage (Battery)
dhw_heating_device (ElectricHeater or HeatPump)
hvac_device (HeatPump)
"""
# Building attributes
self.building_type = None
self.climate_zone = None
self.solar_power_capacity = None
self.buildingId = buildingId
self.dhw_storage = dhw_storage
self.cooling_storage = cooling_storage
self.heating_storage = heating_storage
self.electrical_storage = electrical_storage
self.dhw_heating_device = dhw_heating_device
self.hvac_device = hvac_device
self.observation_space = None
self.action_space = None
self.time_step = 0
self.sim_results = {}
self.save_memory = save_memory
if self.dhw_storage is not None:
self.dhw_storage.reset()
if self.cooling_storage is not None:
self.cooling_storage.reset()
if self.heating_storage is not None:
self.heating_storage.reset()
if self.electrical_storage is not None:
self.electrical_storage.reset()
if self.dhw_heating_device is not None:
self.dhw_heating_device.reset()
if self.hvac_device is not None:
self.hvac_device.reset()
self._electric_consumption_cooling_storage = 0.0
self._electric_consumption_heating_storage = 0.0
self._electric_consumption_dhw_storage = 0.0
self.cooling_demand_building = []
self.heating_demand_building = []
self.dhw_demand_building = []
self.electric_consumption_appliances = []
self.electric_generation = []
self.electric_consumption_cooling = []
self.electric_consumption_heating = []
self.electric_consumption_cooling_storage = []
self.electric_consumption_heating_storage = []
self.electric_consumption_dhw = []
self.electric_consumption_dhw_storage = []
self.net_electric_consumption = []
self.net_electric_consumption_no_storage = []
self.net_electric_consumption_no_pv_no_storage = []
self.hvac_device_to_building = []
self.cooling_storage_to_building = []
self.heating_storage_to_building = []
self.hvac_device_to_cooling_storage = []
self.hvac_device_to_heating_storage = []
self.cooling_storage_soc = []
self.heating_storage_soc = []
self.dhw_heating_device_to_building = []
self.dhw_storage_to_building = []
self.dhw_heating_device_to_storage = []
self.dhw_storage_soc = []
self.electrical_storage_electric_consumption = []
self.electrical_storage_soc = []
self.__validate_energy_models()
def set_state_space(self, high_state, low_state):
# Setting the state space and the lower and upper bounds of each state-variable
self.observation_space = spaces.Box(low=low_state, high=high_state, dtype=np.float32)
def set_action_space(self, max_action, min_action):
# Setting the action space and the lower and upper bounds of each action-variable
self.action_space = spaces.Box(low=min_action, high=max_action, dtype=np.float32)
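# Example (sketch): the bounds are illustrative; their shapes must match the
# number of state variables / actions defined for the building.
#
# bld = Building(buildingId=1)
# bld.set_state_space(np.array([30., 1.]), np.array([-10., 0.]))
# bld.set_action_space(np.array([0.5, 0.5]), np.array([-0.5, -0.5]))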
def set_storage_electrical(self, action):
"""
Args:
action (float): Amount of electrical energy stored (added) in that time-step as a ratio of the maximum capacity of the electrical storage device.
-1 <= action < 0 : Energy Storage Unit releases energy into the building and its State of Charge decreases
0 < action <= 1 : Energy Storage Unit receives energy and its State of Charge increases
The action is always subject to the constraints of the power capacity and the state of charge of the electrical storage unit itself
Return:
electrical_energy_balance (float): net electricity consumed (if +) or released (if -) by the electrical storage device
"""
electrical_energy_balance = self.electrical_storage.charge(action*self.electrical_storage.capacity)
if self.save_memory == False:
self.electrical_storage_electric_consumption.append(electrical_energy_balance)
self.electrical_storage_soc.append(self.electrical_storage._soc)
self.electrical_storage.time_step += 1
return electrical_energy_balance
def set_storage_dhw(self, action):
"""
Args:
action (float): Amount of heating energy stored (added) in that time-step as a ratio of the maximum capacity of the energy storage device.
-1 <= action < 0 : Energy Storage Unit releases energy into the building and its State of Charge decreases
0 < action <= 1 : Energy Storage Unit receives energy from the energy supply device and its State of Charge increases
The actions are always subject to the constraints of the power capacity of the heating supply unit, the DHW demand of the
building (which limits the maximum amount of DHW that the energy storage can provide to the building), and the state of charge of the
energy storage unit itself
Return:
elec_demand_heating (float): electricity consumption needed for space heating and heating storage
"""
# Heating power that could be possible to supply to the storage device to increase its State of Charge once the heating demand of the building has been satisfied
heat_power_avail = self.dhw_heating_device.get_max_heating_power() - self.sim_results['dhw_demand'][self.time_step]
# The storage device is charged (action > 0) or discharged (action < 0) taking into account the max power available and that the storage device cannot be discharged by an amount of energy greater than the energy demand of the building.
heating_energy_balance = self.dhw_storage.charge(max(-self.sim_results['dhw_demand'][self.time_step], min(heat_power_avail, action*self.dhw_storage.capacity)))
if self.save_memory == False:
self.dhw_heating_device_to_storage.append(max(0, heating_energy_balance))
self.dhw_storage_to_building.append(-min(0, heating_energy_balance))
self.dhw_heating_device_to_building.append(self.sim_results['dhw_demand'][self.time_step] + min(0, heating_energy_balance))
self.dhw_storage_soc.append(self.dhw_storage._soc)
# The energy that the energy supply device must provide is the sum of the energy balance of the storage unit (how much net energy it will lose or get) plus the energy supplied to the building. A constraint is added to guarantee it's always positive.
heating_energy_balance = max(0, heating_energy_balance + self.sim_results['dhw_demand'][self.time_step])
# Electricity consumed by the energy supply unit
elec_demand_heating = self.dhw_heating_device.set_total_electric_consumption_heating(heat_supply = heating_energy_balance)
# Electricity consumption used (if +) or saved (if -) due to the change in the state of charge of the energy storage device
self._electric_consumption_dhw_storage = elec_demand_heating - self.dhw_heating_device.get_electric_consumption_heating(heat_supply = self.sim_results['dhw_demand'][self.time_step])
if self.save_memory == False:
self.electric_consumption_dhw.append(elec_demand_heating)
self.electric_consumption_dhw_storage.append(self._electric_consumption_dhw_storage)
self.dhw_heating_device.time_step += 1
return elec_demand_heating
def set_storage_cooling_and_heating(self, action_cooling, action_heating):
elec_demand_cooling = self.__set_storage_cooling(action_cooling)
elec_demand_heating = self.__set_storage_heating(action_heating)
self.hvac_device.time_step += 1
return elec_demand_cooling, elec_demand_heating
def __set_storage_cooling(self, action):
"""
Args:
action (float): Amount of cooling energy stored (added) in that time-step as a ratio of the maximum capacity of the energy storage device.
-1 <= action < 0 : Energy Storage Unit releases energy into the building and its State of Charge decreases
0 < action <= 1 : Energy Storage Unit receives energy from the energy supply device and its State of Charge increases
The actions are always subject to the constraints of the power capacity of the cooling supply unit, the cooling demand of the
building (which limits the maximum amount of cooling energy that the energy storage can provide to the building), and the state of charge of the energy storage unit itself
Return:
elec_demand_cooling (float): electricity consumption needed for space cooling and cooling storage
"""
# Cooling power that could be possible to supply to the storage device to increase its State of Charge once the heating demand of the building has been satisfied
cooling_power_avail = self.hvac_device.get_max_cooling_power() - self.sim_results['cooling_demand'][self.time_step]
# The storage device is charged (action > 0) or discharged (action < 0) taking into account the max power available and that the storage device cannot be discharged by an amount of energy greater than the energy demand of the building.
cooling_energy_balance = self.cooling_storage.charge(max(-self.sim_results['cooling_demand'][self.time_step], min(cooling_power_avail, action*self.cooling_storage.capacity)))
if self.save_memory == False:
self.hvac_device_to_cooling_storage.append(max(0, cooling_energy_balance))
self.cooling_storage_to_building.append(-min(0, cooling_energy_balance))
self.hvac_device_to_building.append(self.sim_results['cooling_demand'][self.time_step] + min(0, cooling_energy_balance))
self.cooling_storage_soc.append(self.cooling_storage._soc)
# The energy that the energy supply device must provide is the sum of the energy balance of the storage unit (how much net energy it will lose or get) plus the energy supplied to the building. A constraint is added to guarantee it's always positive.
cooling_energy_balance = max(0, cooling_energy_balance + self.sim_results['cooling_demand'][self.time_step])
# Electricity consumed by the energy supply unit
elec_demand_cooling = self.hvac_device.set_total_electric_consumption_cooling(cooling_supply = cooling_energy_balance)
# Electricity consumption used (if +) or saved (if -) due to the change in the state of charge of the energy storage device
self._electric_consumption_cooling_storage = elec_demand_cooling - self.hvac_device.get_electric_consumption_cooling(cooling_supply = self.sim_results['cooling_demand'][self.time_step])
if self.save_memory == False:
self.electric_consumption_cooling.append(np.float32(elec_demand_cooling))
self.electric_consumption_cooling_storage.append(np.float32(self._electric_consumption_cooling_storage))
return elec_demand_cooling
def __set_storage_heating(self, action):
"""
Args:
action (float): Amount of heating energy stored (added) in that time-step as a ratio of the maximum capacity of the energy storage device.
-1 <= action < 0 : Energy Storage Unit releases energy into the building and its State of Charge decreases
0 < action <= 1 : Energy Storage Unit receives energy from the energy supply device and its State of Charge increases
The actions are always subject to the constraints of the power capacity of the cooling supply unit, the heating demand of the
building (which limits the maximum amount of heating energy that the energy storage can provide to the building), and the state of charge of the energy storage unit itself
Return:
elec_demand_heating (float): electricity consumption needed for space heating and heating storage
"""
# Heating power that could be possible to supply to the storage device to increase its State of Charge once the heating demand of the building has been satisfied
heating_power_avail = self.hvac_device.get_max_heating_power() - self.sim_results['heating_demand'][self.time_step]
# The storage device is charged (action > 0) or discharged (action < 0) taking into account the max power available and that the storage device cannot be discharged by an amount of energy greater than the energy demand of the building.
heating_energy_balance = self.heating_storage.charge(max(-self.sim_results['heating_demand'][self.time_step], min(heating_power_avail, action*self.heating_storage.capacity)))
if self.save_memory == False:
self.hvac_device_to_heating_storage.append(max(0, heating_energy_balance))
self.heating_storage_to_building.append(-min(0, heating_energy_balance))
self.hvac_device_to_building.append(self.sim_results['heating_demand'][self.time_step] + min(0, heating_energy_balance))
self.heating_storage_soc.append(self.heating_storage._soc)
# The energy that the energy supply device must provide is the sum of the energy balance of the storage unit (how much net energy it will lose or get) plus the energy supplied to the building. A constraint is added to guarantee it's always positive.
heating_energy_balance = max(0, heating_energy_balance + self.sim_results['heating_demand'][self.time_step])
# Electricity consumed by the energy supply unit
elec_demand_heating = self.hvac_device.set_total_electric_consumption_heating(heating_supply = heating_energy_balance)
# Electricity consumption used (if +) or saved (if -) due to the change in the state of charge of the energy storage device
self._electric_consumption_heating_storage = elec_demand_heating - self.hvac_device.get_electric_consumption_heating(heating_supply = self.sim_results['heating_demand'][self.time_step])
if self.save_memory == False:
self.electric_consumption_heating.append(np.float32(elec_demand_heating))
self.electric_consumption_heating_storage.append(np.float32(self._electric_consumption_heating_storage))
return elec_demand_heating
def get_non_shiftable_load(self):
return self.sim_results['non_shiftable_load'][self.time_step]
def get_solar_power(self):
return self.sim_results['solar_gen'][self.time_step]
def get_dhw_electric_demand(self):
return self.dhw_heating_device._electrical_consumption_heating
def get_cooling_electric_demand(self):
return self.hvac_device._electrical_consumption_cooling
def get_heating_electric_demand(self):
return self.hvac_device._electrical_consumption_heating
def get_hvac_electric_demand(self):
return self.get_cooling_electric_demand() + self.get_heating_electric_demand()
def reset(self):
self.current_net_electricity_demand = self.sim_results['non_shiftable_load'][self.time_step] - self.sim_results['solar_gen'][self.time_step]
if self.dhw_storage is not None:
self.dhw_storage.reset()
if self.cooling_storage is not None:
self.cooling_storage.reset()
if self.heating_storage is not None:
self.heating_storage.reset()
if self.electrical_storage is not None:
self.electrical_storage.reset()
if self.dhw_heating_device is not None:
self.dhw_heating_device.reset()
self.current_net_electricity_demand += self.dhw_heating_device.get_electric_consumption_heating(self.sim_results['dhw_demand'][self.time_step])
if self.hvac_device is not None:
self.hvac_device.reset()
self.current_net_electricity_demand += self.hvac_device.get_electric_consumption_cooling(self.sim_results['cooling_demand'][self.time_step])
self.current_net_electricity_demand += self.hvac_device.get_electric_consumption_heating(self.sim_results['heating_demand'][self.time_step])
self._electric_consumption_cooling_storage = 0.0
self._electric_consumption_heating_storage = 0.0
self._electric_consumption_dhw_storage = 0.0
self.cooling_demand_building = []
self.heating_demand_building = []
self.dhw_demand_building = []
self.electric_consumption_appliances = []
self.electric_generation = []
self.electric_consumption_cooling = []
self.electric_consumption_cooling_storage = []
self.electric_consumption_heating = []
self.electric_consumption_heating_storage = []
self.electric_consumption_dhw = []
self.electric_consumption_dhw_storage = []
self.net_electric_consumption = []
self.net_electric_consumption_no_storage = []
self.net_electric_consumption_no_pv_no_storage = []
self.hvac_device_to_building = []
self.cooling_storage_to_building = []
self.heating_storage_to_building = []
self.hvac_device_to_cooling_storage = []
self.hvac_device_to_heating_storage = []
self.cooling_storage_soc = []
self.heating_storage_soc = []
self.dhw_heating_device_to_building = []
self.dhw_storage_to_building = []
self.dhw_heating_device_to_storage = []
self.dhw_storage_soc = []
self.electrical_storage_electric_consumption = []
self.electrical_storage_soc = []
def terminate(self):
if self.dhw_storage is not None:
self.dhw_storage.terminate()
if self.cooling_storage is not None:
self.cooling_storage.terminate()
if self.heating_storage is not None:
self.heating_storage.terminate()
if self.electrical_storage is not None:
self.electrical_storage.terminate()
if self.dhw_heating_device is not None:
self.dhw_heating_device.terminate()
if self.hvac_device is not None:
self.hvac_device.terminate()
if self.save_memory == False:
self.cooling_demand_building = np.array(self.sim_results['cooling_demand'][:self.time_step])
self.heating_demand_building = np.array(self.sim_results['heating_demand'][:self.time_step])
self.dhw_demand_building = np.array(self.sim_results['dhw_demand'][:self.time_step])
self.electric_consumption_appliances = np.array(self.sim_results['non_shiftable_load'][:self.time_step])
self.electric_generation = np.array(self.sim_results['solar_gen'][:self.time_step])
elec_consumption_dhw = 0
elec_consumption_dhw_storage = 0
if self.dhw_heating_device is not None and self.dhw_heating_device.time_step == self.time_step:
elec_consumption_dhw = np.array(self.electric_consumption_dhw)
elec_consumption_dhw_storage = np.array(self.electric_consumption_dhw_storage)
elec_consumption_cooling = 0
elec_consumption_cooling_storage = 0
if self.hvac_device is not None and self.hvac_device.time_step == self.time_step:
elec_consumption_cooling = np.array(self.electric_consumption_cooling)
elec_consumption_cooling_storage = np.array(self.electric_consumption_cooling_storage)
elec_consumption_heating = 0
elec_consumption_heating_storage = 0
if self.hvac_device is not None and self.hvac_device.time_step == self.time_step:
elec_consumption_heating = | np.array(self.electric_consumption_heating) | numpy.array |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import shutil
from onnx import defs
from onnx import helper
from onnx import TensorProto
import numpy as np
import tensorflow as tf
from onnx_tf.backend import onnx_graph_to_tensorflow_rep
from onnx_tf.common.legacy import legacy_opset_pre_ver
from onnx_tf.common.pooling_helper import py_pool
class TestDynamicShape(unittest.TestCase):
""" Tests for dynamic shape support
"""
def _get_rnd_float32(self, low=-1.0, high=1.0, shape=None):
output = np.random.uniform(low, high, shape)
if shape is None:
return np.float32(output)
else:
return output.astype(np.float32)
def _get_rnd_int(self, low, high=None, shape=None, dtype=np.int32):
return np.random.randint(low, high, size=shape, dtype=dtype)
def test_arg_max(self):
if legacy_opset_pre_ver(12):
raise unittest.SkipTest(
"ONNX version {} doesn't support select_last_index attribute for ArgMax that depends on shape."
.format(defs.onnx_opset_version()))
axis = 1
node_def = helper.make_node("ArgMax",
inputs=['X'],
outputs=['Y'],
axis=axis,
keepdims=0,
select_last_index=1)
graph_def = helper.make_graph(
[node_def],
name="test_unknown_shape",
inputs=[
helper.make_tensor_value_info("X", TensorProto.FLOAT, [None, None])
],
outputs=[
helper.make_tensor_value_info("Y", TensorProto.FLOAT, [None, None])
])
x = np.array([[1, 2, 3, 5, 3, 4, 5, 1], [2, 9, 3, 5, 9, 4, 5,
1]]).astype(np.float32)
# get tf_rep
tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
# export to tf.saved_model
model_path = 'test_dynamic_shape/arg_max'
tf_rep.export_graph(model_path)
# load the saved_model back
tf_model = tf.saved_model.load(model_path)
# run the model
tf_model_output = tf_model(X=x)
expected_output = np.argmax(np.flip(x, axis), axis=axis)
expected_output = x.shape[axis] - expected_output - 1
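# Worked example of the flip trick above (added for clarity): in the first row
# [1, 2, 3, 5, 3, 4, 5, 1] the maximum 5 occurs at indices 3 and 6. np.argmax
# returns the first match, so flipping gives [1, 5, 4, 3, 5, 3, 2, 1] with
# argmax 1, and x.shape[axis] - 1 - 1 = 6, i.e. the last occurrence requested
# by select_last_index=1.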
np.testing.assert_almost_equal(tf_model_output["Y"], expected_output)
def test_arg_min(self):
if legacy_opset_pre_ver(12):
raise unittest.SkipTest(
"ONNX version {} doesn't support select_last_index attribute for ArgMin that depends on shape."
.format(defs.onnx_opset_version()))
axis = 1
node_def = helper.make_node("ArgMin",
inputs=['X'],
outputs=['Y'],
axis=axis,
keepdims=0,
select_last_index=1)
graph_def = helper.make_graph(
[node_def],
name="test_unknown_shape",
inputs=[
helper.make_tensor_value_info("X", TensorProto.FLOAT, [None, None])
],
outputs=[
helper.make_tensor_value_info("Y", TensorProto.FLOAT, [None, None])
])
x = np.array([[1, 2, 3, 5, 3, 4, 5, 1], [2, 7, 3, 5, 2, 4, 5,
6]]).astype(np.float32)
tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
# export to tf.saved_model
model_path = 'test_dynamic_shape/arg_min'
tf_rep.export_graph(model_path)
# load the saved_model back
tf_model = tf.saved_model.load(model_path)
# run the model
tf_model_output = tf_model(X=x)
expected_output = np.argmin(np.flip(x, axis), axis=axis)
expected_output = x.shape[axis] - expected_output - 1
np.testing.assert_almost_equal(tf_model_output["Y"], expected_output)
def _batch_normalization(self, x, mean, variance, bias, scale,
variance_epsilon):
inv = np.reciprocal(np.sqrt(variance + variance_epsilon))
if scale is not None:
inv *= scale
return x * inv + (bias - mean * inv if bias is not None else -mean * inv)
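# Note (added for clarity): the helper above is an algebraic rearrangement of
# the usual batch-norm formula
#   y = scale * (x - mean) / sqrt(variance + epsilon) + bias
# with inv = scale / sqrt(variance + epsilon), so that y = x * inv + (bias - mean * inv).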
def test_batch_normalization(self):
if legacy_opset_pre_ver(6):
raise unittest.SkipTest("Backend doesn't support consumed flag")
node_def = helper.make_node("BatchNormalization",
["X", "scale", "bias", "mean", "var"], ["Y"],
epsilon=0.001)
graph_def = helper.make_graph(
[node_def],
name="test_unknown_shape",
inputs=[
helper.make_tensor_value_info("X", TensorProto.FLOAT,
[None, None, None, None]),
helper.make_tensor_value_info("scale", TensorProto.FLOAT, [None]),
helper.make_tensor_value_info("bias", TensorProto.FLOAT, [None]),
helper.make_tensor_value_info("mean", TensorProto.FLOAT, [None]),
helper.make_tensor_value_info("var", TensorProto.FLOAT, [None])
],
outputs=[
helper.make_tensor_value_info("Y", TensorProto.FLOAT,
[None, None, None, None])
])
x_shape = [3, 5, 4, 2]
param_shape = [5]
_param_shape = [1, 5, 1, 1]
x = self._get_rnd_float32(0, 1, shape=x_shape)
m = self._get_rnd_float32(0, 1, shape=param_shape)
_m = m.reshape(_param_shape)
v = self._get_rnd_float32(0, 1, shape=param_shape)
_v = v.reshape(_param_shape)
scale = self._get_rnd_float32(0, 1, shape=param_shape)
_scale = scale.reshape(_param_shape)
bias = self._get_rnd_float32(0, 1, shape=param_shape)
_bias = bias.reshape(_param_shape)
golden = self._batch_normalization(x, _m, _v, _bias, _scale, 0.001)
tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
# export to tf.saved_model
model_path = 'test_dynamic_shape/batch_normalization'
tf_rep.export_graph(model_path)
# load the saved_model back
tf_model = tf.saved_model.load(model_path)
# run the model
tf_model_output = tf_model(X=x, scale=scale, bias=bias, mean=m, var=v)
np.testing.assert_almost_equal(tf_model_output["Y"], golden, decimal=5)
def test_compress(self):
if legacy_opset_pre_ver(9):
raise unittest.SkipTest(
"ONNX version {} doesn't support Compress.".format(
defs.onnx_opset_version()))
axis = 1
node_def = helper.make_node("Compress",
inputs=['X', 'condition'],
outputs=['Y'],
axis=axis)
graph_def = helper.make_graph(
[node_def],
name="test_unknown_shape",
inputs=[
helper.make_tensor_value_info("X", TensorProto.FLOAT,
[None, None, None]),
helper.make_tensor_value_info("condition", TensorProto.BOOL, [None])
],
outputs=[
helper.make_tensor_value_info("Y", TensorProto.FLOAT,
[None, None, None])
])
x = self._get_rnd_float32(shape=[5, 5, 5])
cond = np.array([1, 0, 1]).astype(bool)
tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
# export to tf.saved_model
model_path = 'test_dynamic_shape/compress'
tf_rep.export_graph(model_path)
# load the saved_model back
tf_model = tf.saved_model.load(model_path)
# run the model
tf_model_output = tf_model(X=x, condition=cond)
np.testing.assert_almost_equal(tf_model_output["Y"],
np.compress(cond, x, axis=axis))
def test_conv_transpose(self):
# test dynamic batch size on transpose of 2d convolution
pads = [1, 1, 1, 1]
x_shape = [1, 3, 4, 6]
x = self._get_rnd_float32(shape=x_shape)
weight_shape = [3, 5, 2, 2]
weights = self._get_rnd_float32(shape=weight_shape)
node_def = helper.make_node("ConvTranspose", ["X", "weights"], ["Y"],
pads=pads)
graph_def = helper.make_graph(
[node_def],
name="test_unknown_shape",
inputs=[
helper.make_tensor_value_info("X", TensorProto.FLOAT,
[None, None, None, None]),
helper.make_tensor_value_info("weights", TensorProto.FLOAT,
[None, None, None, None])
],
outputs=[
helper.make_tensor_value_info("Y", TensorProto.FLOAT,
[None, None, None, None])
])
tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
# export to tf.saved_model
model_path = 'test_dynamic_shape/conv_transpose'
tf_rep.export_graph(model_path)
# load the saved_model back
tf_model = tf.saved_model.load(model_path)
# run the model
tf_model_output = tf_model(X=x, weights=weights)
padh_left = weight_shape[2] - 1 - pads[0]
padh_right = weight_shape[2] - 1 - pads[1]
padw_left = weight_shape[3] - 1 - pads[2]
padw_right = weight_shape[3] - 1 - pads[3]
kh = weight_shape[2]
kw = weight_shape[3]
outh = x_shape[2] + padh_left + padh_right - (kh - 1)
outw = x_shape[3] + padw_left + padw_right - (kw - 1)
out_shape = [x_shape[0], weight_shape[1], outh, outw]
test_output = np.zeros(out_shape)
for b in range(0, x_shape[0]):
for m in range(0, weight_shape[1]):
for c in range(0, x_shape[1]):
for h in range(0, outh):
for w in range(0, outw):
for k1 in range(h, h + kh):
for k2 in range(w, w + kw):
if (k1 - padh_left >= 0 and k2 - padw_left >= 0):
test_output[b][m][h][w] += x[b][c][k1 - padh_left][
k2 - padw_left] * weights[c][m][kh + h - 1 -
k1][kw + w - 1 - k2]
np.testing.assert_almost_equal(tf_model_output["Y"], test_output, decimal=5)
def test_depth_to_space(self):
b, c, h, w = shape = [2, 48, 5, 6]
blocksize = 4
x = self._get_rnd_float32(shape=shape)
node_def = helper.make_node("DepthToSpace", ["X"], ["Y"],
blocksize=blocksize,
mode="DCR")
graph_def = helper.make_graph(
[node_def],
name="test_unknown_shape",
inputs=[
helper.make_tensor_value_info("X", TensorProto.FLOAT,
[None, None, None, None])
],
outputs=[
helper.make_tensor_value_info("Y", TensorProto.FLOAT,
[None, None, None, None])
])
tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
# export to tf.saved_model
model_path = 'test_dynamic_shape/depth_to_space'
tf_rep.export_graph(model_path)
# load the saved_model back
tf_model = tf.saved_model.load(model_path)
# run the model
tf_model_output = tf_model(X=x)
tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])
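# Note (added for clarity): the three lines above are the reference DCR
# DepthToSpace: the channel axis is viewed as (blocksize, blocksize, C / b**2),
# the two block axes are interleaved with H and W by the transpose
# [0, 3, 4, 1, 5, 2], and the result is flattened back to
# [N, C / b**2, H * blocksize, W * blocksize].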
np.testing.assert_almost_equal(tf_model_output["Y"], y)
def test_eye_like(self):
if legacy_opset_pre_ver(9):
raise unittest.SkipTest("ONNX version {} doesn't support EyeLike.".format(
defs.onnx_opset_version()))
shape = [6, 10]
off_diagonal_offset = -3
x = self._get_rnd_int(0, 100, shape=shape)
y = np.eye(shape[0], shape[1], k=off_diagonal_offset, dtype=np.float32)
node_def = helper.make_node("EyeLike", ["x"], ["y"],
dtype=TensorProto.FLOAT,
k=off_diagonal_offset)
graph_def = helper.make_graph(
[node_def],
name="test_unknown_shape",
inputs=[
helper.make_tensor_value_info("x", TensorProto.INT32, [None, None])
],
outputs=[
helper.make_tensor_value_info("y", TensorProto.FLOAT, [None, None])
])
tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
# export to tf.saved_model
model_path = 'test_dynamic_shape/eye_like'
tf_rep.export_graph(model_path)
# load the saved_model back
tf_model = tf.saved_model.load(model_path)
# run the model
tf_model_output = tf_model(x=x)
| np.testing.assert_equal(tf_model_output["y"], y) | numpy.testing.assert_equal |
#!/usr/bin/env python
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import os
import numpy as np
from histomicstk.filters.shape import clog, cdog
from skimage.feature import peak_local_max
from .datastore import datastore
def assert_array_almost_equal_neighborhood_lines(im, gt, decimal=4):
"""Wrapper around assert_array_almost_equal to work around scikit-image bug.
`skimage.transform.resize()` can return different results on different platforms. This
is an uncorrected bug mostly due to some floating point computation differences on
different platforms, hardware, or numerical computation library backend [1].
Due to that bug, the image (array) returned by the `resize()` function may have a few
lines in the result that differ from the ground truth computed on a different computer.
The workaround implemented in this function compares each line of the input image with
the corresponding line of the ground truth image, but also with the line before and the line after.
Only the minimum error is kept. This allows ignoring 'rogue' lines in the image that
differ only because of that scikit-image bug.
[1] https://github.com/scikit-image/scikit-image/issues/3445
"""
shift = [-1, 0, 1]
min_array = np.full_like(im, np.inf)
for s in shift:
rolled_image = np.roll(im, s, 1)
for s in shift:
rolled_gt = np.roll(gt, s, 1)
abs_array = np.abs(rolled_gt - rolled_image)
min_array = np.minimum(min_array, abs_array)
np.testing.assert_array_almost_equal(min_array, np.zeros_like(im), decimal=decimal)
def _sort_list(input_list):
return input_list[input_list[:, 0].argsort()]
def compare_maxima(input_im, gtruth_im, min_distance=10, threshold_abs=20):
"""Compares image maxima
Check that the maxima found in an image match the maxima found in a ground truth image.
This function is a wrapper around `skimage.feature.peak_local_max()`. It calls this function
on both images that are passed as arguments, and asserts that the resulting maxima arrays
returned by this function match.
"""
gtruth_coordinates = _sort_list(peak_local_max(gtruth_im, min_distance=min_distance,
threshold_abs=threshold_abs))
input_coordinates = _sort_list(peak_local_max(input_im, min_distance=min_distance,
threshold_abs=threshold_abs))
np.testing.assert_array_equal(gtruth_coordinates, input_coordinates)
class TestBlobDetectionFilters:
def test_clog(self):
im_nuclei_stain_data = np.load(
datastore.fetch('Easy1_nuclei_stain.npz'))
im_nuclei_stain = im_nuclei_stain_data['Easy1_nuclei_stain']
im_nuclei_fgnd_mask_data = np.load(
datastore.fetch('Easy1_nuclei_fgnd_mask.npz'))
im_nuclei_fgnd_mask = im_nuclei_fgnd_mask_data['Easy1_nuclei_fgnd_mask']
sigma_min = 10.0 / | np.sqrt(2.0) | numpy.sqrt |
# -*- coding: utf-8 -*-
"""
Automatic port-to-port routing functions.
"""
import math
import numpy as np
from samplemaker.devices import DevicePort
import samplemaker.makers as sm
from copy import deepcopy
# The following are routines for the connector
def __connectable_facing(port1: "DevicePort",port2: "DevicePort",
rad: float = 3):
"""
This function returns True and a sequence
if two ports are directly connectable and facing
each other. The sequence is either a straight line
or a cosine bend.
Parameters
----------
port1 : "DevicePort"
Start port for the connection.
port2 : "DevicePort"
End port for the connection.
rad : float, optional
The maximum bend radius in um. The default is 3.
Returns
-------
bool
True if connection succeded, False otherwise.
list
A sequence to perform the connection.
"""
# Get the vector from port 1 to port 2
dx = port2.x0-port1.x0
dy = port2.y0-port1.y0
if(port1.dx()!=0):
# Case1: port 1 is horizontal
if(abs(dy)<2*rad):
# the y offset is small enough to use a C bend
dxsign = 1
if(abs(dx)!=0): # Note: sometimes this can be zero
dxsign = dx/abs(dx)
if(port1.dx()+port2.dx()==0 and dxsign==port1.dx()):
# facing each other checks
if(abs(dy)<1e-3):
# will use straight line
return True,[["S",abs(dx)]]
else:
# will create a C bend
slen = (abs(dx)-2*rad)/2
if(slen<0):
return True,[["C",port1.dx()*dy,abs(dx)/2]]
else:
return True,[["S",slen],["C",port1.dx()*dy,rad],["S",slen]]
return False, []
else: #Case2 : port 1 is vertical
if(abs(dx)<2*rad):
# the y offset is small enough to use a C bend
dysign = 1
if(abs(dy)!=0):
dysign = dy/abs(dy)
if(port1.dy()+port2.dy()==0 and dysign==port1.dy()):
# facing each other checks
if(abs(dx)<1e-3):
# will use straight line
return True,[["S",abs(dy)]]
else:
# will create a C bend
slen = (abs(dy)-2*rad)/2
if(slen<0):
return True,[["C",-port1.dy()*dx,abs(dy)/2]]
else:
return True,[["S",slen],["C",-port1.dy()*dx,rad],["S",slen]]
return False, []
def __connectable_bend(port1: "DevicePort",port2: "DevicePort",
rad: float = 3):
"""
This function calculates if two ports can be connected with a single bend.
It calculates the projected intersection of two straight paths and returns
a sequence that connects the ports. It might sometimes fail if the ports are
too close.
Parameters
----------
port1 : "DevicePort"
Start port for the connection.
port2 : "DevicePort"
End port for the connection.
rad : float, optional
The maximum bend radius in um. The default is 3.
Returns
-------
bool
True if connection succeded, False otherwise.
list
A sequence to perform the connection.
"""
dx1 = port1.dx()
dx2 = port2.dx()
dy1 = port1.dy()
dy2 = port2.dy()
det = -dx1*dy2+dx2*dy1
if(det == 0):
return False, []
dx = port2.x0-port1.x0
dy = port2.y0-port1.y0
t = (-(dx)*dy2+dy*dx2)/det
s = (-(dx)*dy1+dy*dx1)/det
if(t>0 and s>0):
xstp = (t-rad)*port1.dx()
ystp = (t-rad)*port1.dy()
s1 = math.sqrt(xstp*xstp+ystp*ystp)
#xstp = (s-rad)*port2.dx()
#ystp = (s-rad)*port2.dy()
#s2 = math.sqrt(xstp*xstp+ystp*ystp)
p1 = deepcopy(port1)
p1.S(s1)
if(det>0):
p1.BL(rad)
else:
p1.BR(rad)
res=__connectable_facing(p1, port2,rad)
seq = [['S',s1],['B',det*90,rad]]+res[1]
return True, seq
else:
return False, []
def __connect_step(port1: "DevicePort",port2: "DevicePort",
rad: float = 3):
"""
Performs a single connection step, attempting to get port1 closer to
port2 by bending left or right or going straight. This connector works
well for optical waveguides.
Parameters
----------
port1 : "DevicePort"
Start port for the connection.
port2 : "DevicePort"
End port for the connection.
rad : float, optional
The maximum bend radius in um. The default is 3.
Returns
-------
bool
True if connection succeded, False otherwise.
list
A sequence to perform the connection.
"""
seq = []
if(port1.dx() !=0):
if(abs(port2.y0-port1.y0)<2*rad): # It's better to bend if too close
SLen=-1
else:
SLen = port1.dx()*(port2.x0+port2.dx()*rad-port1.x0)-rad
#print("slen in x",SLen)
if(port2.dx()==0):
if(abs(port2.x0-port1.x0)<4*rad):
SLen+=2*rad
else:
SLen-=2*rad
else:
if(abs(port2.x0-port1.x0)<2*rad): # It's better to bend if too close
SLen=-1
else:
SLen = port1.dy()*(port2.y0+port2.dy()*rad-port1.y0)-rad
#print("slen in y",SLen)
if(port2.dy()==0):
if(abs(port2.y0-port1.y0)<4*rad):
SLen+=2*rad
else:
SLen-=2*rad
if(SLen>0):
#print("Guessing I should move S by ", SLen)
port1.S(SLen)
seq = [["S",SLen]]
# Now see if we get closer by going left or right
p1 = deepcopy(port1)
p1.fix()
p1.BL(rad)
dL = p1.dist(port2)
res = __connectable_bend(p1,port2,rad)
if(res[0]):
seq += [["B",90,rad]]+res[1]
return True,seq
p1.reset()
p1.BR(rad)
dR = p1.dist(port2)
res = __connectable_bend(p1,port2,rad)
if(res[0]):
seq += [["B",-90,rad]]+res[1]
return True,seq
#print("L distance is ", dL)
#print("R distance is ", dR)
# Should I go left or right?
if(dL<dR):
port1.BL(rad)
port1.fix()
return False,(seq+[["B",90,rad]])
else:
port1.BR(rad)
port1.fix()
return False,(seq+[["B",-90,rad]])
def WaveguideConnect(port1: "DevicePort",port2: "DevicePort",
rad: float = 3):
"""
Simple waveguide connector for two ports. Given a start port and an
end port, the function attempts to connect the ports using
a sequence of straight lines (sequencer command S), 90 degrees bends
(sequencer command B) and cosine bends (sequencer command C).
The bending radius is also given. If the ports are too close
to be connected via Manhattan-style connectors the function returns
False.
The sequence can be used in combination with any
`samplemaker.sequencer.Sequencer` class that implements the commands
S, C, and B.
Parameters
----------
port1 : "DevicePort"
Start port for the connection.
port2 : "DevicePort"
End port for the connection.
rad : float, optional
The maximum bend radius in um. The default is 3.
Returns
-------
bool
True if connection succeded, False otherwise.
list
A sequence that realizes the connection.
"""
# Trivial cases first
res = __connectable_facing(port1, port2,rad)
if(res[0]):
#print("connectable facing")
return True,res[1]
res = __connectable_bend(port1,port2,rad);
if(res[0]):
#print("connectable")
return True,res[1]
else:
p1 = deepcopy(port1)
seq = []
for i in range(4):
res = __connect_step(p1, port2,rad)
seq += res[1]
if(res[0]): break
if(res[0]):
return True,seq
return False,[]
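# Usage sketch (illustrative; the DevicePort constructor arguments shown here
# are an assumption, see samplemaker.devices for the actual signature):
#   p_in = DevicePort(0, 0, ...)    # port at (0, 0) pointing along +x
#   p_out = DevicePort(20, 5, ...)  # port at (20, 5) pointing along -x
#   connected, seq = WaveguideConnect(p_in, p_out, rad=3)
#   if connected:
#       # `seq` is a list of ['S', length], ['B', angle, rad] and
#       # ['C', offset, rad] commands that any Sequencer implementing the
#       # S, B and C commands can turn into a waveguide path.
#       pass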
def ElbowRouter(port1: "DevicePort",port2: "DevicePort", offset: float = 5):
"""
Simple elbow connector based on Bezier curve, typically used for electrical interconnects.
Does not check collisions.
The offset parameter controls how far should the connector go straight out
of the ports before attempting a connection (using cubic Bezier).
Parameters
----------
port1 : "DevicePort"
Start port for the connection.
port2 : "DevicePort"
End port for the connection.
offset : float, optional
How far should the connector stick away from ports. The default is 5.
Returns
-------
xpts : list
X coordinates of the connector path.
ypts : list
Y coordinates of the connector path.
"""
x0 = port1.x0;
y0 = port1.y0;
r0 = port1.angle();
# Rotate all in the reference of port1
p2dot = sm.make_dot(port2.x0, port2.y0)
p2dot.rotate(x0, y0, -math.degrees(r0))
x1 = p2dot.x-x0;
y1 = p2dot.y-y0;
if(abs(y1) < 0.005):
xpts = [0,x1];
ypts = [0,y1];
else:
aout = port2.angle()-r0%(2*math.pi);
# offset
xs = offset;
xs1 = xs+3*offset;
xe = x1+offset*math.cos(aout);
ye = y1+offset*math.sin(aout);
xe1 = xe+3*offset*math.cos(aout);
ye1 = ye+3*offset*math.sin(aout);
t = np.array([0,0.25,0.5,0.75,1]);
xpts = np.power(1-t,3)*xs+3*np.power(1-t,2)*t*xs1+3*(1-t)*np.power(t,2)*xe1+np.power(t,3)*xe
ypts = 3*(1-t)*np.power(t,2)*ye1+np.power(t,3)*ye;
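# Note (added for clarity): the two expressions above evaluate a cubic Bezier
#   B(t) = (1-t)^3 P0 + 3(1-t)^2 t P1 + 3(1-t) t^2 P2 + t^3 P3
# sampled at t = 0, 0.25, 0.5, 0.75, 1, with P0 = (xs, 0), P1 = (xs1, 0),
# P2 = (xe1, ye1), P3 = (xe, ye) expressed in port1's reference frame. The y
# expression omits the P0 and P1 terms because both points lie on y = 0.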
xpts = np.append([0],xpts);
xpts = np.append(xpts,[x1])
ypts = | np.append([0],ypts) | numpy.append |
import copy
from PIL import Image
from PIL import ImageEnhance
from torch.utils.data import DataLoader, Dataset
import torch
import numpy as np
import glob
import torchvision
import matplotlib.pyplot as plt
import random
import cv2
from torchvision import transforms
np.seterr(divide='ignore', invalid='ignore')
def takeSecond(elem):
return elem[0]**2+elem[1]**2
class heatmap_dataset(Dataset):
def __init__(self, ds_dir, sigma, setname='train', transform=None, norm_factor=256, rgb2gray=False, resize=True):
self.ds_dir = ds_dir
self.setname = setname
self.transform = transform
self.norm_factor = norm_factor
self.rgb2gray = rgb2gray
self.__sigma = sigma
self.resize = resize
self.c = 0
self.s = 0
self.r = 0
if setname == 'train':
data = []
gt = []
train_list = '/media/home_bak/ziqi/park/Hourglass_twopoint/dataset/train.txt'
f = open(train_list)
for line in f:
line_data = line.strip('\n')
line_gt = line_data.replace(
'perspective_img', 'point').replace('.jpg', '_OA.txt')
data.append(line_data)
gt.append(line_gt)
self.data = data
self.gt = gt
if setname == 'val':
data = []
gt = []
test_list = '/media/home_bak/ziqi/park/Hourglass_twopoint/dataset/val.txt'
f = open(test_list)
for line in f:
line_data = line.strip('\n')
line_gt = line_data.replace(
'perspective_img', 'point').replace('.jpg', '_OA.txt')
data.append(line_data)
gt.append(line_gt)
self.data = data
self.gt = gt
def __len__(self):
return len(self.data)
def get_affine_transform(self, center, scale, rot, output_size, shift=np.array([0, 0], dtype=np.float32), inv=0):
if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
print(scale)
scale = np.array([scale, scale])
scale_tmp = scale * 200
# print('scale_tmp',scale_tmp)
# print("scale_tmp: {}".format(scale_tmp))
# print("output_size: {}".format(output_size)) # W H
src_w = scale_tmp[0]
dst_w = output_size[0]
dst_h = output_size[1]
rot_rad = np.pi * rot / 180
src_dir = self.get_dir([0, src_w * -0.5], rot_rad)
dst_dir = np.array([0, dst_w * -0.5], np.float32)
# print("src_dir: {}".format(src_dir))
# print("dst_dir: {}".format(dst_dir))
src = np.zeros((3, 2), dtype=np.float32)
dst = np.zeros((3, 2), dtype=np.float32)
# print("center: {}".format(center))
src[0, :] = center + scale_tmp * shift
# print("src[0, :]: {}".format(src[0, :]))
# print("src_dir: {}".format(src_dir))
src[1, :] = center + src_dir + scale_tmp * shift
# print("src[1, :]: {}".format(src[1, :]))
dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir
src[2:, :] = self.get_3rd_point(src[0, :], src[1, :])
# print("src[2:, :]: {}".format(src[2:, :]))
dst[2:, :] = self.get_3rd_point(dst[0, :], dst[1, :])
# print('src', src,dst)
# print("src:\n{}".format(src))
# print("dst:\n{}".format(dst))
if inv:
trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
else:
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
# exit(1)
return trans
def get_dir(self, src_point, rot_rad):
sn, cs = np.sin(rot_rad), np.cos(rot_rad)
src_result = [0, 0]
src_result[0] = src_point[0] * cs - src_point[1] * sn
src_result[1] = src_point[0] * sn + src_point[1] * cs
return src_result
def get_3rd_point(self, a, b):
direct = a - b
return b + np.array([-direct[1], direct[0]], dtype=np.float32)
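# Example (added for clarity): get_3rd_point(np.array([1., 0.]), np.array([0., 0.]))
# returns [0., 1.]: the third point is the vector a - b rotated by 90 degrees
# around b, which yields the three non-collinear points required by
# cv2.getAffineTransform in get_affine_transform above.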
def _box2cs(self, size, aspect_ratio=None, scale_factor=None):
x, y, w, h = 0, 0, size[0], size[1]
return self._xywh2cs(x, y, w, h,
aspect_ratio,
scale_factor)
def _xywh2cs(self, x, y, w, h, aspect_ratio, scale_factor):
center = np.zeros((2), dtype=np.float32)
center[0] = x + w * 0.5
center[1] = y + h * 0.5
if w > aspect_ratio * h:
h = w * 1.0 / aspect_ratio
elif w < aspect_ratio * h:
w = h * aspect_ratio
scale = np.array(
[w * 1.0 / 200, h * 1.0 / 200],
dtype=np.float32)
return center, scale
def __getitem__(self, item):
if item < 0 or item >= self.__len__():
return None
# Read images
# data = Image.open(str(self.data[item]))
data = cv2.imread(str(self.data[item]))
imgPath = str(self.data[item])
gt = [[0, 0], [0, 0]]
# gt = np.loadtxt(str(self.data[item]))
with open(str(self.gt[item]), "r") as f:
lines = f.readlines()  # read the whole file into the list `lines`
# matrix row index, starting from row 0
row = 0
# read the data out of `lines` line by line
for line in lines:
# process each line: strip() removes the trailing '\n', split() separates the fields by spaces, and the resulting fields are stored in `list`
list = line.strip('\n').split(' ')
gt[row][0] = float(list[0])
gt[row][1] = float(list[1])
# print("point:", list[0], list[1])
# then continue reading the next row
row = row + 1
if row == 2:
break
# gt.sort(key=takeSecond)
# print("file", imgPath)
H, W = 384, 384
# print(type(data))
# data augmentation
# data = self.randomBlur(data)
data = self.RandomBrightness(data)
data = self.RandomHue(data)
data = self.RandomSaturation(data)
# data = self.randomColor(data)
data = self.randomGaussian(data, mean=0.2, sigma=0.3)
data = 255 * np.array(data).astype('uint8')
data = cv2.cvtColor(np.array(data), cv2.COLOR_RGB2BGR) # PIL转cv2
if self.rgb2gray:
t = torchvision.transforms.Grayscale(1)
data = t(data)
# Convert to numpy
data = np.array(data, dtype=np.float32) / self.norm_factor
# gt = np.array(gt, dtype=np.float32) / 384
gt = np.array(gt, dtype=np.float32)
size = [384, 384]
mask = np.zeros((size[0], size[1]), dtype=float)
heatmaps = self._putGaussianMaps(gt, H, W, 1, self.__sigma)
heatmaps = heatmaps.astype(np.float32)
# print(heatmaps)
c, s = self._box2cs(size, aspect_ratio=1)
r = 0
# print(r)
trans = self.get_affine_transform(c, s, r, size)
# data = cv2.warpAffine(
# data, trans, (size[0], size[1]), flags=cv2.INTER_LINEAR)
mask = cv2.warpAffine(
mask, trans, (size[0], size[1]), flags=cv2.INTER_LINEAR, borderValue=255)
# Expand dims into Pytorch format
data = np.transpose(data, (2, 0, 1))
# Convert to Pytorch Tensors
data = torch.tensor(data, dtype=torch.float)
gt = torch.tensor(gt, dtype=torch.float32)
# print("gt,imgPath:", gt, imgPath)
mask = torch.tensor(mask, dtype=torch.float)
return data, gt, mask, item, imgPath, heatmaps
def randomColor(self, image):
"""
Apply random color jitter to the image
:param image: PIL image
:return: color-jittered PIL image
"""
random_factor = np.random.randint(0, 31) / 10.  # random factor
color_image = ImageEnhance.Color(
image).enhance(random_factor)  # adjust image saturation
random_factor = np.random.randint(10, 21) / 10.  # random factor
brightness_image = ImageEnhance.Brightness(
color_image).enhance(random_factor)  # adjust image brightness
random_factor = np.random.randint(10, 21) / 10.  # random factor
contrast_image = ImageEnhance.Contrast(
brightness_image).enhance(random_factor)  # adjust image contrast
random_factor = np.random.randint(0, 31) / 10.  # random factor
# adjust image sharpness
return ImageEnhance.Sharpness(contrast_image).enhance(random_factor)
def randomGaussian(self, image, mean, sigma):
"""
对图像进行高斯噪声处理
:param image:
:return:
"""
def gaussianNoisy(im, mean, sigma):
"""
对图像做高斯噪音处理
:param im: 单通道图像
:param mean: 偏移量
:param sigma: 标准差
:return:
"""
for _i in range(len(im)):
im[_i] += random.gauss(mean, sigma)
return im
# convert the image into a numpy array
img = np.asarray(image)
img.flags.writeable = True  # make the array writable
width, height = img.shape[:2]
img_r = gaussianNoisy(img[:, :, 0].flatten(), mean, sigma)
img_g = gaussianNoisy(img[:, :, 1].flatten(), mean, sigma)
img_b = gaussianNoisy(img[:, :, 2].flatten(), mean, sigma)
img[:, :, 0] = img_r.reshape([width, height])
img[:, :, 1] = img_g.reshape([width, height])
img[:, :, 2] = img_b.reshape([width, height])
return Image.fromarray(np.uint8(img))
def RandomBrightness(self, bgr):
if random.random() < 0.5:
hsv = self.BGR2HSV(bgr)
h, s, v = cv2.split(hsv)
adjust = random.choice([0.5, 1.5])
v = v*adjust
v = np.clip(v, 0, 255).astype(hsv.dtype)
hsv = cv2.merge((h, s, v))
bgr = self.HSV2BGR(hsv)
return bgr
def RandomSaturation(self, bgr):
if random.random() < 0.5:
hsv = self.BGR2HSV(bgr)
h, s, v = cv2.split(hsv)
adjust = random.choice([0.5, 1.5])
s = s*adjust
s = | np.clip(s, 0, 255) | numpy.clip |
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: BSD 3 clause
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_arrays
from ..utils import atleast2d_or_csc
from ..utils import array2d
from ..utils import atleast2d_or_csr
from ..utils import safe_asarray
from ..utils import warn_if_not_float
from ..utils.sparsefuncs import inplace_csr_row_normalize_l1
from ..utils.sparsefuncs import inplace_csr_row_normalize_l2
from ..utils.sparsefuncs import inplace_csr_column_scale
from ..utils.sparsefuncs import mean_variance_axis0
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'Normalizer',
'OneHotEncoder',
'Scaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
]
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
if isinstance(std_, np.ndarray):
std_[std_ == 0.0] = 1.0
elif std_ == 0.:
std_ = 1.
else:
std_ = None
return mean_, std_
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
warn_if_not_float(X, estimator='The scale function')
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis0(X)
var[var == 0.0] = 1.0
inplace_csr_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
warn_if_not_float(X, estimator='The scale function')
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
if with_std:
Xr /= std_
return X
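# Worked example (added for clarity): scale(np.array([[1., 2.], [3., 4.]]))
# centers each column on its mean [2., 3.] and divides by its population
# standard deviation [1., 1.], returning [[-1., -1.], [1., 1.]].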
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Standardizes features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The standardization is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This standardization is often used as an alternative to zero mean,
unit variance scaling.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default is True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
`min_` : ndarray, shape (n_features,)
Per feature adjustment for minimum.
`scale_` : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_arrays(X, sparse_format="dense", copy=self.copy)[0]
warn_if_not_float(X, estimator=self)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
data_min = np.min(X, axis=0)
data_range = np.max(X, axis=0) - data_min
# Do not scale constant features
data_range[data_range == 0.0] = 1.0
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
self.data_min = data_min
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
X = check_arrays(X, sparse_format="dense", copy=self.copy)[0]
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
X = check_arrays(X, sparse_format="dense", copy=self.copy)[0]
X -= self.min_
X /= self.scale_
return X
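# Worked example (added for clarity): fitting MinMaxScaler() on
# [[1., 2.], [3., 4.]] gives data_min = [1., 2.] and data_range = [2., 2.],
# hence scale_ = [0.5, 0.5] and min_ = [-0.5, -1.]; transforming the same data
# returns [[0., 0.], [1., 1.]].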
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
`mean_` : array of floats with shape [n_features]
The mean value for each feature in the training set.
`std_` : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_arrays(X, copy=self.copy, sparse_format="csr")[0]
if warn_if_not_float(X, estimator=self):
X = X.astype(np.float)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.mean_ = None
if self.with_std:
var = mean_variance_axis0(X)[1]
self.std_ = np.sqrt(var)
self.std_[var == 0.0] = 1.0
else:
self.std_ = None
return self
else:
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
copy = copy if copy is not None else self.copy
X = check_arrays(X, copy=copy, sparse_format="csr")[0]
if warn_if_not_float(X, estimator=self):
X = X.astype(np.float)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.std_ is not None:
inplace_csr_column_scale(X, 1 / self.std_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.std_ is not None:
inplace_csr_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
class Scaler(StandardScaler):
def __init__(self, copy=True, with_mean=True, with_std=True):
warnings.warn("Scaler was renamed to StandardScaler. The old name "
" will be removed in 0.15.", DeprecationWarning)
super(Scaler, self).__init__(copy, with_mean, with_std)
def normalize(X, norm='l2', axis=1, copy=True):
"""Normalize a dataset along any axis
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1' or 'l2', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_arrays(X, sparse_format=sparse_format, copy=copy)[0]
warn_if_not_float(X, 'The normalize function')
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)[:, np.newaxis]
norms[norms == 0.0] = 1.0
elif norm == 'l2':
norms = np.sqrt(np.sum(X ** 2, axis=1))[:, np.newaxis]
norms[norms == 0.0] = 1.0
X /= norms
if axis == 0:
X = X.T
return X
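# Worked example (added for clarity): normalize([[3., 4.]], norm='l2') divides
# the single sample by its l2 norm sqrt(3**2 + 4**2) = 5 and returns
# [[0.6, 0.8]]; with norm='l1' the divisor would be |3| + |4| = 7.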
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrices (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Parameters
----------
norm : 'l1' or 'l2', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
atleast2d_or_csr(X)
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
atleast2d_or_csr(X)
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default is True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
sparse_format = "csr" # We force sparse format to be either csr or csc.
if hasattr(X, "format"):
if X.format in ["csr", "csc"]:
sparse_format = X.format
X = check_arrays(X, sparse_format=sparse_format, copy=copy)[0]
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
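# Worked example (added for clarity): binarize([[1., -1., 2.]], threshold=0.0)
# returns [[1., 0., 1.]]: values strictly greater than the threshold map to 1,
# all other values map to 0.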
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default is True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
atleast2d_or_csr(X)
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
normalize to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = array2d(K)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
K = array2d(K)
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
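# Note (added for clarity): the transform above implements the standard kernel
# centering
#   K_centered(x, z) = K(x, z) - mean_i K(x_i, z) - mean_j K(x, x_j)
#                      + mean_ij K(x_i, x_j)
# where the means run over the training samples x_i, x_j seen in fit():
# K_fit_rows_ holds mean_i K(x_i, .), K_pred_cols holds mean_j K(., x_j) and
# K_fit_all_ is the overall mean of the training kernel.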
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = safe_asarray(X)
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if selected == "all":
return transform(X)
X = atleast2d_or_csc(X, copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
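# Illustrative usage sketch (not part of the original module): applying a toy
# transform (doubling) to only the first two columns of a made-up array via
# _transform_selected; the untouched column is stacked to the right.
def _example_transform_selected():
    X = np.array([[1., 2., 3.],
                  [4., 5., 6.]])
    return _transform_selected(X, lambda Z: 2 * Z, selected=[0, 1])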
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
    a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
Attributes
----------
`active_features_` : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
`feature_indices_` : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
`n_values_` : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
n_values='auto')
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_arrays(X, sparse_format='dense', dtype=np.int)[0]
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
                                % type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
        indices = np.cumsum(n_values)
import numpy as np
import matplotlib.pyplot as plt
from scipy.sparse import spdiags
from scipy.sparse import linalg as lg
# Quadratic function
class Quad(object):
def __init__(self, Q, p):
self.Q = Q
self.p = p
def func(self, x):
r_0 = self.p - np.dot(self.Q, x)
return r_0
# Solver
class MINRES(object):
def __init__(self, A, b, x_0, TOL, MAXIT):
self.func = Quad(A, b)
self.TOL = TOL
self.MAXIT = MAXIT
self.Q = self.func.Q
self.x = x_0
self.r_vec = [self.func.func(self.x)]
self.p_vec = [self.r_vec[-1]]
self.Qr_vec = [np.dot(self.Q, self.r_vec[-1])]
self.res_vec = [np.linalg.norm(self.r_vec[-1])]
self.Qp = np.dot(self.Q, self.p_vec[-1])
self.k = 1
def calc(self, ):
while self.res_vec[-1] > self.TOL and self.k < self.MAXIT:
alpha = np.divide(np.dot(self.r_vec[-1].T, self.Qr_vec[-1]), np.dot(self.Qp.T, self.Qp))
self.x += alpha * self.p_vec[-1]
self.r_vec.append(self.r_vec[-1] - (alpha * self.Qp))
self.Qr_vec.append(np.dot(self.Q, self.r_vec[-1]))
self.res_vec.append(np.linalg.norm(self.r_vec[-1]))
            beta = np.divide(np.dot(self.r_vec[-1].T, self.Qr_vec[-1]), np.dot(self.r_vec[-2].T, self.Qr_vec[-2]))
import numpy as np
import tensorflow as tf
from tensorflow.contrib.keras import backend as K
import os
import matplotlib.pyplot as plt
from tensorflow.python.keras.models import Sequential, load_model
from tensorflow.python.keras.layers import Dense, Activation, Dropout, Flatten, BatchNormalization, Bidirectional, LSTM
from sklearn.metrics import confusion_matrix, classification_report
from tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint
from collections import Counter
import models.gpuutils as gpuutils
from models.cpuutils import *
import time
from sklearn.utils import class_weight
def deep_rnnblocks(inputdim, inputshape):
if inputdim < 2:
return (Bidirectional(LSTM(10, return_sequences=True), input_shape=inputshape, name='input'), Bidirectional(LSTM(20, return_sequences=False)))
elif inputdim < 4:
return (Bidirectional(LSTM(15, return_sequences=True), input_shape=inputshape, name='input'), Bidirectional(LSTM(30, return_sequences=False)))
elif inputdim < 6:
return (Bidirectional(LSTM(20, return_sequences=True), input_shape=inputshape, name='input'), Bidirectional(LSTM(40, return_sequences=False)))
else:
return (Bidirectional(LSTM(30, return_sequences=True), input_shape=inputshape, name='input'), Bidirectional(LSTM(60, return_sequences=False)))
def deep_units(timesteps, inputdim):
if inputdim < 2:
return (64, 32)
elif inputdim < 4:
return (128, 32)
elif inputdim < 6:
return (256, 32)
else:
return (256, 64)
def build_model(timesteps, inputdim, num_classes):
model = Sequential()
rnns = deep_rnnblocks(inputdim, (timesteps, inputdim))
units = deep_units(timesteps, inputdim)
for layer in rnns:
model.add(layer)
model.add(Dropout(0.5))
for unit in units:
model.add(Dense(unit, activation=tf.nn.relu))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
return model
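# Illustrative usage sketch (not part of the original module): building a small
# bidirectional-LSTM classifier with the helper above. The dimensions
# (50 timesteps, 3 input features, 4 classes) are made up for demonstration;
# the resulting model expects batches of shape (batch_size, 50, 3).
def _example_build_model():
    return build_model(timesteps=50, inputdim=3, num_classes=4)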
def train_model(timesteps, inputdim, train_dataprovider, train_steps_per_epoch, valid_x, valid_y, test_x, test_y, time_steps, num_classes, modelfile, batchsize, maxepochs, patience):
model = build_model(timesteps, inputdim, num_classes)
checkpointer = gpuutils.AutoCheckStopping(filepath=modelfile, verbose=1, save_best_only = True, patience = patience)
try:
model.fit_generator(train_dataprovider, steps_per_epoch=train_steps_per_epoch, epochs=maxepochs, validation_data =[valid_x, valid_y], callbacks= [checkpointer])
bestmodel = build_model(timesteps, inputdim, num_classes)
bestmodel.load_weights(modelfile)
predict = bestmodel.predict(test_x, verbose=1, batch_size= batchsize)
except KeyboardInterrupt:
pass
return predict, test_y, checkpointer.stopped_epoch
def run_LSTM(timesteps, numclasses, train_x, train_y, train_lens, valid_x, valid_y, valid_lens, test_x, test_y, test_lens, modeldir, batchsize, maxepochs, patience, expsnum, **kwargs):
K.set_session(gpuutils.get_session())
if len(np.shape(train_x))<3:
train_x = np.expand_dims(train_x, 2)
if len(np.shape(valid_x))<3:
valid_x = np.expand_dims(valid_x, 2)
if len(np.shape(test_x))<3:
test_x = np.expand_dims(test_x, 2)
valid_y = encode_target(valid_y, numclasses)
test_y = encode_target(test_y, numclasses)
preds = []
tests = []
epochs = []
print('start...')
unique_y = list(range(numclasses))
if kwargs.get('balancedgenerator')==True:
train_dataprovider = gpuutils.SourceGenerator(gpuutils.BalancedGenerator(train_x, train_y, unique_y, int(batchsize/numclasses), True))
else:
train_dataprovider = gpuutils.SourceGenerator(gpuutils.RandomGenerator(train_x, train_y, unique_y, batchsize, True))
    timesteps = np.shape(train_x)[1]
#!/usr/bin/env python
# Copyright 2017-2021 Biomedical Imaging Group Rotterdam, Departments of
# Medical Informatics and Radiology, Erasmus MC, Rotterdam, The Netherlands
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from WORC.processing.ExtractNLargestBlobsn import ExtractNLargestBlobsn
import SimpleITK as sitk
from skimage import morphology
import scipy.ndimage as nd
import numpy as np
import WORC.IOparser.config_segmentix as config_io
from WORC.processing import helpers as h
import pydicom
import WORC.addexceptions as ae
def get_ring(contour, radius=5):
"""Get a ring on the boundary of the contour.
PARAMETERS
----------
contour: numpy array
Array containing the contour
radius: int, default 5
Radius of ring to be extracted.
"""
contour = contour.astype(bool)
radius = int(radius)
disk = morphology.disk(radius)
# Dilation with radius in axial direction
for ind in range(contour.shape[0]):
contour_d = morphology.binary_dilation(contour[ind, :, :], disk)
contour_e = morphology.binary_erosion(contour[ind, :, :], disk)
contour[ind, :, :] = np.bitwise_xor(contour_d, contour_e)
return contour
def dilate_contour(contour, radius=5):
"""Dilate the contour.
PARAMETERS
----------
contour: numpy array
Array containing the contour
radius: int, default 5
Radius of ring to be extracted.
"""
contour = contour.astype(bool)
radius = int(radius)
disk = morphology.disk(radius)
# Dilation with radius in axial direction
for ind in range(contour.shape[0]):
contour[ind, :, :] = morphology.binary_dilation(contour[ind, :, :],
disk)
return contour
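# Illustrative usage sketch (not part of the original module): a synthetic
# one-slice segmentation (a made-up square blob) to show the ring and dilation
# helpers above.
def _example_ring_and_dilation():
    contour = np.zeros((1, 50, 50), dtype=bool)
    contour[0, 15:35, 15:35] = True                  # square "lesion" in one axial slice
    ring = get_ring(contour, radius=3)               # hollow band around the boundary
    dilated = dilate_contour(contour, radius=3)      # segmentation grown by 3 pixels
    return ring, dilated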
def mask_contour(contour, mask, method='multiply'):
"""Apply a mask to a contour.
PARAMETERS
----------
contour: numpy array
Array containing the contour
mask: string
path referring to the mask used for the final segmentation.
Should be a format compatible with ITK, e.g. .nii, .nii.gz, .mhd,
.raw, .tiff, .nrrd.
method: string
        How masking is applied: can be 'subtract' or 'multiply'.
"""
mask = sitk.ReadImage(mask)
mask = sitk.GetArrayFromImage(mask)
mask = nd.binary_fill_holes(mask)
mask = mask.astype(bool)
if method == 'subtract':
contour = np.bitwise_xor(contour, mask)
elif method == "multiply":
contour = np.multiply(contour, mask)
return contour
def segmentix(parameters, image=None, segmentation=None,
output=None, metadata_file=None, mask=None):
"""Alter a segmentation.
Segmentix is a mixture of processing methods that can be applied to
    augment a segmentation. Examples include selecting only the largest blob
and the application of morphological operations.
Parameters
----------
parameters: string, mandatory
Contains the path referring to a .ini file in which the
parameters to be used are specified. See the Github Wiki
for more details on the format and content.
image: string, optional
        Not implemented yet! Image to be used for automatic segmentation.
segmentation: string, currently mandatory
path referring to the input segmentation file. Should be a
format compatible with ITK, e.g. .nii, .nii.gz, .mhd,
.raw, .tiff, .nrrd.
output: string, mandatory
path referring to the output segmentation file. Should be a
format compatible with ITK, e.g. .nii, .nii.gz, .mhd,
.raw, .tiff, .nrrd.
metadata_file: string, optional
        Not implemented yet! Path referring to the .dcm from which
fields can be used as metadata for segmentation.
mask: string, optional
path referring to the mask used for the final segmentation.
Should be a format compatible with ITK, e.g. .nii, .nii.gz, .mhd,
.raw, .tiff, .nrrd.
"""
    # Load variables from the config file
config = config_io.load_config(parameters)
# Load segmentation and perform routines
if type(segmentation) is list:
segmentation = ''.join(segmentation)
if type(image) is list:
image = ''.join(image)
    # Load the segmentation and convert it to an array
contour_original = sitk.ReadImage(segmentation)
contour = sitk.GetArrayFromImage(contour_original)
if config['Segmentix']['fillholes']:
print('[Segmentix] Filling holes.')
contour = nd.binary_fill_holes(contour)
if config['Segmentix']['remove_small_objects']:
print('[Segmentix] Removing small objects.')
min_size = config['Segmentix']['min_object_size']
contour = morphology.remove_small_objects(contour, min_size=min_size,
connectivity=2,
in_place=False)
if config['Segmentix']['N_blobs'] != 0:
print('[Segmentix] Extracting largest blob.')
contour = contour.astype(bool)
contour = ExtractNLargestBlobsn(contour, 1)
# Expand contour depending on settings
# TODO: this is a workaround for 3-D morphology
if config['Segmentix']['type'] == 'Ring':
print('[Segmentix] Converting contour to ring around existing contour.')
radius = config['Segmentix']['radius']
contour = get_ring(contour, radius)
elif config['Segmentix']['type'] == 'Dilate':
print('[Segmentix] Dilating contour.')
radius = config['Segmentix']['radius']
contour = dilate_contour(contour, radius)
# Mask the segmentation if necessary
if mask is not None:
method = config['Segmentix']['mask']
print('[Segmentix] Masking contour.')
if type(mask) is list:
mask = ''.join(mask)
new_contour = mask_contour(contour, mask, method)
        if np.sum(new_contour) > 0:
            contour = new_contour
"""
Define Grid Persistence Landscape class.
"""
import numpy as np
import itertools
from .landscape_auxiliary import pairs_snap, union_vals, ndsnap_regular
from operator import itemgetter, attrgetter
from .PersLandscape import PersLandscape
class PersLandscapeGrid(PersLandscape):
"""
Persistence Landscape Grid class.
This class implements an approximate version of Persistence Landscape,
given by sampling the landscape functions on a grid. This version is only an
approximation to the true landscape, but given a fine enough grid, this should
suffice for most applications. If an exact
calculation with no approximation is desired, consider `PersLandscapeExact`.
The default parameters for start and stop favor dgms over values. That
is, if both dgms and values are passed but start and stop are not, the
start and stop values will be determined by dgms.
Parameters
----------
start : float, optional
The start parameter of the approximating grid.
stop : float, optional
The stop parameter of the approximating grid.
num_dims : int, optional
The number of dimensions of the approximation, equivalently the
number of steps in the grid.
dgms : list[list]
        A list of lists of birth-death pairs for each homological degree.
homological_degree : int
        Represents the homology degree of the persistence diagram.
    values : numpy.ndarray, optional
        The sampled values of the landscape functions on the grid.
Methods
-------
Examples
--------
"""
def __init__(
self, start: float = None, stop: float = None, num_dims: int = 500,
dgms: list = [], homological_degree: int = 0,
values=np.array([]), compute: bool = False) -> None:
super().__init__(dgms=dgms, homological_degree=homological_degree)
if dgms: # diagrams are passed
self.dgms = dgms[self.homological_degree]
            # remove infinity values
# ~: indexes everything but values satisfying the condition
# axis = 1: checks the condition for each row
# np.any: if any element in the row satisfies the condition
# it gets indexed
self.dgms = self.dgms[~np.any(self.dgms == np.inf, axis=1)]
# calculate start and stop
if start is None:
start = min(self.dgms, key=itemgetter(0))[0]
if stop is None:
stop = max(self.dgms, key=itemgetter(1))[1]
elif values.size > 0: # values passed, diagrams weren't
self.dgms = dgms
if start is None:
raise ValueError(
'start parameter must be passed if values are passed.')
if stop is None:
raise ValueError(
'stop parameter must be passed if values are passed.')
# stop = np.amax(values)
self.start = start
self.stop = stop
self.values = values
self.num_dims = num_dims
if compute:
self.compute_landscape()
def __repr__(self) -> str:
return ('The persistence landscapes of diagrams in homological '
f'degree {self.homological_degree} on grid from {self.start} to {self.stop}'
                f' with {self.num_dims} steps')
def compute_landscape(self, verbose: bool = False) -> list:
verboseprint = print if verbose else lambda *a, **k: None
if self.values.size:
verboseprint('values was stored, exiting')
return
verboseprint('values was empty, computing values')
# make grid
grid_values, step = np.linspace(self.start, self.stop, self.num_dims,
retstep=True)
#grid_values = list(grid_values)
#grid = np.array([[x,y] for x in grid_values for y in grid_values])
bd_pairs = self.dgms
# create list of triangle top for each birth death pair
birth: 'np.ndarray' = bd_pairs[:, 0]
death: 'np.ndarray' = bd_pairs[:, 1]
triangle_top_ycoord = (death - birth) / 2
triangle_top = np.array(
list(zip((birth + death) / 2, (death - birth) / 2)))
# snap birth-death pairs and triangle tops to grid
#bd_pairs_grid = pairs_snap(bd_pairs, grid)
bd_pairs_grid = ndsnap_regular(bd_pairs, *(grid_values, grid_values))
#triangle_top_grid = pairs_snap(triangle_top, grid)
triangle_top_grid = ndsnap_regular(
triangle_top, *(grid_values, grid_values))
# make grid dictionary
index = list(range(self.num_dims))
dict_grid = dict(zip(grid_values, index))
        # initialize W to a list of 2m + 1 empty lists
W = [[] for _ in range(self.num_dims)]
# for each birth death pair
for ind_in_bd_pairs, bd in enumerate(bd_pairs_grid):
[b, d] = bd
ind_in_Wb = dict_grid[b] # index in W
ind_in_Wd = dict_grid[d] # index in W
# step through by x value
j = 0
# j in (b, b+d/2]
for _ in np.arange(
triangle_top_grid[ind_in_bd_pairs, 0], b, -step):
j += 1
# j*step: adding points from a line with slope 1
W[ind_in_Wb + j].append(j * step)
j = 0
# j in (b+d/2, d)
for _ in np.arange(
triangle_top_grid[ind_in_bd_pairs, 0] + step, d, step):
j += 1
W[ind_in_Wd - j].append(j * step)
# sort each list in W
for i in range(len(W)):
W[i] = sorted(W[i], reverse=True)
# calculate k: max length of lists in W
K = max([len(_) for _ in W])
# initialize L to be a zeros matrix of size K x (2m+1)
        L = np.array([np.zeros(self.num_dims) for _ in range(K)])
from __future__ import division, print_function
import os
import fnmatch
import numpy as np
import pandas as pd
import re
def hysplit_filelister(signature):
"""
List all HYSPLIT files matching a given signature.
Parameters
----------
signature : string
Signature shared by group of HYSPLIT simulation files from a single or
multiple model runs (if multiple, must contain same output variables).
This is a Bash-style signature, not a real expression. The '*' char is
a wildcard. Can include an absolute or relative path, or no path.
Returns
-------
matching_files : list of strings
List of files matching ``signature``
Notes
-----
Any Bash-style signature is supported.
The file search is non-recursive.
"""
# Initialize
orig_dir = os.getcwd()
matching_files = []
try:
head, tail = os.path.split(signature)
os.chdir(head)
# os.walk obtains list in top-down manner (files in order)
_, _, files = next(os.walk('.'))
for each_file in files:
if fnmatch.fnmatch(each_file, tail):
matching_files.append(each_file)
finally:
os.chdir(orig_dir)
if len(matching_files) == 0:
raise LookupError("Could not find any files matching the provided "
"signature `{0}`, please check your paths and "
"try again.".format(signature))
return matching_files
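# Illustrative usage sketch (not part of the original module): listing the
# trajectory files of a hypothetical HYSPLIT run. Both the directory and the
# file signature below are made up and must be adapted to real output on disk.
def _example_filelister():
    return hysplit_filelister('/tmp/hysplit_output/traj_summer2010*')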
def load_hysplitfile(filename):
"""
Load data from each trajectory into a ``NumPy ndarray``.
Parameters
----------
filename : string
The name of a trajectory file
Returns
-------
hydata : (M, N) ndarray of floats or list of ndarrays
Ndarray with M time steps and N variables representing one trajectory.
If there are multiple trajectories in a file, ``multiple_traj`` will
be ``True`` and ``hydata`` will be a list of ndarrays, potentially of
different sizes.
pathdata : (M, 3) ndarray of floats or list of ndarrays
The path information in lon, lat, z. If there are multiple
trajectories in a file, ``multiple_traj`` will
be ``True`` and ``pathdata`` will be a list of ndarrays.
header : list of N strings
The column headers for ``hydata`` arrays. Used to parse ``hydata``
into different trajectory attributes
datetime : DateTime index of length M
multiple_traj : Boolean
"""
# Every header- first part
header = ['Parcel Number',
'Timestep']
with open(filename, 'r') as hyfile:
contents = hyfile.readlines()
skip = False
atdata = False
# Entire contents because otherwise it misses last line
for ind, line in enumerate(contents):
if skip:
skip = False
continue
# This happens third and goes until end
if atdata:
data = [float(x) for x in line.split()]
if multiline:
data.extend([float(x) for x in contents[ind + 1].split()])
skip = True
# year, month, day, hour, minute
timedata[arr_ind, :] = data[2:7]
# parcel, timestep, along-traj data
hydata[arr_ind, :] = [data[0]] + [data[8]] + data[12:]
# lats, lons, altitude
pathdata[arr_ind, :] = data[9:12]
arr_ind += 1
continue
# OMEGA happens first
if 'OMEGA' in line:
num_parcels = int(line.split()[0])
multiple_traj = False
if num_parcels > 1:
multiple_traj = True
# Number of data rows = length of contents minus the number of
# lines before OMEGA, @OMEGA, between OMEGA and first time pt
flen = len(contents) - (2 + num_parcels) - ind
continue
# PRESSURE happens second
if 'PRESSURE' in line:
new_header = line.split()[1:]
columns = 12 + len(new_header)
header.extend(new_header)
multiline = False
# Pre-Jan 2017 files may have timepoints running onto second
# lines, which are always short
if len(contents[ind + 1]) > len(contents[ind + 2]):
multiline = True
# Data file is only half as many lines as it looks
flen /= 2
# Initialize empty data arrays
hydata = np.empty((int(flen), columns - 10))
pathdata = np.empty((int(flen), 3))
timedata = np.empty((int(flen), 5))
atdata = True
arr_ind = 0
continue
# Catch the vast majority of non-HYSPLIT files if passed
# Works because the above conditionals fall through; vars never defined
if 'multiline' not in locals() or 'flen' not in locals():
raise IOError("The file, `{0}`, does not appear to be "
"a valid HYSPLIT file. Please double check "
"your paths.".format(filename))
# Determine what century files are from
# Requires a length 10 run of digits in filename
# If unable to determine, defaults to 2000
century = _getcentury(filename)
# Get pathdata in x, y, z from lats (y), lons (x), z
pathdata = pathdata[:, np.array([1, 0, 2])]
# Split hydata into individual trajectories (in case there are multiple)
if multiple_traj:
hydata, pathdata, datetime = _trajsplit(hydata, pathdata, timedata,
century)
else:
datetime = _getdatetime(century, timedata)
return hydata, pathdata, header, datetime, multiple_traj
def _trajsplit(hydata, pathdata, timedata, century):
"""
Split arrays into lists of arrays by unique trajectory.
Parameters
----------
hydata : (L, N) ndarray of floats
Array with L rows and N variables, introspected from a hysplit
data file.
pathdata : (L, 3) ndarray of floats
Array with L rows and x, y z (lons, lats, altitude) columns
timedata : (L, 5) ndarray of floats
Array with L rows and year, month, day, hour, and minute
columns
century : int
The century at time 0 of the trajectories
Returns
-------
split_hydata : list of (?, N) ndarrays of floats
``hydata`` split into individual trajectories
split_pathdata : list of (?, 3) ndarrays of floats
``pathdata split into individual trajectories
datetime : list of pandas DatetimeIndex
List of DatetimeIndex
"""
# Find number of unique trajectories within `hydata`
    unique_traj = np.unique(hydata[:, 0])
import numpy as np
import tqdm
def backProjectFeatures(K, features, depths, poses):
assert len(features.shape) == 3
assert features.shape[1] == 1
assert features.shape[2] == 2
assert len(K.shape) == 2
assert K.shape[0] == 3
assert K.shape[1] == 3
assert len(depths.shape) == 1
assert depths.shape[0] == features.shape[0]
assert len(poses.shape) == 3
assert poses.shape[1] == 4
assert poses.shape[2] == 4
features_x_camera = depths * (features[:,0,0] - K[0, 2]) / K[0, 0]
features_y_camera = depths * (features[:,0,1] - K[1, 2]) / K[1, 1]
landmarks_local_hom = np.stack([features_x_camera, features_y_camera, depths, np.ones(len(depths))]).T.reshape(-1, 4, 1)
landmarks_global_hom = np.matmul(poses, landmarks_local_hom)
landmarks_global = landmarks_global_hom[:, :3,0] / landmarks_global_hom[:, 3:4,0]
return landmarks_global
def projectLandmarks(landmarks_global, pose, K):
assert len(landmarks_global.shape) == 2
assert landmarks_global.shape[1] == 3
assert len(pose.shape) == 2
assert pose.shape[0] == 4
assert pose.shape[1] == 4
assert len(K.shape) == 2
assert K.shape[0] == 3
assert K.shape[1] == 3
inv_pose = np.linalg.inv(pose)
ones = np.ones((landmarks_global.shape[0], 1))
landmarks_global_hom = np.concatenate([landmarks_global, ones], 1)
landmarks_global_hom = landmarks_global_hom.reshape(-1, 4, 1)
landmarks_local_hom = np.matmul(inv_pose, landmarks_global_hom)
landmarks_local_hom = landmarks_local_hom[:, :3, :] / landmarks_local_hom[:, 3:4, :]
features_hom = np.matmul(K, landmarks_local_hom)
features = features_hom[:, :2, 0] / features_hom[:, 2:3, 0]
features = features.reshape((-1, 1, 2))
return features
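# Illustrative usage sketch (not part of the original module): back-projecting
# made-up pixel features to 3D with identity camera poses and re-projecting
# them should recover the original pixel coordinates. Intrinsics, depths and
# poses are arbitrary demonstration values.
def _example_backproject_roundtrip():
    K = np.array([[200., 0., 160.],
                  [0., 200., 120.],
                  [0., 0., 1.]])
    features = np.array([[[160., 120.]],
                         [[180., 100.]]])          # shape (n, 1, 2)
    depths = np.array([2., 3.])
    poses = np.tile(np.eye(4), (2, 1, 1))          # cameras at the origin
    landmarks = backProjectFeatures(K, features, depths, poses)
    reprojected = projectLandmarks(landmarks, np.eye(4), K)
    assert np.allclose(reprojected, features)
    return landmarks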
def getTrackData(path, delimiter=" ", filter_too_short=False):
data = np.genfromtxt(path, delimiter=delimiter)
valid_ids, data = filter_first_tracks(data, filter_too_short)
track_data = {i: data[data[:,0]==i, 1:] for i in valid_ids}
return track_data
def filter_first_tracks(tracks, filter_too_short=False):
tmin = tracks[0, 1]
valid_ids = np.unique(tracks[tracks[:, 1] == tmin, 0]).astype(int)
all_ids = np.unique(tracks[:, 0]).astype(int)
for id in all_ids:
if id not in valid_ids:
tracks = tracks[tracks[:, 0] != id]
else:
if filter_too_short:
num_samples = len(tracks[tracks[:,0]==id])
if num_samples < 3:
tracks = tracks[tracks[:, 0] != id]
valid_ids = valid_ids[valid_ids!=id]
return valid_ids, tracks
def getError(est_data, gt_data):
# discard gt which happen after last est_data
gt_data = gt_data[gt_data[:, 0] <= est_data[-1, 0]]
est_t, est_x, est_y = est_data.T
gt_t, gt_x, gt_y = gt_data.T
if np.abs(gt_t[0] - est_t[0]) < 1e-5:
gt_t[0] = est_t[0]
if len(est_t) < 2:
return gt_t, np.array([0]), np.array([0])
# find samples which have dt > threshold
error_x = np.interp(gt_t, est_t, est_x) - gt_x
error_y = np.interp(gt_t, est_t, est_y) - gt_y
return gt_t, error_x, error_y
def compareTracks(est_track_data, gt_track_data):
error_data = np.zeros(shape=(0, 4))
for track_id, est_track in tqdm.tqdm(est_track_data.items()):
gt_track = gt_track_data[track_id]
# interpolate own track at time points given in gt track
gt_t, e_x, e_y = getError(est_track, gt_track)
if len(gt_t) != 0:
ids = (track_id * np.ones_like(e_x)).astype(int)
added_data = np.stack([ids, gt_t, e_x, e_y]).T
error_data = np.concatenate([error_data, added_data])
# sort times
error_data = error_data[error_data[:, 1].argsort()]
return error_data
def q_to_R(q):
if len(q.shape) == 1:
q = q[None, :]
w, x, y, z = q.T
return np.dstack([w ** 2 + x ** 2 - y ** 2 - z ** 2, 2 * x * y - 2 * w * z, 2 * x * z + 2 * w * y, \
2 * x * y + 2 * w * z, w ** 2 - x ** 2 + y ** 2 - z ** 2, 2 * y * z - 2 * w * x, \
2 * x * z - 2 * w * y, 2 * y * z + 2 * w * x, w ** 2 - x ** 2 - y ** 2 + z ** 2]).reshape(len(w), 3, 3)
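# Illustrative usage sketch (not part of the original module): converting a
# made-up unit quaternion (90 degrees about the z-axis, in w-x-y-z order) to a
# rotation matrix and checking orthonormality.
def _example_q_to_R():
    q = np.array([np.cos(np.pi / 4), 0., 0., np.sin(np.pi / 4)])
    R = q_to_R(q)[0]
    assert np.allclose(np.dot(R, R.T), np.eye(3))
    return R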
def get_left_right_dt(tq, ts, vs):
left_index = np.clip(np.searchsorted(ts, tq) - 1, 0, len(ts) - 1)
right_index = np.clip(left_index + 1, 0, len(ts) - 1)
dt = (tq - ts[left_index]) / (ts[right_index] - ts[left_index])
left_q = vs[left_index]
right_q = vs[right_index]
return left_q, right_q, dt
def slerp(tq, ts, qs):
left_q, right_q, dt = get_left_right_dt(tq, ts, qs)
# perform slerp
omega = np.arccos((left_q * right_q).sum(1))
omega[omega==0] = 1e-4
so = np.sin(omega)
    q_interp = (np.sin((1.0 - dt) * omega) / so)[:, None] * left_q + (np.sin(dt * omega) / so)[:, None] * right_q
    return q_interp
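# Illustrative usage sketch (not part of the original module): interpolating
# half-way between the identity quaternion and a 90 degree rotation about z;
# the expected result is a 45 degree rotation about z. Timestamps and the
# query time are made up.
def _example_slerp():
    ts = np.array([0.0, 1.0])
    qs = np.array([[1., 0., 0., 0.],
                   [np.cos(np.pi / 4), 0., 0., np.sin(np.pi / 4)]])
    q_half = slerp(np.array([0.5]), ts, qs)
    assert np.allclose(q_half, [[np.cos(np.pi / 8), 0., 0., np.sin(np.pi / 8)]])
    return q_half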
"""
File containing functions linked to the constraint matrix
"""
import numpy as np
from numba import jit
from tqdm import tqdm
from scipy.sparse import coo_matrix, find
def completion_constraint(constraint_matrix, force = False):
"""
Complete the constraints matrix by
forcing consistency and transitive closure
NB: Matrix will be dense
Arguments:
constraint_matrix {sparse array} -- Constrained on data points
+1 => Constraint the points to be in the same cluster
-1 => Constraint the points to be in separate clusters
Returns:
Completed constraint matrix {sparse array}
"""
constraint_matrix = constraint_matrix.todense()
assert np.array_equal(constraint_matrix.T, constraint_matrix)
# Transitive closure on positive constraint
# Adaptated Floyd–Warshall algorithm
    positive = np.where(constraint_matrix > 0, constraint_matrix, np.zeros_like(constraint_matrix))
"""
the core part of this experiment is to first take differences between times in
order to obtain conceptual shifts and then cluster these shift vectors. This way
we can observe
- whether certain shifts belong to a shared topic
-
"""
import numpy as np
from pandas import DataFrame
import utils
import eval_utils
import os
import math
from typing import Dict, List, Tuple
import SpacePair
def output_pairdists(sp:SpacePair, PX:np.ndarray,
word_pairs:List[Tuple[str,str]]=None,
neighbors:int=10, use_csls:bool=False,
out_dir:str=None, partition_name:str=""):
"""
For all given word pairs (u,v), compute the cosine distance between
Px_u and y_v, and find the nearest neighbors of x_u in X and y_v in Y.
Writes results to a file if out_dir is specified.
Prefixes output files with partition_name if specified.
Uses word_pairs instead of SpacePair.T if specified.
:param sp: SpacePair object holding X and Y as well as the word pairs T
:param PX: X, projected onto Y (i.e. aligned)
:param neighbors: number of nearest neighbors to be reported
:param use_csls: rank nearest neighbors not by cosine, but by CSLS instead
:param out_dir: needs to end with "/"
:param partition_name: used to distinguish matching/mismatching word pairs
"""
if word_pairs is None:
word_pairs = sorted(sp.T, key=sp.T.get, reverse=True)
pairdists = {}
for u,v in word_pairs:
pairdists[(u,v)] = utils.pairwise_cos_dist(np.array([PX[sp.voc_x[u]]]),
np.array([sp.Y[sp.voc_y[v]]]))[0]
dist_ranked_pairs = sorted(pairdists, key=pairdists.get, reverse=True)
# just some printouts
top = 10
print(f"\nPair Distances (top {top} {partition_name}):\n"
f"{'dcos(Px_u,y_v)'} {'word_u':<12} {'word_v':<12}")
for u,v in dist_ranked_pairs[:top]:
print(f"{pairdists[(u,v)]:<13.5f} {u:<12} {v:<12}")
print(f"\nFinding the {neighbors} nearest neighbors for each u and v...")
U, V = zip(*word_pairs)
src_nbs = find_closest_concepts(sp.X[[sp.voc_x[u] for u in U]], sp.X, sp.voc_x, k=neighbors, csls=use_csls)
trg_nbs = find_closest_concepts(sp.Y[[sp.voc_y[v] for v in V]], sp.Y, sp.voc_y, k=neighbors, csls=use_csls)
if out_dir is not None:
if not os.path.isdir(out_dir): os.makedirs(out_dir)
filepath = out_dir + partition_name + "_pairdists.tsv"
print(f"writing pair distances to {filepath}...\n")
df = DataFrame({"distance":[pairdists[pair] for pair in dist_ranked_pairs],
"src_word": [pair[0] for pair in dist_ranked_pairs],
"trg_word": [pair[1] for pair in dist_ranked_pairs],
"src_neighbors":src_nbs,
"trg_neighbors":trg_nbs})
df.to_csv(filepath, sep='\t')
def reduce_bilingual_signal(pairs:List[Tuple[str,str]],
spacepair:SpacePair,
min_count:int=1,
spaces_mincount:int=0):
"""
Reduce the list of word pairs (and the embedding spaces) to those words which
occur at least min_count (and spaces_mincount) times in the underlying corpus.
This function uses an approximation method to check whether the frequencies in
the SpacePair objects have been log-flattened; it can thus only handle raw or
log-flattened frequencies. Returns the word pairs and, if altered, the SpacePair.
:param min_count: usual values: 5, 10, 20
:param spaces_mincount: usual values: 1, 3, 5, 10
:return: List[Tuple[str,str]], optionally: SpacePair
"""
# reconstruct whether frequencies have been flattened
flattening_applies = True
overlap = list(set([p[0] for p in pairs]).intersection(set(spacepair.freq_x.keys())))
for i in range(min(len(overlap),10)):
reconstructed_freq = math.e**spacepair.freq_x[overlap[i]]
if not np.isclose(reconstructed_freq, round(reconstructed_freq), atol=1e-5):
flattening_applies = False
if flattening_applies:
min_count = np.log(min_count)
src_words = [w for w,f in spacepair.freq_x.items() if f >= min_count]
trg_words = [w for w,f in spacepair.freq_y.items() if f >= min_count]
reduced_signal = [pair for pair in pairs if pair[0] in src_words and pair[1] in trg_words]
if spaces_mincount > 0:
if flattening_applies:
spaces_mincount = np.log(spaces_mincount)
src_words = [w for w, f in spacepair.freq_x.items() if f >= spaces_mincount]
trg_words = [w for w, f in spacepair.freq_y.items() if f >= spaces_mincount]
spacepair.X, spacepair.voc_x = utils.select_subspace(spacepair.X, spacepair.voc_x, src_words)
spacepair.Y, spacepair.voc_y = utils.select_subspace(spacepair.Y, spacepair.voc_y, trg_words)
return reduced_signal, spacepair
else:
return reduced_signal
def reorganize(labels:List[int], selected_D:np.ndarray,
ind_sD:Dict[int,str])->(List[np.ndarray], List[List[str]]):
"""
From the information about which embedding belongs to which cluster, create
one 2D-array of embeddings per cluster and a list of lists with the words
instead of the embeddings. Return both structures.
:param labels: Cluster labels (length: m)
:param selected_D: Embeddings (shape: (m,d))
:param ind_sD: mapping of embedding/label indices to words (size: m)
:return: List[np.ndarray]], List[List[str]]
"""
clusters = {}
cluster_words = {}
for i, label in enumerate(labels): # the index becomes the key
if label not in clusters:
clusters[label] = [selected_D[i]]
else:
clusters[label].append(selected_D[i])
if label not in cluster_words:
cluster_words[label] = [ind_sD[i]]
else:
cluster_words[label].append(ind_sD[i])
# sort by label to be better accessible
clusters = [np.array(clusters[i]) for i in sorted(clusters.keys())]
cluster_words = [cluster_words[i] for i in sorted(cluster_words.keys())]
return clusters, cluster_words
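# Illustrative usage sketch (not part of the original module): regrouping three
# made-up shift vectors into two clusters with reorganize(); the labels,
# vectors and index-to-word mapping are purely for demonstration.
def _example_reorganize():
    labels = [0, 1, 0]
    selected_D = np.array([[1.0, 0.0],
                           [0.0, 1.0],
                           [0.9, 0.1]])
    ind_sD = {0: "shift_a", 1: "shift_b", 2: "shift_c"}
    clusters, cluster_words = reorganize(labels, selected_D, ind_sD)
    # clusters[0] holds the two vectors labelled 0, cluster_words[0] their words
    return clusters, cluster_words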
def normalize_shifts_by_frequency(D:np.ndarray, sorted_pairs:List[Tuple[str,str]],
freq1:Dict[str,int], freq2:Dict[str,int]) -> np.ndarray:
"""
Normalize difference vectors with the logarithm of the difference of the
original words' corpus frequencies. This is inspired by Cafagna et al. (2019)
:param D: difference vectors (= shift vectors)
:param sorted_pairs: word pairs in the same order as the shifts in D
:param freq1: corpus frequencies of the first words
:param freq2: corpus frequencies of the second words
:return: shift vectors with 'normalized' lengths
"""
flattening_applies = True
overlap = list(set([p[0] for p in sorted_pairs]).intersection(set(freq1.keys())))
for i in range(min(len(overlap),10)): # have a look at the first couple of words
reconstructed_freq = math.e ** freq1[overlap[i]]
if not np.isclose(reconstructed_freq, round(reconstructed_freq), atol=1e-5):
flattening_applies = False # no flattening if any of the 'reconstructed' frequencies is not a whole number
if flattening_applies: # restore absolute frequencies, but only of the needed entries
freq1 = {w: round(math.e ** freq1[w], 0) for w, _ in sorted_pairs}
freq2 = {w: round(math.e ** freq2[w], 0) for _, w in sorted_pairs}
norms = np.array([max(np.log(abs(freq2[w2]-freq1[w1])),1) for w1,w2 in sorted_pairs])
return (D.T/norms).T
def cluster_length(cluster:np.ndarray, length_metric:str) -> float:
"""
Compute the average length of a cluster using different averaging methods.
All vectors are measured with L2 norm first (np.linalg.norm()).
:param cluster: m-sized cluster of shape (m,d)
:param length_metric: one of 'mean', 'max', 'median', 'std'
:return: average length
"""
sizes = np.linalg.norm(cluster, axis=1)
if length_metric == "max": return float(np.max(sizes))
elif length_metric == "mean": return float(np.mean(sizes))
elif length_metric == "median": return float(np.median(sizes))
elif length_metric == "std": return float( | np.std(sizes) | numpy.std |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import netCDF4 as nc
import datetime
import sys
if len(sys.argv) == 4:
year = int(sys.argv[1])
experiment = str(sys.argv[2])
num_ensemble = int(sys.argv[3])
else:
year = 2010
experiment = "DA"
num_ensemble = 96
if experiment == "OL":
ensemble_description = "OpenLoop."
else:
ensemble_description = "Data Assimilation SoilNet daily SWC."
ensemble_description += " Using perturbed soil properties (sand&clay % [uniform] and forcings (precipitation, short- and longwave radiation, 2m temperature [correlated according to ]."
if experiment == "OL":
dst = nc.Dataset("Ensemble_Collection_OL." + str(year) + ".nc", "w", format="NETCDF4")
else:
dst = nc.Dataset("Ensemble_Collection_DA." + str(year) + ".nc", "w", format="NETCDF4")
if experiment == "OL":
cname = "WU_OL"
else:
cname = "WU_DA"
base_src = nc.Dataset(str(cname)+".clm2_0000.h0."+str(year)+"-01-01-00000.nc", "r")
# copy attributes
for name in base_src.ncattrs():
dst.setncattr("original_attribute_" + name, base_src.getncattr(name))
# copy / reduce dimensions
for name, dimension in base_src.dimensions.items():
dst.createDimension( name, len(dimension))
dst.createDimension("num_ens", num_ensemble)
dst.setncattr("Collected_Ensemble_From", "CLM+PDAF (TSMP)")
ensemble_dimensions = [('time', 'lndgrid'),
('time', 'levsoi', 'lndgrid'),
('time', 'levgrnd', 'lndgrid')]
# Copy all variables not to be collected into ensemble statistics
for name, var in base_src.variables.items():
if not var.dimensions in ensemble_dimensions:
dst.createVariable(name, var.datatype, var.dimensions)
dst[name].setncatts(base_src[name].__dict__)
dst[name][:] = base_src[name][:]
#print("----- copied all non-ensemble variables -------")
# Collect ensemble variables into new file
for name, var in base_src.variables.items():
if not var.dimensions in ensemble_dimensions:
continue
#print(name)
# Get size of dimensions for the current variable
dim_vals = tuple([base_src.dimensions[base_src.variables[name].dimensions[i]].size for i in range(len(base_src.variables[name].dimensions))])
dim_names = tuple([base_src.dimensions[base_src.variables[name].dimensions[i]].name for i in range(len(base_src.variables[name].dimensions))])
min_val = np.inf * np.ones(dim_vals)
min_ind = -1 * np.ones(dim_vals)
max_val = -1.0 * np.ones(dim_vals)
max_ind = -1 * np.ones(dim_vals)
mean_val = -1.0 * np.ones(dim_vals)
new_mean = np.zeros(dim_vals)
prevariance = np.zeros(dim_vals)
std_val = np.zeros(dim_vals)
# Iterate through ensembles
for ens in range(0, num_ensemble):
src = nc.Dataset(str(cname)+".clm2_" + str(ens).zfill(4) + ".h0."+str(year)+"-01-01-00000.nc", "r").variables[name][:]
if ens == 0:
min_val[:] = src.data[:]
max_val[:] = src.data[:]
mean_val[:] = src.data[:]
min_ind[:] = 0
max_ind[:] = 0
else:
min_val[:] = np.minimum(min_val[:], src.data[:])
min_ind[np.where(min_val[:] == src.data[:])] = ens
max_val[:] = np.maximum(max_val[:], src.data[:])
max_ind[np.where(max_val[:] == src.data[:])] = ens
new_mean[:] = np.add(mean_val[:], np.subtract(src.data[:], mean_val[:]) / (ens + 1))
            prevariance[:] = np.add(prevariance[:], np.multiply(np.subtract(src.data[:], mean_val[:]), np.subtract(src.data[:], new_mean[:])))
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 21 14:48:57 2021
@author: <NAME>
"""
import pandas as pd, numpy as np, os, igraph as ig, leidenalg as la
import cvxpy as cp
from sklearn.neighbors import NearestNeighbors, radius_neighbors_graph
from kneed import KneeLocator
from sklearn.utils.validation import check_symmetric
from scipy.sparse import csr_matrix
from matplotlib import pyplot as plt
from sklearn.neighbors import kneighbors_graph
from Bipartite_Ensembling import BGPA
def read_in_data(directory_names, years):
data = {}
for year in years:
data_modes=[]
for directory in directory_names:
for filename in os.listdir(os.path.join('C:\\Users\\Gian Maria\\Desktop\\Unitn\\Iain\\CORRECT_DATA\\Data', directory)):
if year in filename:
datum = pd.read_csv(os.path.join('C:\\Users\\Gian Maria\\Desktop\\Unitn\\Iain\\CORRECT_DATA\\Data',directory, filename), index_col=0)
datum.fillna(value=0, inplace=True)
data_modes.append(datum)
data_modes_index = np.unique(np.concatenate([mode.index for mode in data_modes]))
data_modes = [mode.reindex(data_modes_index) for mode in data_modes]
data_modes = [mode.fillna(value=0) for mode in data_modes]
data[year] = data_modes.copy()
return data
class Leiden_Unimodal:
def __init__(self, obj_type='RB_Mod', resolution=1.0, n_iterations =-1):
obj_types = {'CPM': la.CPMVertexPartition,
'RBER': la.RBERVertexPartition,
'RB_Mod': la.RBConfigurationVertexPartition,
'Mod': la.ModularityVertexPartition,
'Surprise': la.SurpriseVertexPartition
}
self.obj_type = obj_type
self.obj_func = obj_types[obj_type]
self.resolution = resolution
self.n_iterations = n_iterations
def fit_transform(self, graph):
if type(graph) is ig.Graph:
G =graph
else:
G = self._scipy_to_igraph(graph)
if self.obj_type in ['CPM', 'RBER', 'RB_Mod']:
partition = la.find_partition(G, self.obj_func, n_iterations=self.n_iterations,
resolution_parameter=self.resolution)
else:
partition = la.find_partition(G, self.obj_func, n_iterations=self.iterations)
self.modularity_ = partition.quality()
self.labels_ = np.array(partition.membership)
return self.labels_
def _scipy_to_igraph(self, matrix):
# matrix.eliminate_zeros()
sources, targets = matrix.nonzero()
weights = matrix[sources, targets]
graph = ig.Graph(n=matrix.shape[0], edges=list(zip(sources, targets)), directed=True, edge_attrs={'weight': weights})
try:
check_symmetric(matrix, raise_exception=True)
graph = graph.as_undirected()
except ValueError:
pass
return graph
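# Illustrative usage sketch (not part of the original module): clustering a
# made-up two-blob point cloud by building a symmetric kNN graph with igraph
# and running the Leiden_Unimodal wrapper above. All data and parameter choices
# (5 neighbours, 'RB_Mod', resolution 1.0) are for demonstration only.
def _example_leiden_unimodal():
    rng = np.random.RandomState(0)
    points = np.vstack([rng.randn(20, 2), rng.randn(20, 2) + 5.0])
    adj = kneighbors_graph(points, n_neighbors=5, mode='connectivity')
    adj = adj + adj.T                                   # symmetrise the kNN relation
    edges = {(int(i), int(j)) for i, j in zip(*adj.nonzero()) if i < j}
    g = ig.Graph(n=adj.shape[0], edges=list(edges))     # simple undirected graph
    labels = Leiden_Unimodal(obj_type='RB_Mod', resolution=1.0).fit_transform(g)
    return labels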
class Leiden_Multiplex:
def __init__(self, obj_types=None, resolutions=None, modal_weights=None, n_iterations=-1):
self.obj_types = obj_types
self.resolutions = resolutions
self.modal_weights = modal_weights
self.n_iterations = n_iterations
def fit_transform(self, graphs):
obj_table = {'CPM': la.CPMVertexPartition,
'RBER': la.RBERVertexPartition,
'RB_Mod': la.RBConfigurationVertexPartition,
'Mod': la.ModularityVertexPartition,
'Surprise': la.SurpriseVertexPartition
}
G=[]
for graph in graphs:
if type(graph) is ig.Graph:
G.append(graph)
else:
G.append(self._scipy_to_igraph(graph))
optimiser = la.Optimiser()
partitions = []
for i in range(len(G)):
if self.obj_types is None:
partitions.append(la.RBConfigurationVertexPartition(G[i], resolution_parameter=1.0))
elif self.resolutions is None:
obj = obj_table[self.obj_types[i]]
partitions.append(obj(G[i]))
else:
obj = obj_table[self.obj_types[i]]
partitions.append(obj(G[i], resolution_parameter=self.resolutions[i]))
if self.modal_weights is None:
diff = optimiser.optimise_partition_multiplex(partitions, n_iterations=self.n_iterations)
else:
diff = optimiser.optimise_partition_multiplex(partitions, layer_weights = self.modal_weights, n_iterations=self.n_iterations)
self.modularities = [part.modularity for part in partitions]
        self.labels_ = np.array(partitions[0].membership)
        return self.labels_
# coding=utf-8
"""This module contains all the response functions available in Pastas.
"""
import numpy as np
from pandas import DataFrame
from scipy.integrate import quad
from scipy.special import gammainc, gammaincinv, k0, exp1, erfc, lambertw, \
erfcinv
__all__ = ["Gamma", "Exponential", "Hantush", "Polder", "FourParam",
"DoubleExponential", "One", "Edelman", "HantushWellModel"]
class RfuncBase:
_name = "RfuncBase"
def __init__(self, up, meanstress, cutoff):
self.up = up
# Completely arbitrary number to prevent division by zero
if 1e-8 > meanstress > 0:
meanstress = 1e-8
elif meanstress < 0 and up is True:
meanstress = meanstress * -1
self.meanstress = meanstress
self.cutoff = cutoff
def get_init_parameters(self, name):
"""Get initial parameters and bounds. It is called by the stressmodel.
Parameters
----------
name : str
Name of the stressmodel
Returns
-------
parameters : pandas DataFrame
The initial parameters and parameter bounds used by the solver
"""
pass
def get_tmax(self, p, cutoff=None):
"""Method to get the response time for a certain cutoff
Parameters
----------
p: array_like
array_like object with the values as floats representing the
model parameters.
cutoff: float, optional
float between 0 and 1.
Returns
-------
tmax: float
Number of days when 99.9% of the response has effectuated, when the
cutoff is chosen at 0.999.
"""
pass
def step(self, p, dt=1, cutoff=None, maxtmax=None):
"""Method to return the step function.
Parameters
----------
p: array_like
array_like object with the values as floats representing the
model parameters.
dt: float
timestep as a multiple of of day.
cutoff: float, optional
float between 0 and 1.
maxtmax: int, optional
Maximum timestep to compute the block response for.
Returns
-------
s: numpy.array
Array with the step response.
"""
pass
def block(self, p, dt=1, cutoff=None, maxtmax=None):
"""Method to return the block funtion.
Parameters
----------
p: array_like
array_like object with the values as floats representing the
model parameters.
dt: float
timestep as a multiple of of day.
cutoff: float, optional
float between 0 and 1.
maxtmax: int, optional
Maximum timestep to compute the block response for.
Returns
-------
s: numpy.array
Array with the block response.
"""
s = self.step(p, dt, cutoff, maxtmax)
return np.append(s[0], np.subtract(s[1:], s[:-1]))
def get_t(self, p, dt, cutoff, maxtmax=None):
"""Internal method to detemine the times at which to evaluate the step-
response, from t=0
Parameters
----------
p: array_like
array_like object with the values as floats representing the
model parameters.
dt: float
timestep as a multiple of of day.
cutoff: float
float between 0 and 1, that determines which part of the step-
response is taken into account.
maxtmax: float, optional
The maximum time of the response, usually set to the simulation
length.
Returns
-------
t: numpy.array
Array with the times
"""
if isinstance(dt, np.ndarray):
return dt
else:
tmax = self.get_tmax(p, cutoff)
if maxtmax is not None:
tmax = min(tmax, maxtmax)
tmax = max(tmax, 3 * dt)
return np.arange(dt, tmax, dt)
class Gamma(RfuncBase):
"""Gamma response function with 3 parameters A, a, and n.
Parameters
----------
up: bool or None, optional
indicates whether a positive stress will cause the head to go up
(True, default) or down (False), if None the head can go both ways.
meanstress: float
mean value of the stress, used to set the initial value such that
the final step times the mean stress equals 1
cutoff: float
proportion after which the step function is cut off. default is 0.999.
Notes
-----
The impulse response function may be written as:
.. math:: \\theta(t) = At^{n-1} e^{-t/a}
"""
_name = "Gamma"
def __init__(self, up=True, meanstress=1, cutoff=0.999):
RfuncBase.__init__(self, up, meanstress, cutoff)
self.nparam = 3
def get_init_parameters(self, name):
parameters = DataFrame(
columns=['initial', 'pmin', 'pmax', 'vary', 'name'])
if self.up:
parameters.loc[name + '_A'] = (1 / self.meanstress, 1e-5,
100 / self.meanstress, True, name)
elif self.up is False:
parameters.loc[name + '_A'] = (-1 / self.meanstress,
-100 / self.meanstress,
-1e-5, True, name)
else:
parameters.loc[name + '_A'] = (1 / self.meanstress,
np.nan, np.nan, True, name)
# if n is too small, the length of response function is close to zero
parameters.loc[name + '_n'] = (1, 0.1, 100, True, name)
parameters.loc[name + '_a'] = (10, 0.01, 1e4, True, name)
return parameters
def get_tmax(self, p, cutoff=None):
if cutoff is None:
cutoff = self.cutoff
return gammaincinv(p[1], cutoff) * p[2]
def gain(self, p):
return p[0]
def step(self, p, dt=1, cutoff=None, maxtmax=None):
t = self.get_t(p, dt, cutoff, maxtmax)
s = p[0] * gammainc(p[1], t / p[2])
return s
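# Illustrative usage sketch (not part of the original library code): evaluating
# the step and block response of the Gamma function above for an arbitrary
# parameter vector p = [A, n, a]; the numbers are made up for demonstration.
def _example_gamma_response():
    rfunc = Gamma(up=True, meanstress=1.0, cutoff=0.999)
    p = [1.0, 1.5, 10.0]             # A, n, a
    step = rfunc.step(p, dt=1.0)     # cumulative response, tends to the gain A
    block = rfunc.block(p, dt=1.0)   # incremental (daily) response
    return step, block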
class Exponential(RfuncBase):
"""Exponential response function with 2 parameters: A and a.
Parameters
----------
up: bool or None, optional
indicates whether a positive stress will cause the head to go up
(True, default) or down (False), if None the head can go both ways.
meanstress: float
mean value of the stress, used to set the initial value such that
the final step times the mean stress equals 1
cutoff: float
proportion after which the step function is cut off. default is 0.999.
Notes
-----
The impulse response function may be written as:
.. math:: \\theta(t) = A e^{-t/a}
"""
_name = "Exponential"
def __init__(self, up=True, meanstress=1, cutoff=0.999):
RfuncBase.__init__(self, up, meanstress, cutoff)
self.nparam = 2
def get_init_parameters(self, name):
parameters = DataFrame(
columns=['initial', 'pmin', 'pmax', 'vary', 'name'])
if self.up:
parameters.loc[name + '_A'] = (1 / self.meanstress, 1e-5,
100 / self.meanstress, True, name)
elif self.up is False:
parameters.loc[name + '_A'] = (-1 / self.meanstress,
-100 / self.meanstress,
-1e-5, True, name)
else:
parameters.loc[name + '_A'] = (1 / self.meanstress,
np.nan, np.nan, True, name)
parameters.loc[name + '_a'] = (10, 0.01, 1000, True, name)
return parameters
def get_tmax(self, p, cutoff=None):
if cutoff is None:
cutoff = self.cutoff
return -p[1] * np.log(1 - cutoff)
def gain(self, p):
return p[0]
def step(self, p, dt=1, cutoff=None, maxtmax=None):
t = self.get_t(p, dt, cutoff, maxtmax)
s = p[0] * (1.0 - np.exp(-t / p[1]))
return s
class HantushWellModel(RfuncBase):
"""
A special implementation of the Hantush well function for multiple wells.
Parameters
----------
up: bool, optional
indicates whether a positive stress will cause the head to go up
(True, default) or down (False)
meanstress: float
mean value of the stress, used to set the initial value such that
the final step times the mean stress equals 1
cutoff: float
proportion after which the step function is cut off. Default is 0.999.
Notes
-----
The Hantush well function is explained in [hantush_1955]_,
[veling_2010]_ and [asmuth_2008]_. The impulse response function may be
written as:
.. math:: \\theta(t) = \\frac{A}{t} \\exp(-t/a -b/t)
.. math:: p[0] = A # TBD \\frac{1}{4 \\pi kD}
.. math:: p[1] = a = cS
.. math:: p[2] = b = 1^2 / (4 \\lambda^2)
.. math:: p[3] = r \\text{(not optimized)}
where :math:`\\lambda = \\sqrt{kDc}`
The parameter r (distance from the well to the observation point)
is passed as a known value, and is used to scale the response function.
The optimized parameters are slightly different from the original
Hantush implementation:
- A: the parameter is the same as the original Hantush, except that
the distance (r) is set to 1.0
- a = cS: stays the same
    - b = 1 / (4 * lambda^2): r is used internally to scale with distance
- r: distance, not optimized but used to scale A and b
"""
_name = "HantushWellModel"
def __init__(self, up=False, meanstress=1, cutoff=0.999):
RfuncBase.__init__(self, up, meanstress, cutoff)
self.nparam = 3
def get_init_parameters(self, name):
parameters = DataFrame(
columns=['initial', 'pmin', 'pmax', 'vary', 'name'])
if self.up:
parameters.loc[name + '_A'] = (1 / self.meanstress, 0,
100 / self.meanstress, True, name)
elif self.up is False:
parameters.loc[name + '_A'] = (-1 / self.meanstress,
-100 / self.meanstress, 0, True,
name)
else:
parameters.loc[name + '_A'] = (1 / self.meanstress, np.nan,
np.nan, True, name)
parameters.loc[name + '_a'] = (100, 1e-3, 1e4, True, name)
parameters.loc[name + '_b'] = (1, 1e-4, 25, True, name)
return parameters
def get_tmax(self, p, cutoff=None):
r = 1.0 if len(p) == 3 else p[3]
# approximate formula for tmax
if cutoff is None:
cutoff = self.cutoff
cS = p[1]
rho = np.sqrt(4 * r ** 2 * p[2])
k0rho = k0(rho)
return lambertw(1 / ((1 - cutoff) * k0rho)).real * cS
@staticmethod
def gain(p):
r = 1.0 if len(p) == 3 else p[3]
rho = np.sqrt(4 * r ** 2 * p[2])
return p[0] * k0(rho)
def step(self, p, dt=1, cutoff=None, maxtmax=None):
r = 1.0 if len(p) == 3 else p[3]
cS = p[1]
rho = np.sqrt(4 * r ** 2 * p[2])
k0rho = k0(rho)
t = self.get_t(p, dt, cutoff, maxtmax)
tau = t / cS
tau1 = tau[tau < rho / 2]
tau2 = tau[tau >= rho / 2]
w = (exp1(rho) - k0rho) / (exp1(rho) - exp1(rho / 2))
F = np.zeros_like(tau)
F[tau < rho / 2] = w * exp1(rho ** 2 / (4 * tau1)) - (w - 1) * exp1(
tau1 + rho ** 2 / (4 * tau1))
F[tau >= rho / 2] = 2 * k0rho - w * exp1(tau2) + (w - 1) * exp1(
tau2 + rho ** 2 / (4 * tau2))
return p[0] * F / 2
class Hantush(RfuncBase):
"""
The Hantush well function, using the standard A, a, b parameters
Parameters
----------
up: bool or None, optional
indicates whether a positive stress will cause the head to go up
(True, default) or down (False), if None the head can go both ways.
meanstress: float
mean value of the stress, used to set the initial value such that
the final step times the mean stress equals 1
cutoff: float
proportion after which the step function is cut off. default is 0.999.
Notes
-----
The Hantush well function is explained in [hantush_1955]_, [veling_2010]_
and [asmuth_2008]_. The impulse response function may be written as:
    .. math:: \\theta(t) = K_0(\\sqrt{4b}) \\frac{A}{t} \\exp(-t/a - ab/t)
.. math:: p[0] = A = \\frac{1}{2 \\pi kD}
.. math:: p[1] = a = cS
.. math:: p[2] = b = r^2 / (4 \\lambda^2)
where :math:`\\lambda = \\sqrt{kDc}`
References
----------
.. [hantush_1955] <NAME>., & <NAME>. (1955). Non‐steady
radial flow in an infinite leaky aquifer. Eos, Transactions American
Geophysical Union, 36(1), 95-100.
.. [veling_2010] <NAME>., & <NAME>. (2010). Hantush well function
revisited. Journal of hydrology, 393(3), 381-388.
.. [asmuth_2008] <NAME>., <NAME>., <NAME>., & Petersen,
J. (2008). Modeling time series of ground water head fluctuations
subjected to multiple stresses. Ground Water, 46(1), 30-40.
"""
_name = "Hantush"
def __init__(self, up=False, meanstress=1, cutoff=0.999):
RfuncBase.__init__(self, up, meanstress, cutoff)
self.nparam = 3
def get_init_parameters(self, name):
parameters = DataFrame(
columns=['initial', 'pmin', 'pmax', 'vary', 'name'])
if self.up:
parameters.loc[name + '_A'] = (1 / self.meanstress, 0,
100 / self.meanstress, True, name)
elif self.up is False:
parameters.loc[name + '_A'] = (-1 / self.meanstress,
-100 / self.meanstress, 0, True,
name)
else:
parameters.loc[name + '_A'] = (1 / self.meanstress, np.nan,
np.nan, True, name)
parameters.loc[name + '_a'] = (100, 1e-3, 1e4, True, name)
parameters.loc[name + '_b'] = (1, 1e-6, 25, True, name)
return parameters
def get_tmax(self, p, cutoff=None):
# approximate formula for tmax
if cutoff is None:
cutoff = self.cutoff
cS = p[1]
rho = np.sqrt(4 * p[2])
k0rho = k0(rho)
return lambertw(1 / ((1 - cutoff) * k0rho)).real * cS
def gain(self, p):
return p[0] * k0(np.sqrt(4 * p[2]))
def step(self, p, dt=1, cutoff=None, maxtmax=None):
cS = p[1]
rho = np.sqrt(4 * p[2])
k0rho = k0(rho)
t = self.get_t(p, dt, cutoff, maxtmax)
tau = t / cS
tau1 = tau[tau < rho / 2]
tau2 = tau[tau >= rho / 2]
w = (exp1(rho) - k0rho) / (exp1(rho) - exp1(rho / 2))
F = np.zeros_like(tau)
F[tau < rho / 2] = w * exp1(rho ** 2 / (4 * tau1)) - (w - 1) * exp1(
tau1 + rho ** 2 / (4 * tau1))
F[tau >= rho / 2] = 2 * k0rho - w * exp1(tau2) + (w - 1) * exp1(
tau2 + rho ** 2 / (4 * tau2))
return p[0] * F / 2
class Polder(RfuncBase):
"""The Polder function, using the standard A, a, b parameters
Notes
-----
    The Polder function is explained in [polder]_. The impulse response
    function may be written as:
    .. math:: \\theta(t) = A \\sqrt{\\frac{ab}{\\pi}} t^{-3/2} \\exp(-t/a - ab/t)
    .. math:: p[0] = A
    .. math:: p[1] = a = cS
    .. math:: p[2] = b = x^2 / (4 \\lambda^2)
    where :math:`\\lambda = \\sqrt{kDc}` and :math:`x` is the distance to the
    surface water. The gain of this response function equals
    :math:`A \\exp(-\\sqrt{4b}) = A \\exp(-x/\\lambda)`.
References
----------
    .. [polder] Bruggeman, G. A. (1999). Analytical solutions of
       geohydrological problems. Elsevier Science, Amsterdam. Eq. 123.32.
"""
_name = "Polder"
def __init__(self, up=True, meanstress=1, cutoff=0.999):
RfuncBase.__init__(self, up, meanstress, cutoff)
self.nparam = 3
def get_init_parameters(self, name):
parameters = DataFrame(
columns=['initial', 'pmin', 'pmax', 'vary', 'name'])
parameters.loc[name + '_A'] = (1, 0, 2, True, name)
parameters.loc[name + '_a'] = (10, 0.01, 1000, True, name)
parameters.loc[name + '_b'] = (1, 1e-6, 25, True, name)
return parameters
def get_tmax(self, p, cutoff=None):
if cutoff is None:
cutoff = self.cutoff
_, a, b = p
b = a * b
x = np.sqrt(b / a)
inverfc = erfcinv(2 * cutoff)
y = (-inverfc + np.sqrt(inverfc ** 2 + 4 * x)) / 2
tmax = a * y ** 2
return tmax
def gain(self, p):
# the steady state solution of Mazure
g = p[0] * np.exp(-np.sqrt(4 * p[2]))
if not self.up:
g = -g
return g
def step(self, p, dt=1, cutoff=None, maxtmax=None):
t = self.get_t(p, dt, cutoff, maxtmax)
A, a, b = p
s = A * self.polder_function(np.sqrt(b), np.sqrt(t / a))
# / np.exp(-2 * np.sqrt(b))
if not self.up:
s = -s
return s
@staticmethod
def polder_function(x, y):
s = 0.5 * np.exp(2 * x) * erfc(x / y + y) + \
0.5 * np.exp(-2 * x) * erfc(x / y - y)
return s
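# Illustrative check (not part of the library): a standalone sketch of the
# Polder step response used above, s(t) = A * P(sqrt(b), sqrt(t/a)) with
# P(x, y) = 0.5*exp(2x)*erfc(x/y + y) + 0.5*exp(-2x)*erfc(x/y - y). For large
# t the step approaches the steady-state (Mazure) gain A * exp(-sqrt(4*b)).
# The parameter values below are arbitrary examples.
def _demo_polder_step():
    import numpy as np
    from scipy.special import erfc

    A, a, b = 1.0, 10.0, 1.0  # example values for p[0], p[1], p[2]
    x = np.sqrt(b)
    t = np.array([1.0, 10.0, 1e4])
    y = np.sqrt(t / a)
    s = A * (0.5 * np.exp(2 * x) * erfc(x / y + y)
             + 0.5 * np.exp(-2 * x) * erfc(x / y - y))
    # the last time is large enough to reach the steady state
    assert np.isclose(s[-1], A * np.exp(-np.sqrt(4 * b)), rtol=1e-3)
    return s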
class One(RfuncBase):
"""Instant response with no lag and one parameter d.
Parameters
----------
up: bool or None, optional
indicates whether a positive stress will cause the head to go up
(True) or down (False), if None (default) the head can go both ways.
meanstress: float
mean value of the stress, used to set the initial value such that
the final step times the mean stress equals 1
cutoff: float
proportion after which the step function is cut off. default is 0.999.
"""
_name = "One"
def __init__(self, up=None, meanstress=1, cutoff=0.999):
RfuncBase.__init__(self, up, meanstress, cutoff)
self.nparam = 1
def get_init_parameters(self, name):
parameters = DataFrame(
columns=['initial', 'pmin', 'pmax', 'vary', 'name'])
if self.up:
parameters.loc[name + '_d'] = (
self.meanstress, 0, np.nan, True, name)
elif self.up is False:
parameters.loc[name + '_d'] = (
-self.meanstress, np.nan, 0, True, name)
else:
parameters.loc[name + '_d'] = (
self.meanstress, np.nan, np.nan, True, name)
return parameters
def gain(self, p):
return p[0]
def step(self, p, dt=1, cutoff=None, maxtmax=None):
if isinstance(dt, np.ndarray):
return p[0] * np.ones(len(dt))
else:
return p[0] * np.ones(1)
def block(self, p, dt=1, cutoff=None, maxtmax=None):
return p[0] * np.ones(1)
class FourParam(RfuncBase):
"""Four Parameter response function with 4 parameters A, a, b, and n.
Parameters
----------
up: bool or None, optional
indicates whether a positive stress will cause the head to go up
(True, default) or down (False), if None the head can go both ways.
meanstress: float
mean value of the stress, used to set the initial value such that
the final step times the mean stress equals 1
cutoff: float
proportion after which the step function is cut off. default is 0.999.
Notes
-----
The impulse response function may be written as:
.. math:: \\theta(t) = At^{n-1} e^{-t/a -ab/t}
    If FourParam.quad is set to True, this response function uses
    scipy.integrate.quad to integrate the impulse response function, which is
    more accurate but requires more calculation time.
"""
_name = "FourParam"
def __init__(self, up=True, meanstress=1, cutoff=0.999):
RfuncBase.__init__(self, up, meanstress, cutoff)
self.nparam = 4
self.quad = False
def get_init_parameters(self, name):
parameters = DataFrame(
columns=['initial', 'pmin', 'pmax', 'vary', 'name'])
if self.up:
parameters.loc[name + '_A'] = (1 / self.meanstress, 0,
100 / self.meanstress, True, name)
elif self.up is False:
parameters.loc[name + '_A'] = (-1 / self.meanstress,
-100 / self.meanstress, 0, True,
name)
else:
parameters.loc[name + '_A'] = (1 / self.meanstress,
np.nan, np.nan, True, name)
parameters.loc[name + '_n'] = (1, -10, 10, True, name)
parameters.loc[name + '_a'] = (10, 0.01, 5000, True, name)
parameters.loc[name + '_b'] = (10, 1e-6, 25, True, name)
return parameters
@staticmethod
def function(t, p):
return (t ** (p[1] - 1)) * np.exp(-t / p[2] - p[2] * p[3] / t)
def get_tmax(self, p, cutoff=None):
if cutoff is None:
cutoff = self.cutoff
if self.quad:
x = np.arange(1, 10000, 1)
            y = np.zeros_like(x, dtype=float)  # x is an integer range
func = self.function(x, p)
func_half = self.function(x[:-1] + 1 / 2, p)
y[1:] = y[0] + np.cumsum(1 / 6 *
(func[:-1] + 4 * func_half + func[1:]))
y = y / quad(self.function, 0, np.inf, args=p)[0]
return np.searchsorted(y, cutoff)
else:
t1 = -np.sqrt(3 / 5)
t2 = 0
t3 = np.sqrt(3 / 5)
w1 = 5 / 9
w2 = 8 / 9
w3 = 5 / 9
x = np.arange(1, 10000, 1)
            y = np.zeros_like(x, dtype=float)  # x is an integer range
func = self.function(x, p)
func_half = self.function(x[:-1] + 1 / 2, p)
y[0] = 0.5 * (w1 * self.function(0.5 * t1 + 0.5, p) +
w2 * self.function(0.5 * t2 + 0.5, p) +
w3 * self.function(0.5 * t3 + 0.5, p))
y[1:] = y[0] + np.cumsum(1 / 6 *
(func[:-1] + 4 * func_half + func[1:]))
y = y / quad(self.function, 0, np.inf, args=p)[0]
return np.searchsorted(y, cutoff)
@staticmethod
def gain(p):
return p[0]
def step(self, p, dt=1, cutoff=None, maxtmax=None):
if self.quad:
t = self.get_t(p, dt, cutoff, maxtmax)
s = np.zeros_like(t)
s[0] = quad(self.function, 0, dt, args=p)[0]
for i in range(1, len(t)):
s[i] = s[i - 1] + quad(self.function, t[i - 1], t[i], args=p)[
0]
s = s * (p[0] / (quad(self.function, 0, np.inf, args=p))[0])
return s
else:
t1 = -np.sqrt(3 / 5)
t2 = 0
t3 = np.sqrt(3 / 5)
w1 = 5 / 9
w2 = 8 / 9
w3 = 5 / 9
if dt > 0.1:
step = 0.1 # step size for numerical integration
tmax = max(self.get_tmax(p, cutoff), 3 * dt)
t = np.arange(step, tmax, step)
s = np.zeros_like(t)
# for interval [0,dt] :
s[0] = (step / 2) * \
(w1 * self.function((step / 2) * t1 + (step / 2), p) +
w2 * self.function((step / 2) * t2 + (step / 2), p) +
w3 * self.function((step / 2) * t3 + (step / 2), p))
# for interval [dt,tmax]:
func = self.function(t, p)
func_half = self.function(t[:-1] + step / 2, p)
s[1:] = s[0] + np.cumsum(step / 6 *
(func[:-1] + 4 * func_half + func[
1:]))
s = s * (p[0] / quad(self.function, 0, np.inf, args=p)[0])
return s[int(dt / step - 1)::int(dt / step)]
else:
t = self.get_t(p, dt, cutoff, maxtmax)
s = np.zeros_like(t)
# for interval [0,dt] Gaussian quadrate:
s[0] = (dt / 2) * \
(w1 * self.function((dt / 2) * t1 + (dt / 2), p) +
w2 * self.function((dt / 2) * t2 + (dt / 2), p) +
w3 * self.function((dt / 2) * t3 + (dt / 2), p))
# for interval [dt,tmax] Simpson integration:
func = self.function(t, p)
func_half = self.function(t[:-1] + dt / 2, p)
s[1:] = s[0] + np.cumsum(dt / 6 *
(func[:-1] + 4 * func_half + func[
1:]))
s = s * (p[0] / quad(self.function, 0, np.inf, args=p)[0])
return s
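# Illustrative check (not part of the library): FourParam.step above
# integrates f(t) = t**(n-1) * exp(-t/a - a*b/t) piecewise (three-point
# Gauss-Legendre on [0, dt], composite Simpson afterwards) and normalises by
# the full integral so that the step tends to the gain A. This standalone
# sketch checks that the same composite rule agrees with scipy.integrate.quad
# for arbitrary example parameters.
def _demo_fourparam_integration():
    import numpy as np
    from scipy.integrate import quad

    n, a, b = 1.5, 10.0, 1.0  # example values for p[1], p[2], p[3]

    def f(t):
        return t ** (n - 1) * np.exp(-t / a - a * b / t)

    dt, tmax = 0.1, 500.0
    t = np.arange(dt, tmax, dt)
    # three-point Gauss-Legendre rule on [0, dt]
    nodes = np.array([-np.sqrt(3 / 5), 0.0, np.sqrt(3 / 5)])
    weights = np.array([5 / 9, 8 / 9, 5 / 9])
    s0 = dt / 2 * np.sum(weights * f(dt / 2 * nodes + dt / 2))
    # composite Simpson rule with midpoints on [dt, tmax]
    s = s0 + np.sum(dt / 6 * (f(t[:-1]) + 4 * f(t[:-1] + dt / 2) + f(t[1:])))
    s_quad = quad(f, 0, tmax)[0]
    assert np.isclose(s, s_quad, rtol=1e-4)  # the two integrals should agree
    return s, s_quad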
class DoubleExponential(RfuncBase):
"""Gamma response function with 3 parameters A, a, and n.
Parameters
----------
up: bool or None, optional
indicates whether a positive stress will cause the head to go up
(True, default) or down (False), if None the head can go both ways.
meanstress: float
mean value of the stress, used to set the initial value such that
the final step times the mean stress equals 1
cutoff: float
proportion after which the step function is cut off. default is 0.999.
Notes
-----
The impulse response function may be written as:
    .. math:: \\theta(t) = A \\frac{1 - \\alpha}{a_1} e^{-t/a_1} + A \\frac{\\alpha}{a_2} e^{-t/a_2}
"""
_name = "DoubleExponential"
def __init__(self, up=True, meanstress=1, cutoff=0.999):
RfuncBase.__init__(self, up, meanstress, cutoff)
self.nparam = 4
def get_init_parameters(self, name):
parameters = DataFrame(
columns=['initial', 'pmin', 'pmax', 'vary', 'name'])
if self.up:
parameters.loc[name + '_A'] = (1 / self.meanstress, 0,
100 / self.meanstress, True, name)
elif self.up is False:
parameters.loc[name + '_A'] = (-1 / self.meanstress,
-100 / self.meanstress, 0, True,
name)
else:
parameters.loc[name + '_A'] = (1 / self.meanstress,
np.nan, np.nan, True, name)
parameters.loc[name + '_alpha'] = (0.1, 0.01, 0.99, True, name)
parameters.loc[name + '_a1'] = (10, 0.01, 5000, True, name)
parameters.loc[name + '_a2'] = (10, 0.01, 5000, True, name)
return parameters
def get_tmax(self, p, cutoff=None):
if cutoff is None:
cutoff = self.cutoff
if p[2] > p[3]: # a1 > a2
return -p[2] * np.log(1 - cutoff)
else: # a1 < a2
return -p[3] * np.log(1 - cutoff)
def gain(self, p):
return p[0]
def step(self, p, dt=1, cutoff=None, maxtmax=None):
t = self.get_t(p, dt, cutoff, maxtmax)
s = p[0] * (1 - ((1 - p[1]) * np.exp(-t / p[2]) +
p[1] * np.exp(-t / p[3])))
return s
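# Illustrative check (not part of the library): a standalone sketch of the
# double exponential step response above,
# s(t) = A * (1 - (1 - alpha)*exp(-t/a1) - alpha*exp(-t/a2)),
# showing that s(t) approaches the gain A for large t and that get_tmax uses
# the slower of the two time scales. Parameter values are arbitrary examples.
def _demo_double_exponential_step():
    import numpy as np

    A, alpha, a1, a2 = 1.0, 0.3, 10.0, 200.0  # example p[0]..p[3]
    cutoff = 0.999
    t = np.array([1.0, 50.0, 5000.0])
    s = A * (1 - ((1 - alpha) * np.exp(-t / a1) + alpha * np.exp(-t / a2)))
    tmax = -max(a1, a2) * np.log(1 - cutoff)  # same rule as get_tmax above
    assert np.isclose(s[-1], A, rtol=1e-6)
    return s, tmax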
class Edelman(RfuncBase):
"""The function of Edelman, describing the propagation of an instantaneous
water level change into an adjacent half-infinite aquifer.
Parameters
----------
up: bool or None, optional
indicates whether a positive stress will cause the head to go up
(True, default) or down (False), if None the head can go both ways.
meanstress: float
mean value of the stress, used to set the initial value such that
the final step times the mean stress equals 1
cutoff: float
proportion after which the step function is cut off. default is 0.999.
Notes
-----
    The Edelman function is explained in [5]_. The impulse response function
    may be written as:
    .. math:: \\theta(t) = \\frac{1}{\\beta \\sqrt{\\pi}} t^{-3/2} \\exp(-1 / (\\beta^2 t))
    Its single parameter is:
    .. math:: p[0] = \\beta = \\frac{\\sqrt{\\frac{4kD}{S}}}{x}
References
----------
.. [5] http://grondwaterformules.nl/index.php/formules/waterloop/peilverandering
"""
_name = "Edelman"
def __init__(self, up=True, meanstress=1, cutoff=0.999):
RfuncBase.__init__(self, up, meanstress, cutoff)
self.nparam = 1
def get_init_parameters(self, name):
parameters = DataFrame(
columns=['initial', 'pmin', 'pmax', 'vary', 'name'])
beta_init = 1.0
parameters.loc[name + '_beta'] = (beta_init, 0, 1000, True, name)
return parameters
def get_tmax(self, p, cutoff=None):
if cutoff is None:
cutoff = self.cutoff
return 1. / (p[0] * erfcinv(cutoff * erfc(0))) ** 2
@staticmethod
def gain(p):
return 1.
def step(self, p, dt=1, cutoff=None, maxtmax=None):
t = self.get_t(p, dt, cutoff, maxtmax)
        s = erfc(1 / (p[0] * np.sqrt(t)))
        return s
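# Illustrative check (not part of the library): a standalone sketch of the
# Edelman step response above, s(t) = erfc(1 / (beta * sqrt(t))). The gain is
# 1 and get_tmax inverts the step, tmax = 1 / (beta * erfcinv(cutoff))**2, so
# the step evaluated at tmax returns the cutoff. The beta value is an
# arbitrary example.
def _demo_edelman_step():
    import numpy as np
    from scipy.special import erfc, erfcinv

    beta, cutoff = 0.5, 0.999
    tmax = 1.0 / (beta * erfcinv(cutoff)) ** 2
    s_at_tmax = erfc(1.0 / (beta * np.sqrt(tmax)))
    assert np.isclose(s_at_tmax, cutoff)
    return tmax, s_at_tmax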