prompt (string, 15–655k chars) | completion (string, 3–32.4k chars) | api (string, 8–52 chars) |
---|---|---|
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 17 14:17:33 2020
@author: <NAME>, Biomedical Engineer
PhD candidate in Neuroscience
University of Salerno, Fisciano, Italy
"""
import os
import pickle
import numpy as np
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
from scipy.stats import spearmanr
from itertools import combinations
#%%############################################################################
# Functions
###############################################################################
def cmdscale(D,n_comp):
"""
Classical multidimensional scaling (MDS)
Parameters
----------
D : (n, n) array
Symmetric distance matrix.
n_comp : int
Number of components requested.
Returns
-------
coords : (n, n_comp) array
Configuration matrix. Each column represents a dimension; only the
n_comp leading dimensions (corresponding to positive eigenvalues of B) are kept.
Note that each dimension is only determined up to an overall sign,
corresponding to a reflection.
inv_mat : (n_comp, n) array
Transformation matrix built from the leading eigenvectors and eigenvalues,
used to project onto the first n_comp components.
"""
# Number of points
n = len(D)
# Centering matrix
H = np.eye(n) - np.ones((n, n))/n
# YY^T
B = -H.dot(D**2).dot(H)/2
# Diagonalize
evals, evecs = np.linalg.eigh(B)
# Sort by eigenvalue in descending order
idx = np.argsort(evals)[::-1]
evals = evals[idx]
evecs = evecs[:,idx]
#computing the matrix only for the positive-eigenvalued components
w, = np.where(evals > 0)
L = np.diag(np.sqrt(evals[w]))
V = evecs[:,w]
Y = V.dot(L)
# keep the first n_comp components and build the corresponding transformation matrix
coords = Y[:,:n_comp]
inv_mat = evecs[:,:n_comp].T/(np.sqrt(evals[:n_comp]))[:,None]
return coords, inv_mat
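# A minimal usage sketch of cmdscale (hypothetical 4-point square): build a
# Euclidean distance matrix and recover a 2-D embedding whose pairwise
# distances reproduce it (up to reflection, as noted in the docstring).
# The points below are illustrative assumptions, not data from this study.
example_pts = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
example_D = cdist(example_pts, example_pts, 'euclidean')
example_coords, example_inv = cmdscale(example_D, n_comp=2)
# np.allclose(cdist(example_coords, example_coords, 'euclidean'), example_D) is expected to be True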
def first_order_rdm(data, dist_metric):
"""
Return a Representation Dissimilarity Matrix estimated with a specific
metric.
Input:
data = matrix of the values extracted from the ROI. The format accepted
is NxV (N conditions and V voxels)
dist_metric = a number that specifies the metric to use
1 = Pearson correlation
2 = Euclidean distance
3 = Absolute activation difference
Output:
RDM = a NxN matrix where the dissimilarity between each condition is
stored
"""
data = data.T
if dist_metric == 1:
# Use correlation distance
RDM = 1-np.corrcoef(data)
elif dist_metric == 2:
# Use Euclidean distance
RDM = cdist(data,data,'euclidean')
elif dist_metric == 3:
# Use absolute activation difference
means = np.mean(data,axis=1) # Determine mean activation per condition
m, n =
|
np.meshgrid(means,means)
|
numpy.meshgrid
|
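# A standalone sketch of the absolute-activation-difference RDM (metric 3 in
# first_order_rdm above), assuming a toy vector of per-condition mean
# activations; RDM[i, j] = |mean_i - mean_j|.
import numpy as np
toy_means = np.array([0.2, 1.0, -0.5])
m_grid, n_grid = np.meshgrid(toy_means, toy_means)
toy_rdm = np.abs(m_grid - n_grid)   # 3x3 symmetric matrix with zero diagonal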
from __future__ import absolute_import
from datashape import dshape
import pandas as pd
import numpy as np
import pytest
from datashader.glyphs import Point, LinesAxis1, Glyph
from datashader.glyphs.area import _build_draw_trapezoid_y
from datashader.glyphs.line import (
_build_map_onto_pixel_for_line,
_build_draw_segment,
_build_extend_line_axis0,
)
from datashader.glyphs.trimesh import(
_build_map_onto_pixel_for_triangle,
_build_draw_triangle,
_build_extend_triangles
)
from datashader.utils import ngjit
def test_point_bounds_check():
df = pd.DataFrame({'x': [1, 2, 3], 'y': [5, 6, 7]})
p = Point('x', 'y')
assert p._compute_bounds(df['x'].values) == (1, 3)
assert p._compute_bounds(df['y'].values) == (5, 7)
def test_point_validate():
p = Point('x', 'y')
p.validate(dshape("{x: int32, y: float32}"))
with pytest.raises(ValueError):
p.validate(dshape("{x: string, y: float32}"))
@ngjit
def append(i, x, y, agg):
agg[y, x] += 1
@ngjit
def tri_append(x, y, agg, n):
agg[y, x] += n
def new_agg():
return np.zeros((5, 5), dtype='i4')
mapper = ngjit(lambda x: x)
map_onto_pixel_for_line = _build_map_onto_pixel_for_line(mapper, mapper)
map_onto_pixel_for_triangle = _build_map_onto_pixel_for_triangle(mapper, mapper)
# Line rasterization
expand_aggs_and_cols = Glyph._expand_aggs_and_cols(append, 1)
_draw_segment = _build_draw_segment(append, map_onto_pixel_for_line, expand_aggs_and_cols)
extend_line, _ = _build_extend_line_axis0(_draw_segment, expand_aggs_and_cols)
# Triangles rasterization
draw_triangle, draw_triangle_interp = _build_draw_triangle(tri_append)
extend_triangles = _build_extend_triangles(draw_triangle, draw_triangle_interp, map_onto_pixel_for_triangle)
# Trapezoid y rasterization
_draw_trapezoid = _build_draw_trapezoid_y(
append, map_onto_pixel_for_line, expand_aggs_and_cols
)
bounds = (-3, 1, -3, 1)
vt = (1., 3., 1., 3.)
def draw_segment(x0, y0, x1, y1, i, segment_start, agg):
"""
Helper to draw line with fixed bounds and scale values.
"""
sx, tx, sy, ty = 1, 0, 1, 0
xmin, xmax, ymin, ymax = 0, 5, 0, 5
_draw_segment(
i, sx, tx, sy, ty, xmin, xmax, ymin, ymax,
segment_start, x0, x1, y0, y1, agg)
def draw_trapezoid(x0, x1, y0, y1, y2, y3, i, trapezoid_start, stacked, agg):
"""
Helper to draw line with fixed bounds and scale values.
"""
sx, tx, sy, ty = 1, 0, 1, 0
xmin, xmax, ymin, ymax = 0, 5, 0, 5
_draw_trapezoid(
i, sx, tx, sy, ty, xmin, xmax, ymin, ymax,
x0, x1, y0, y1, y2, y3, trapezoid_start, stacked, agg)
def test_draw_line():
x0, y0 = (0, 0)
x1, y1 = (3, 3)
out = np.array([[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0]])
agg = new_agg()
draw_segment(x0, y0, x1, y1, 0, True, agg)
np.testing.assert_equal(agg, out)
agg = new_agg()
draw_segment(x1, y1, x0, y0, 0, True, agg)
np.testing.assert_equal(agg, out)
# plot_start = False
agg = new_agg()
draw_segment(x0, y0, x1, y1, 0, False, agg)
out[0, 0] = 0
np.testing.assert_equal(agg, out)
agg = new_agg()
draw_segment(x1, y1, x0, y0, 0, False, agg)
out[0, 0] = 1
out[3, 3] = 0
np.testing.assert_equal(agg, out)
# Flip coords
x0, y0 = (0, 4)
x1, y1 = (3, 1)
out = np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[1, 0, 0, 0, 0]])
agg = new_agg()
draw_segment(x0, y0, x1, y1, 0, True, agg)
np.testing.assert_equal(agg, out)
agg = new_agg()
draw_segment(x1, y1, x0, y0, 0, True, agg)
|
np.testing.assert_equal(agg, out)
|
numpy.testing.assert_equal
|
# Copyright (c) 2012-2014 The GPy authors (see AUTHORS.txt)
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from ..util.univariate_Gaussian import std_norm_pdf, std_norm_cdf, derivLogCdfNormal, logCdfNormal
from . import link_functions
from .likelihood import Likelihood
from ..core.parameterization import Param
from paramz.transformations import Logexp
class CensoredGaussian(Likelihood):
"""
Censored Gaussian likelihood
.. See also::
likelihood.py, for the parent class
"""
def __init__(self, gp_link=None, variance=1., censoring=None, name='CensoredGaussian'):
if gp_link is None:
gp_link = link_functions.Identity()
if not isinstance(gp_link, link_functions.Identity):
print("Warning, Exact inference is not implemeted for non-identity link functions,\
if you are not already, ensure Laplace inference_method is used")
super(CensoredGaussian, self).__init__(gp_link, name=name)
self.variance = Param('variance', variance, Logexp())
self.link_parameter(self.variance)
if isinstance(gp_link, link_functions.Identity):
self.log_concave = True
def to_dict(self):
"""
Convert the object into a json serializable dictionary.
Note: It uses the private method _save_to_input_dict of the parent.
:return dict: json serializable dictionary containing the needed information to instantiate the object
"""
input_dict = super(CensoredGaussian, self)._save_to_input_dict()
input_dict["class"] = "GPy.likelihoods.CensoredGaussian"
input_dict["variance"] = self.variance.values.tolist()
return input_dict
def _preprocess_values(self, Y, censoring):
"""
Check that the values of the observations are consistent with the
likelihood: every entry of Y must be flagged by the censoring indicator
as either censored (1) or non-censored (0).
"""
Y_prep = Y.copy()
Y1 = Y[censoring.flatten()==1].size
Y2 = Y[censoring.flatten()==0].size
assert Y1 + Y2 == Y.size, 'Number of censored observation + non-censored does not match size of Y.'
return Y_prep
def moments_match_ep(self, Y_i, tau_i, v_i, censoring_i, Y_metadata_i=None):
"""
Moments match of the marginal approximation in EP algorithm
:param Y_i: observed value for this data point (float)
:param censoring_i: censoring indicator for Y_i (1 if censored, 0 otherwise)
:param tau_i: precision of the cavity distribution (float)
:param v_i: precision-scaled mean of the cavity distribution, i.e. cavity mean times tau_i (float)
"""
if censoring_i == 1:
pass
elif censoring_i == 0:
pass
else:
raise ValueError("bad value for censoring_i observation (0, 1)")
variance = self.variance.copy()
if censoring_i == 0:
Z_hat = (1/np.sqrt(2 * np.pi * (variance + 1/tau_i))) * np.exp(-0.5 * ((Y_i - v_i/tau_i)**2)/(variance + 1/tau_i))
mu_hat = v_i/tau_i + 1/tau_i * ((Y_i - v_i/tau_i)/(variance + 1/tau_i))
sigma2_hat = 1/tau_i - 1/(tau_i**2 * (variance + 1/tau_i))
return Z_hat, mu_hat, sigma2_hat
else:
z = (v_i/tau_i - Y_i)/np.sqrt(variance + 1./tau_i)
phi_div_Phi = derivLogCdfNormal(z)
log_Z_hat = logCdfNormal(z)
mu_hat = v_i/tau_i + phi_div_Phi/np.sqrt(variance*tau_i**2 + tau_i)
sigma2_hat = 1./tau_i - (phi_div_Phi/(variance*tau_i**2 + tau_i))*(z+phi_div_Phi)
return np.exp(log_Z_hat), mu_hat, sigma2_hat
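# A standalone sanity-check sketch (independent of the class above) of the
# censored branch, using assumed toy values; the results should match
# moments_match_ep(Y_i, tau_i, v_i, censoring_i=1) up to floating point.
from scipy.stats import norm as sp_norm
var0, tau0, v0, y0 = 1.0, 2.0, 0.5, 0.3          # assumed toy values
z0 = (v0/tau0 - y0)/np.sqrt(var0 + 1./tau0)
ratio0 = sp_norm.pdf(z0)/sp_norm.cdf(z0)         # same quantity as derivLogCdfNormal(z0)
Z_hat0 = sp_norm.cdf(z0)                         # = np.exp(logCdfNormal(z0))
mu_hat0 = v0/tau0 + ratio0/np.sqrt(var0*tau0**2 + tau0)
sigma2_hat0 = 1./tau0 - (ratio0/(var0*tau0**2 + tau0))*(z0 + ratio0)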
def variational_expectations(self, Y, m, v, gh_points=None, Y_metadata=None):
if isinstance(self.gp_link, link_functions.Probit):
if gh_points is None:
gh_x, gh_w = self._gh_points()
else:
gh_x, gh_w = gh_points
gh_w = gh_w / np.sqrt(np.pi)
shape = m.shape
m,v,Y = m.flatten(), v.flatten(), Y.flatten()
Ysign = np.where(Y==1,1,-1)
X = gh_x[None,:]*np.sqrt(2.*v[:,None]) + (m*Ysign)[:,None]
p = std_norm_cdf(X)
p = np.clip(p, 1e-9, 1.-1e-9) # for numerical stability
N = std_norm_pdf(X)
F = np.log(p).dot(gh_w)
NoverP = N/p
dF_dm = (NoverP*Ysign[:,None]).dot(gh_w)
dF_dv = -0.5*(NoverP**2 + NoverP*X).dot(gh_w)
return F.reshape(*shape), dF_dm.reshape(*shape), dF_dv.reshape(*shape), None
else:
raise NotImplementedError
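# A standalone sketch of the Gauss-Hermite change of variables used in the
# Probit branch above: E[f(X)] for X ~ N(m, v) is approximated by
# sum_i w_i/sqrt(pi) * f(sqrt(2*v)*t_i + m). The 20-point rule and the toy
# (m, v) values below are assumptions for illustration.
gh_t, gh_w = np.polynomial.hermite.hermgauss(20)
m_toy, v_toy = 0.3, 0.5
F_toy = (gh_w/np.sqrt(np.pi)).dot(np.log(std_norm_cdf(np.sqrt(2.*v_toy)*gh_t + m_toy)))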
def predictive_mean(self, mu, variance, Y_metadata=None):
if isinstance(self.gp_link, link_functions.Probit):
return std_norm_cdf(mu/np.sqrt(1+variance))
elif isinstance(self.gp_link, link_functions.Heaviside):
return std_norm_cdf(mu/np.sqrt(variance))
else:
raise NotImplementedError
def predictive_variance(self, mu, variance, pred_mean, Y_metadata=None):
if isinstance(self.gp_link, link_functions.Heaviside):
return 0.
else:
return np.nan
def pdf_link(self, inv_link_f, y, Y_metadata=None):
"""
Likelihood function given inverse link of f.
.. math::
p(y_{i}|\\lambda(f_{i})) = \\lambda(f_{i})^{y_{i}}(1-\\lambda(f_{i}))^{1-y_{i}}
:param inv_link_f: latent variables inverse link of f.
:type inv_link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata not used in bernoulli
:returns: likelihood evaluated for this point
:rtype: float
.. Note:
Each y_i must be in {0, 1}
"""
#objective = (inv_link_f**y) * ((1.-inv_link_f)**(1.-y))
return np.where(y==1, inv_link_f, 1.-inv_link_f)
def logpdf_link(self, inv_link_f, y, Y_metadata=None):
"""
Log Likelihood function given inverse link of f.
.. math::
\\ln p(y_{i}|\\lambda(f_{i})) = y_{i}\\log\\lambda(f_{i}) + (1-y_{i})\\log (1-\\lambda(f_{i}))
:param inv_link_f: latent variables inverse link of f.
:type inv_link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata not used in bernoulli
:returns: log likelihood evaluated at points inverse link of f.
:rtype: float
"""
#objective = y*np.log(inv_link_f) + (1.-y)*np.log(1.-inv_link_f)
p = np.where(y==1, inv_link_f, 1.-inv_link_f)
return np.log(np.clip(p, 1e-9 ,np.inf))
def dlogpdf_dlink(self, inv_link_f, y, Y_metadata=None):
"""
Gradient of the pdf at y, given inverse link of f w.r.t inverse link of f.
.. math::
\\frac{d\\ln p(y_{i}|\\lambda(f_{i}))}{d\\lambda(f)} = \\frac{y_{i}}{\\lambda(f_{i})} - \\frac{(1 - y_{i})}{(1 - \\lambda(f_{i}))}
:param inv_link_f: latent variables inverse link of f.
:type inv_link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata not used in bernoulli
:returns: gradient of log likelihood evaluated at points inverse link of f.
:rtype: Nx1 array
"""
#grad = (y/inv_link_f) - (1.-y)/(1-inv_link_f)
#grad = np.where(y, 1./inv_link_f, -1./(1-inv_link_f))
ff = np.clip(inv_link_f, 1e-9, 1-1e-9)
denom = np.where(y==1, ff, -(1-ff))
return 1./denom
def d2logpdf_dlink2(self, inv_link_f, y, Y_metadata=None):
"""
Hessian at y, given inv_link_f, w.r.t inv_link_f the hessian will be 0 unless i == j
i.e. second derivative logpdf at y given inverse link of f_i and inverse link of f_j w.r.t inverse link of f_i and inverse link of f_j.
.. math::
\\frac{d^{2}\\ln p(y_{i}|\\lambda(f_{i}))}{d\\lambda(f)^{2}} = \\frac{-y_{i}}{\\lambda(f)^{2}} - \\frac{(1-y_{i})}{(1-\\lambda(f))^{2}}
:param inv_link_f: latent variables inverse link of f.
:type inv_link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata not used in bernoulli
:returns: Diagonal of log hessian matrix (second derivative of log likelihood evaluated at points inverse link of f)
:rtype: Nx1 array
.. Note::
Will return diagonal of hessian, since everywhere else it is 0, as the likelihood factorizes over cases
(the distribution for y_i depends only on inverse link of f_i, not on inverse link of f_(j!=i))
"""
#d2logpdf_dlink2 = -y/(inv_link_f**2) - (1-y)/((1-inv_link_f)**2)
#d2logpdf_dlink2 = np.where(y, -1./np.square(inv_link_f), -1./np.square(1.-inv_link_f))
arg =
|
np.where(y==1, inv_link_f, 1.-inv_link_f)
|
numpy.where
|
# coding: utf-8
# # Data Preprocessing
# This notebook follows the U-Net model with submission kernel: https://www.kaggle.com/hmendonca/u-net-model-with-submission. In the Augment Data section the augmentation parameters can be tweaked to control how the images are processed; some of the details are still being worked out.
# In[6]:
# Lets import some useful libraires
import os
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from skimage.io import imread
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
from skimage.segmentation import mark_boundaries
#from skimage.util import montage2d as montage
from skimage.morphology import binary_opening, disk, label
import gc; gc.enable() # memory is tight
montage_rgb = lambda x: np.stack([montage(x[:, :, :, i]) for i in range(x.shape[3])], -1)
ship_dir = '../input'
train_image_dir = os.path.join(ship_dir, 'train_v2')
test_image_dir = os.path.join(ship_dir, 'test_v2')
def multi_rle_encode(img, **kwargs):
'''
Encode connected regions as separate masks
'''
labels = label(img)
if img.ndim > 2:
return [rle_encode(np.sum(labels==k, axis=2), **kwargs) for k in np.unique(labels[labels>0])]
else:
return [rle_encode(labels==k, **kwargs) for k in np.unique(labels[labels>0])]
# ref: https://www.kaggle.com/paulorzp/run-length-encode-and-decode
def rle_encode(img, min_max_threshold=1e-3, max_mean_threshold=None):
'''
img: numpy array, 1 - mask, 0 - background
Returns run length as a formatted string (start length pairs)
'''
if np.max(img) < min_max_threshold:
return '' ## no need to encode if it's all zeros
if max_mean_threshold and np.mean(img) > max_mean_threshold:
return '' ## ignore overfilled mask
pixels = img.T.flatten()
pixels = np.concatenate([[0], pixels, [0]])
runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
runs[1::2] -= runs[::2]
return ' '.join(str(x) for x in runs)
def rle_decode(mask_rle, shape=(768, 768)):
'''
mask_rle: run-length as formatted string (start length)
shape: (height,width) of array to return
Returns numpy array, 1 - mask, 0 - background
'''
s = mask_rle.split()
starts, lengths = [np.asarray(x, dtype=int) for x in (s[0:][::2], s[1:][::2])]
starts -= 1
ends = starts + lengths
img = np.zeros(shape[0]*shape[1], dtype=np.uint8)
for lo, hi in zip(starts, ends):
img[lo:hi] = 1
return img.reshape(shape).T # Needed to align to RLE direction
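# A quick round-trip sanity check with a hypothetical 3x3 mask: encoding and
# then decoding should reproduce the original binary mask exactly.
toy_mask = np.zeros((3, 3), dtype=np.uint8)
toy_mask[1, 1:] = 1
assert np.array_equal(rle_decode(rle_encode(toy_mask), shape=(3, 3)), toy_mask)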
def masks_as_image(in_mask_list):
# Take the individual ship masks and create a single mask array for all ships
all_masks = np.zeros((768, 768), dtype = np.uint8)
for mask in in_mask_list:
if isinstance(mask, str):
all_masks |= rle_decode(mask)
return all_masks
def masks_as_color(in_mask_list):
# Take the individual ship masks and create a color mask array for each ship
all_masks = np.zeros((768, 768), dtype = np.float)
scale = lambda x: (len(in_mask_list)+x+1) / (len(in_mask_list)*2) ## scale the heatmap image to shift
for i,mask in enumerate(in_mask_list):
if isinstance(mask, str):
all_masks[:,:] += scale(i) * rle_decode(mask)
return all_masks
# In[54]:
test_image_dir
# In[7]:
masks = pd.read_csv(os.path.join('../input/', 'train_ship_segmentations_v2.csv'))
not_empty = pd.notna(masks.EncodedPixels)
print(not_empty.sum(), 'masks in', masks[not_empty].ImageId.nunique(), 'images')
print((~not_empty).sum(), 'empty images in', masks.ImageId.nunique(), 'total images')
masks.head()
# # Split into training and validation groups
# We stratify by the number of boats appearing so that each split has a balanced distribution of ship counts
# In[8]:
masks['ships'] = masks['EncodedPixels'].map(lambda c_row: 1 if isinstance(c_row, str) else 0)
unique_img_ids = masks.groupby('ImageId').agg({'ships': 'sum'}).reset_index()
unique_img_ids['has_ship'] = unique_img_ids['ships'].map(lambda x: 1.0 if x>0 else 0.0)
unique_img_ids['has_ship_vec'] = unique_img_ids['has_ship'].map(lambda x: [x])
# some files are too small/corrupt
unique_img_ids['file_size_kb'] = unique_img_ids['ImageId'].map(lambda c_img_id:
os.stat(os.path.join(train_image_dir,
c_img_id)).st_size/1024)
unique_img_ids = unique_img_ids[unique_img_ids['file_size_kb'] > 50] # keep only +50kb files
unique_img_ids['file_size_kb'].hist()
masks.drop(['ships'], axis=1, inplace=True)
unique_img_ids.sample(7)
# ### Examine Number of Ship Images
# Here we examine how often ships appear; images without any ships have a ship count of 0
# In[9]:
unique_img_ids['ships'].hist(bins=unique_img_ids['ships'].max())
# # Undersample Empty Images
# Here we undersample the empty images to get a better balanced group with more ships to try and segment
# In[34]:
SAMPLES_PER_GROUP = 4000
balanced_train_df = unique_img_ids.groupby('ships').apply(lambda x: x.sample(SAMPLES_PER_GROUP) if len(x) > SAMPLES_PER_GROUP else x)
balanced_train_df['ships'].hist(bins=balanced_train_df['ships'].max()+1)
print(balanced_train_df.shape[0], 'masks')
# In[35]:
from sklearn.model_selection import train_test_split
train_ids, valid_ids = train_test_split(balanced_train_df,
test_size = 0.2,
stratify = balanced_train_df['ships'])
train_df = pd.merge(masks, train_ids)
valid_df = pd.merge(masks, valid_ids)
print(train_df.shape[0], 'training masks')
print(valid_df.shape[0], 'validation masks')
# # Decode all the RLEs into Images
# We make a generator to produce batches of images
# In[36]:
# Model parameters
BATCH_SIZE = 48
EDGE_CROP = 16
GAUSSIAN_NOISE = 0.1
UPSAMPLE_MODE = 'SIMPLE'
# downsampling inside the network
NET_SCALING = (1, 1)
# downsampling in preprocessing
IMG_SCALING = (3, 3)
# number of validation images to use
VALID_IMG_COUNT = 900
# maximum number of steps_per_epoch in training
MAX_TRAIN_STEPS = 9
MAX_TRAIN_EPOCHS = 99
AUGMENT_BRIGHTNESS = False
# In[37]:
def make_image_gen(in_df, batch_size = BATCH_SIZE):
all_batches = list(in_df.groupby('ImageId'))
out_rgb = []
out_mask = []
while True:
np.random.shuffle(all_batches)
for c_img_id, c_masks in all_batches:
rgb_path = os.path.join(train_image_dir, c_img_id)
c_img = imread(rgb_path)
c_mask = np.expand_dims(masks_as_image(c_masks['EncodedPixels'].values), -1)
if IMG_SCALING is not None:
c_img = c_img[::IMG_SCALING[0], ::IMG_SCALING[1]]
c_mask = c_mask[::IMG_SCALING[0], ::IMG_SCALING[1]]
out_rgb += [c_img]
out_mask += [c_mask]
if len(out_rgb)>=batch_size:
yield np.stack(out_rgb, 0)/255.0, np.stack(out_mask, 0)
out_rgb, out_mask=[], []
# In[38]:
train_gen = make_image_gen(train_df)
train_x, train_y = next(train_gen)
print('x', train_x.shape, train_x.min(), train_x.max())
print('y', train_y.shape, train_y.min(), train_y.max())
# In[39]:
from skimage.util.montage import montage2d as montage
#from skimage.util import montage2d as montage
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize = (30, 10))
batch_rgb = montage_rgb(train_x)
batch_seg = montage(train_y[:, :, :, 0])
ax1.imshow(batch_rgb)
ax1.set_title('Images')
ax2.imshow(batch_seg)
ax2.set_title('Segmentations')
ax3.imshow(mark_boundaries(batch_rgb,
batch_seg.astype(int)))
ax3.set_title('Outlined Ships')
fig.savefig('overview.png')
# # Make the Validation Set
# In[40]:
get_ipython().run_cell_magic('time', '', 'valid_x, valid_y = next(make_image_gen(valid_df, VALID_IMG_COUNT))\nprint(valid_x.shape, valid_y.shape)')
# # Augment Data
# In[41]:
from keras.preprocessing.image import ImageDataGenerator
dg_args = dict(featurewise_center = False,
samplewise_center = False,
rotation_range = 45, # a value in degrees (0-180), a range within which to randomly rotate pictures
width_shift_range = 0.1, # randomly translate pictures vertically or horizontally
height_shift_range = 0.1, # randomly translate pictures vertically or horizontally
shear_range = 0.01, # randomly applying shearing transformations
zoom_range = [0.9, 1.25],
horizontal_flip = True, #randomly flipping half of the images horizontally
vertical_flip = True,
fill_mode = 'reflect', # strategy used for filling in newly created pixels, which can appear after a rotation or a width/height shift.
data_format = 'channels_last')
# brightness can be problematic since it seems to change the labels differently from the images
if AUGMENT_BRIGHTNESS:
dg_args[' brightness_range'] = [0.5, 1.5]
image_gen = ImageDataGenerator(**dg_args)
if AUGMENT_BRIGHTNESS:
dg_args.pop('brightness_range')
label_gen = ImageDataGenerator(**dg_args)
def create_aug_gen(in_gen, seed = None):
np.random.seed(seed if seed is not None else np.random.choice(range(9999)))
for in_x, in_y in in_gen:
seed = np.random.choice(range(9999))
# keep the seeds synchronized, otherwise the augmentation applied to the images differs from that applied to the masks
g_x = image_gen.flow(255*in_x,
batch_size = in_x.shape[0],
seed = seed,
shuffle=True)
g_y = label_gen.flow(in_y,
batch_size = in_x.shape[0],
seed = seed,
shuffle=True)
yield next(g_x)/255.0, next(g_y)
# In[42]:
cur_gen = create_aug_gen(train_gen)
t_x, t_y = next(cur_gen)
print('x', t_x.shape, t_x.dtype, t_x.min(), t_x.max())
print('y', t_y.shape, t_y.dtype, t_y.min(), t_y.max())
# only keep first 9 samples to examine in detail
t_x = t_x[:9]
t_y = t_y[:9]
fig, (ax1, ax2) = plt.subplots(1, 2, figsize = (20, 10))
ax1.imshow(montage_rgb(t_x), cmap='gray')
ax1.set_title('images')
ax2.imshow(montage(t_y[:, :, :, 0]), cmap='gray_r')
ax2.set_title('ships')
# In[26]:
gc.collect()
# # Build a Model
# Here we use a slight variation on the standard U-Net architecture
# In[43]:
from keras import models, layers
# Build U-Net model
def upsample_conv(filters, kernel_size, strides, padding):
return layers.Conv2DTranspose(filters, kernel_size, strides=strides, padding=padding)
def upsample_simple(filters, kernel_size, strides, padding):
return layers.UpSampling2D(strides)
if UPSAMPLE_MODE=='DECONV':
upsample=upsample_conv
else:
upsample=upsample_simple
input_img = layers.Input(t_x.shape[1:], name = 'RGB_Input')
pp_in_layer = input_img
if NET_SCALING is not None:
pp_in_layer = layers.AvgPool2D(NET_SCALING)(pp_in_layer)
pp_in_layer = layers.GaussianNoise(GAUSSIAN_NOISE)(pp_in_layer)
pp_in_layer = layers.BatchNormalization()(pp_in_layer)
c1 = layers.Conv2D(8, (3, 3), activation='relu', padding='same') (pp_in_layer)
c1 = layers.Conv2D(8, (3, 3), activation='relu', padding='same') (c1)
p1 = layers.MaxPooling2D((2, 2)) (c1)
c2 = layers.Conv2D(16, (3, 3), activation='relu', padding='same') (p1)
c2 = layers.Conv2D(16, (3, 3), activation='relu', padding='same') (c2)
p2 = layers.MaxPooling2D((2, 2)) (c2)
c3 = layers.Conv2D(32, (3, 3), activation='relu', padding='same') (p2)
c3 = layers.Conv2D(32, (3, 3), activation='relu', padding='same') (c3)
p3 = layers.MaxPooling2D((2, 2)) (c3)
c4 = layers.Conv2D(64, (3, 3), activation='relu', padding='same') (p3)
c4 = layers.Conv2D(64, (3, 3), activation='relu', padding='same') (c4)
p4 = layers.MaxPooling2D(pool_size=(2, 2)) (c4)
c5 = layers.Conv2D(128, (3, 3), activation='relu', padding='same') (p4)
c5 = layers.Conv2D(128, (3, 3), activation='relu', padding='same') (c5)
u6 = upsample(64, (2, 2), strides=(2, 2), padding='same') (c5)
u6 = layers.concatenate([u6, c4])
c6 = layers.Conv2D(64, (3, 3), activation='relu', padding='same') (u6)
c6 = layers.Conv2D(64, (3, 3), activation='relu', padding='same') (c6)
u7 = upsample(32, (2, 2), strides=(2, 2), padding='same') (c6)
u7 = layers.concatenate([u7, c3])
c7 = layers.Conv2D(32, (3, 3), activation='relu', padding='same') (u7)
c7 = layers.Conv2D(32, (3, 3), activation='relu', padding='same') (c7)
u8 = upsample(16, (2, 2), strides=(2, 2), padding='same') (c7)
u8 = layers.concatenate([u8, c2])
c8 = layers.Conv2D(16, (3, 3), activation='relu', padding='same') (u8)
c8 = layers.Conv2D(16, (3, 3), activation='relu', padding='same') (c8)
u9 = upsample(8, (2, 2), strides=(2, 2), padding='same') (c8)
u9 = layers.concatenate([u9, c1], axis=3)
c9 = layers.Conv2D(8, (3, 3), activation='relu', padding='same') (u9)
c9 = layers.Conv2D(8, (3, 3), activation='relu', padding='same') (c9)
d = layers.Conv2D(1, (1, 1), activation='sigmoid') (c9)
# d = layers.Cropping2D((EDGE_CROP, EDGE_CROP))(d)
# d = layers.ZeroPadding2D((EDGE_CROP, EDGE_CROP))(d)
if NET_SCALING is not None:
d = layers.UpSampling2D(NET_SCALING)(d)
seg_model = models.Model(inputs=[input_img], outputs=[d])
seg_model.summary()
# In[44]:
import keras.backend as K
from keras.optimizers import Adam
from keras.losses import binary_crossentropy
## intersection over union
def IoU(y_true, y_pred, eps=1e-6):
if np.max(y_true) == 0.0:
return IoU(1-y_true, 1-y_pred) ## empty image; calc IoU of zeros
intersection = K.sum(y_true * y_pred, axis=[1,2,3])
union = K.sum(y_true, axis=[1,2,3]) + K.sum(y_pred, axis=[1,2,3]) - intersection
return -K.mean( (intersection + eps) / (union + eps), axis=0)
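# A hedged NumPy analogue of the IoU above (the Keras version returns the
# negated mean so it can be minimised as a loss). The masks below are toy examples.
def iou_np(y_true, y_pred, eps=1e-6):
    inter = np.sum(y_true * y_pred)
    union = np.sum(y_true) + np.sum(y_pred) - inter
    return (inter + eps) / (union + eps)
toy_a = np.zeros((4, 4)); toy_a[1:3, 1:3] = 1
toy_b = np.zeros((4, 4)); toy_b[2:4, 2:4] = 1
# iou_np(toy_a, toy_b) -> 1 overlapping pixel over a union of 7, i.e. ~0.143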
# In[45]:
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau
weight_path="{}_weights.best.hdf5".format('seg_model')
checkpoint = ModelCheckpoint(weight_path, monitor='val_loss', verbose=1, save_best_only=True, mode='min', save_weights_only=True)
reduceLROnPlat = ReduceLROnPlateau(monitor='val_loss', factor=0.33,
patience=1, verbose=1, mode='min',
min_delta=0.0001, cooldown=0, min_lr=1e-8)
early = EarlyStopping(monitor="val_loss", mode="min", verbose=2,
patience=20) # probably needs to be more patient, but kaggle time is limited
callbacks_list = [checkpoint, early, reduceLROnPlat]
# In[46]:
def fit():
seg_model.compile(optimizer=Adam(1e-3, decay=1e-6), loss=IoU, metrics=['binary_accuracy'])
step_count = min(MAX_TRAIN_STEPS, train_df.shape[0]//BATCH_SIZE)
aug_gen = create_aug_gen(make_image_gen(train_df))
loss_history = [seg_model.fit_generator(aug_gen,
steps_per_epoch=step_count,
epochs=MAX_TRAIN_EPOCHS,
validation_data=(valid_x, valid_y),
callbacks=callbacks_list,
workers=1 # the generator is not very thread safe
)]
return loss_history
while True:
loss_history = fit()
if np.min([mh.history['val_loss'] for mh in loss_history]) < -0.2:
break
# In[47]:
def show_loss(loss_history):
epochs = np.concatenate([mh.epoch for mh in loss_history])
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(22, 10))
_ = ax1.plot(epochs, np.concatenate([mh.history['loss'] for mh in loss_history]), 'b-',
epochs, np.concatenate([mh.history['val_loss'] for mh in loss_history]), 'r-')
ax1.legend(['Training', 'Validation'])
ax1.set_title('Loss')
_ = ax2.plot(epochs, np.concatenate([mh.history['binary_accuracy'] for mh in loss_history]), 'b-',
epochs, np.concatenate([mh.history['val_binary_accuracy'] for mh in loss_history]), 'r-')
ax2.legend(['Training', 'Validation'])
ax2.set_title('Binary Accuracy (%)')
show_loss(loss_history)
# In[48]:
seg_model.load_weights(weight_path)
seg_model.save('seg_model.h5')
# In[49]:
pred_y = seg_model.predict(valid_x)
print(pred_y.shape, pred_y.min(axis=0).max(), pred_y.max(axis=0).min(), pred_y.mean())
# In[50]:
fig, ax = plt.subplots(1, 1, figsize = (6, 6))
ax.hist(pred_y.ravel(),
|
np.linspace(0, 1, 20)
|
numpy.linspace
|
# Helper functions for differentiable image warping
# Code based on https://github.com/seasonSH/WarpGAN
import torch
import numpy as np
import time
def umeyama(src, dst, estimate_scale):
"""Estimate N-D similarity transformation with or without scaling.
Parameters
----------
src : (M, N) array
Source coordinates.
dst : (M, N) array
Destination coordinates.
estimate_scale : bool
Whether to estimate scaling factor.
Returns
-------
T : (N + 1, N + 1)
The homogeneous similarity transformation matrix. The matrix contains
NaN values only if the problem is not well-conditioned.
References
----------
.. [1] "Least-squares estimation of transformation parameters between two
point patterns", <NAME>, PAMI 1991, DOI: 10.1109/34.88573
"""
num = src.shape[0]
dim = src.shape[1]
# Compute mean of src and dst.
src_mean = src.mean(axis=0)
dst_mean = dst.mean(axis=0)
# Subtract mean from src and dst.
src_demean = src - src_mean
dst_demean = dst - dst_mean
# Eq. (38).
A = np.dot(dst_demean.T, src_demean) / num
# Eq. (39).
d = np.ones((dim,), dtype=np.double)
if np.linalg.det(A) < 0:
d[dim - 1] = -1
T = np.eye(dim + 1, dtype=np.double)
U, S, V = np.linalg.svd(A)
# Eq. (40) and (43).
rank = np.linalg.matrix_rank(A)
if rank == 0:
return np.nan * T
elif rank == dim - 1:
if np.linalg.det(U) * np.linalg.det(V) > 0:
T[:dim, :dim] = np.dot(U, V)
else:
s = d[dim - 1]
d[dim - 1] = -1
T[:dim, :dim] = np.dot(U, np.dot(np.diag(d), V))
d[dim - 1] = s
else:
T[:dim, :dim] = np.dot(U, np.dot(np.diag(d), V))  # np.linalg.svd already returns V transposed
if estimate_scale:
# Eq. (41) and (42).
scale = 1.0 / src_demean.var(axis=0).sum() * np.dot(S, d)
else:
scale = 1.0
T[:dim, dim] = dst_mean - scale * np.dot(T[:dim, :dim], src_mean.T)
T[:dim, :dim] *= scale
return T
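# A minimal usage sketch with hypothetical 2-D points: recover a known
# similarity transform (rotation + scale + translation) from correspondences.
rng = np.random.default_rng(0)
theta, s_true, t_true = np.deg2rad(30.0), 1.5, np.array([2.0, -1.0])
R_true = np.array([[np.cos(theta), -np.sin(theta)],
                   [np.sin(theta),  np.cos(theta)]])
src_toy = rng.random((10, 2))
dst_toy = s_true * src_toy.dot(R_true.T) + t_true
T_toy = umeyama(src_toy, dst_toy, estimate_scale=True)
# Applying T_toy to homogeneous src coordinates should reproduce dst_toy:
# np.allclose(np.hstack([src_toy, np.ones((10, 1))]).dot(T_toy.T)[:, :2], dst_toy)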
# Main function for image warping
def image_warp(img, dst_pts, flow_pts, device, sharp=False, img2=None):
"""Warps the giving image based on displacement and destination points
Args:
img: [b, c, h, w] float `Tensor`
dst_pts: [n, 2] float `Tensor`
flow_pts: [n, 2] float `Tensor`
Returns:
interpolated: [b, c, h, w] float `Tensor`
"""
start_time = time.time()
w, v = solve_interpolation(dst_pts, flow_pts, device)
solve_time = time.time()
_, _, height, width = img.size()
grid_x, grid_y = np.meshgrid(np.arange(width), np.arange(height))
grid_locations =
|
np.stack([grid_y, grid_x], axis=2)
|
numpy.stack
|
import numpy as np
import dataset
def mult_vec(mat, vec):
v = np.ones(4)
v[:3] = vec
return (np.linalg.inv(mat)@v)[:3]
def test_random_camera():
mat = dataset.geometry.random_camera()
center = np.zeros(3)
target =
|
np.array([0,0,-1])
|
numpy.array
|
"""
Created on Wed Mar 07 09:23:08 2018
@author: <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import t
from scipy.stats import norm
from scipy.optimize import minimize
from time import time
KBETA_FWHM_DEFAULT_INITIAL_VALUE = 0.24
KBETA_FWHM_BOUNDS_DEFAULT_INITIAL_VALUE = (0.02,1.0)
class XrayPhase(object):
"""The x-ray diffraction data of one material. Can be either a Profile (full pattern XY data)
or a Dif (peaks only, which will be broadened). Has scalings, RIR, chemistry, and name of corresponding file."""
def __init__(self,file_path=None):
self.RIR = None
self.scaling_guess = None
self.scaling_optimized = None
self.scaling_bounds = (0.0,None)
self.refine_scaling = True
self.chemistry = None
self.relative_contribution = None
self.absolute_contribution = None
self.file_path = file_path
if file_path is None:
self.file_nickname = None
else:
self.file_nickname = file_path.split('/')[-1].split('\\')[-1].split('.')[0]
def SetScalingGuess(self,scaling_guess,set_both=True):
self.scaling_guess = scaling_guess
if set_both:
self.scaling_optimized = scaling_guess
def SetScalingOptimized(self,scaling_optimized):
self.scaling_optimized = scaling_optimized
def SetScalingBounds(self,scaling_bounds):
self.scaling_bounds = scaling_bounds
def RefineScaling(self,boolean):
self.refine_scaling = boolean
def GetArea(self,start_2theta=None,end_2theta=None):
pass
def GetAreaSloppy(self,start_2theta,end_2theta):
pass
def ReadFile(self,file_path):
pass
def SetRelativeContribution(self,percentage):
self.relative_contribution = percentage
def GetRelativeContribution(self):
return self.relative_contribution
def SetAbsoluteContribution(self,percentage):
self.absolute_contribution = percentage
def GetAbsoluteContribution(self):
return self.absolute_contribution
class XrayProfile(XrayPhase):
"""The x-ray diffraction data of one material, with full pattern XY data. Can be read in from MDI, XY, or CSV."""
def __init__(self, file_path, is_input_profile=False, take_every_nth_point=1, twotheta_ranges=[(0.0,90.0)], twotheta_offset=0.0, print_warnings=True):
super(XrayProfile,self).__init__(file_path)
self.xy_data = None
self.xy_data_for_plotting = None
self.x_data_interpolated = None
self.y_data_interpolated = None
self.area_with_scaling_1 = None
if(is_input_profile):
self.SetScalingGuess(1.0)
self.ReadFile(file_path,is_input_profile,take_every_nth_point,twotheta_ranges,twotheta_offset,print_warnings=print_warnings)
def GetInterpolatedXYData(self, xValues):
self.x_data_interpolated = np.array(xValues)
self.y_data_interpolated = np.interp(xValues,self.xy_data[0],self.xy_data[1])
return np.array([self.x_data_interpolated,self.y_data_interpolated])
def GetScaledPattern(self,xValues=None,scaling=None,true_for_optimized_false_for_guess=True):
if(scaling is None):
if true_for_optimized_false_for_guess:
scaling = self.scaling_optimized
else:
scaling = self.scaling_guess
self.GetInterpolatedXYData(xValues)
return np.multiply( self.y_data_interpolated, scaling )
def GetArea(self, xValues= None, scaling=None, start_2theta=None, end_2theta=None):
from scipy import integrate
if scaling is None:
scaling = self.scaling_optimized
if xValues is not None:
self.GetInterpolatedXYData(xValues)
self.area_with_scaling_1 = integrate.simps(self.y_data_interpolated, xValues)
return self.area_with_scaling_1 * scaling
def ReadFile(self, file_path, is_input_profile=False, take_every_nth_point=1, twotheta_ranges=[(0.0,90.0)], twotheta_offset=0.0, print_warnings=True):
RIR_found, chemistry_found = False, False
RIR = 0.0
chemistry = {}
should_read_next_line = True
mdi_header_found, xy_data_found = False, False
f = open(file_path, 'r')
while(True):
if should_read_next_line:
line = f.readline()
else:
should_read_next_line = True
if line == "":
break
elif "".join(line.split()).startswith("#RIR"):
RIR_found = True
RIR = float(line.lstrip().split()[2])
elif "".join(line.split()).startswith("#CHEMISTRY"):
chemistry_found = True
while(True):
line = f.readline()
should_read_next_line = False
linesplit = line.split()
if len(linesplit) != 2 or not linesplit[1].replace('.','').isdigit():
# chemistry not found on this line
break
else:
# chemistry found on this line
name,percentage = linesplit
chemistry[name] = float(percentage)
else:
try: # look for MDI format
linesplit = line.replace('.','').split()
if (len(linesplit) == 7 and linesplit[0].isdigit() and linesplit[1].isdigit() and
linesplit[5].isdigit() and linesplit[6].isdigit()):
mdi_header_found = True
linesplit = line.split()
x_start = float(linesplit[0])
x_end = float(linesplit[5])
x_num = int(linesplit[6])
x_data = np.linspace(x_start,x_end,num=x_num)
y_data = []
while(True):
line = f.readline()
should_read_next_line = False
linesplit = line.replace('.','').split()
if "".join(linesplit).isdigit():
y_data += [float(i) for i in line.split()]
else:
break
except:
pass
try: # look for XY or CSV format
linesplit = line.replace(',',' ').replace('.','').replace('e','').replace('E','').replace('-','').replace('+','').split()
if len(linesplit) == 2 and linesplit[0].isdigit() and linesplit[1].isdigit():
linesplit = line.replace(',',' ').split()
x_data = [float(linesplit[0])]
y_data = [float(linesplit[1])]
xy_data_found = True
while(True):
line = f.readline()
should_read_next_line = False
linesplit = line.replace(',',' ').replace('.','').replace('e','').replace('E','').replace('-','').replace('+','').split()
if len(linesplit) == 2 and linesplit[0].isdigit() and linesplit[1].isdigit():
linesplit = line.replace(',',' ').split()
x_data += [float(linesplit[0])]
y_data += [float(linesplit[1])]
else:
break
except:
pass
if not (mdi_header_found and len(x_data) == len(y_data) or xy_data_found):
f.close()
raise SyntaxError("No data found in profile: '"+file_path+"'\n"
+'Profile files must contain either XY data or MDI data.\n'
+'- XY files must contain at least one line that is\n'
+' a pair of numbers separated by whitespace and/or a comma.\n'
+'- MDI files must contain standard MDI header line of the form:\n'
+' x_start x_increment scan_rate xray_source xray_wavelength x_end number_of_points')
f.close()
# At this point, at least x_data and y_data have been found, and RIR and chemistry have been looked for.
# Done parsing the file. Time to process the data.
self.xy_data_unprocessed = np.array([x_data,y_data])
# apply 2theta offset: add offset to every 2theta value
if twotheta_offset != 0.0:
x_data = [x + twotheta_offset for x in x_data]
# set minimum intensity to 0.0 (if not input profile) and set maximum intensity to 100.0
maxY = max(y_data)
if is_input_profile:
y_data = np.array([y_data[i]*100.0/maxY for i in range(len(y_data))])
else:
minY = min(y_data)
y_data = np.array([(y_data[i]-minY) *100.0/(maxY-minY) for i in range(len(y_data))])
self.xy_data_for_plotting = np.array([x_data,y_data])
if take_every_nth_point > 1:
x_data = [x_data[i] for i in range(len(x_data)) if i % take_every_nth_point == 0]
y_data = [y_data[i] for i in range(len(y_data)) if i % take_every_nth_point == 0]
# remove data points outside of desired range
if twotheta_ranges != [(0.0,90.0)]:
new_x_data,new_y_data = [],[]
for i in range(len(x_data)):
for pair in twotheta_ranges:
if x_data[i] >= pair[0] and x_data[i] <= pair[1]:
new_x_data.append(x_data[i])
new_y_data.append(y_data[i])
break # It is sufficient for the data to be in only one interval. So, go to next x_data.
x_data,y_data = new_x_data,new_y_data
self.xy_data = np.array([x_data,y_data])
self.RIR = RIR
self.chemistry = chemistry
if print_warnings:
if(not is_input_profile and not RIR_found):
print("Warning: no RIR found in amorphous file: "+file_path)
if(not chemistry_found):
print("Warning: no chemistry found in amorphous file: "+file_path)
class XrayDif(XrayPhase):
"""The x-ray diffraction data of one material, with only peak XY data.
Reads only difs from AMCSD or RRUFF (no file modifications needed)."""
def __init__(self, file_path=None, print_warnings=True):
super(XrayDif,self).__init__(file_path)
self.FWHM_guess = None
self.FWHM_optimized = None
self.FWHM_bounds = (None,None)
self.refine_FWHM = True
self.pseudovoigt_parameter_guess = None # 0.0
self.pseudovoigt_parameter_optimized = None # 0.0
self.pseudovoigt_parameter_bounds = (0.0,1.0)
self.refine_pseudovoigt_parameter = False
self.pearson_vii_parameter_guess = None
self.pearson_vii_parameter_optimized = None
self.pearson_vii_parameter_bounds = (1.0,None)
self.refine_pearson_vii_parameter = False
self.peak_intensities = None
self.kalpha_average_2thetas = None
self.kalpha1_2thetas = None
self.kalpha2_2thetas = None
self.kbeta_2thetas = None
self.HKLs = None
self.d_spacings = None
self.cell_parameters_original = None #for copypasta
self.cell_parameters_guess = None
self.cell_parameters_optimized = None
self.cell_parameter_bounds = [(0.1,None),(0.1,None),(0.1,None),(30.0,180.0),(30.0,180.0),(30.0,180.0)]
self.refine_cell_parameters = True
self.kalpha_average_wavelength = None
self.kalpha1_wavelength = None
self.kalpha2_wavelength = None
self.kalpha_type = "average" #alternative is "12"
self.implement_kbeta = False
self.refine_kbeta_scaling = True #overridden by implement_kbeta = False
self.kbeta_FWHM_guess = None
self.kbeta_FWHM_optimized = None
self.kbeta_FWHM_bounds = (None,None)
self.refine_kbeta_FWHM = True #overridden by implement_kbeta = False
self.kbeta_scaling_guess = None
self.kbeta_scaling_optimized = None
self.kbeta_scaling_bounds = (0.0,None) #TODO: make supporting class functions for these new parameters
self.kbeta_wavelength = None
self.space_group = None
self.num_unique_cell_parameters = None
self.crystal_system = None
self.ReadFile(file_path,print_warnings)
def SetFWHMGuess(self,FWHM_guess,set_both=True):
self.FWHM_guess = FWHM_guess
if set_both:
self.FWHM_optimized = FWHM_guess
def SetFWHMOptimized(self,FWHM_optimized):
self.FWHM_optimized = FWHM_optimized
def SetFWHMBounds(self,FWHM_bounds):
self.FWHM_bounds = FWHM_bounds
def RefineFWHM(self,boolean):
self.refine_FWHM = boolean
def SetPseudoVoigtParameterGuess(self,pseudovoigt_parameter_guess,set_both=True):
self.pseudovoigt_parameter_guess = pseudovoigt_parameter_guess
if set_both:
self.pseudovoigt_parameter_optimized = pseudovoigt_parameter_guess
def SetPseudoVoigtParameterOptimized(self,pseudovoigt_parameter_optimized):
self.pseudovoigt_parameter_optimized = pseudovoigt_parameter_optimized
def SetPseudoVoigtParameterBounds(self,pseudovoigt_parameter_bounds):
self.pseudovoigt_parameter_bounds = pseudovoigt_parameter_bounds
def RefinePseudoVoigtParameter(self,boolean):
self.refine_pseudovoigt_parameter = boolean
def SetPearsonVIIParameterGuess(self,pearson_vii_parameter_guess,set_both=True):
self.pearson_vii_parameter_guess = pearson_vii_parameter_guess
if set_both:
self.pearson_vii_parameter_optimized = pearson_vii_parameter_guess
def SetPearsonVIIParameterOptimized(self,pearson_vii_parameter_optimized):
self.pearson_vii_parameter_optimized = pearson_vii_parameter_optimized
def SetPearsonVIIParameterBounds(self,pearson_vii_parameter_bounds):
self.pearson_vii_parameter_bounds = pearson_vii_parameter_bounds
def RefinePearsonVIIParameter(self,boolean):
self.refine_pearson_vii_parameter = boolean
def SetCellParametersGuess(self, cell_parameters_guess):
self.cell_parameters_guess = cell_parameters_guess
def SetCellParametersOptimized(self, cell_parameters_optimized):
self.cell_parameters_optimized = cell_parameters_optimized
def SetCellParameterBoundsUsingValues(self, cell_parameter_bounds):
self.cell_parameter_bounds = cell_parameter_bounds
def SetCellParameterBoundsUsingPercentage(self, percentage = 5.0, true_for_optimized_false_for_guess = True):
if true_for_optimized_false_for_guess:
self.cell_parameter_bounds = [(self.cell_parameters_optimized[i]*(1.0-percentage/100.0),self.cell_parameters_optimized[i]*(1.0+percentage/100.0)) for i in range(6)]
else:
self.cell_parameter_bounds = [(self.cell_parameters_guess[i]*(1.0-percentage/100.0),self.cell_parameters_guess[i]*(1.0+percentage/100.0)) for i in range(6)]
def SetCellParameterBoundsUsingDifferences(self, plusminus = [0.05,0.05,0.05,1.0,1.0,1.0], true_for_optimized_false_for_guess = True):
if true_for_optimized_false_for_guess:
self.cell_parameter_bounds = [(self.cell_parameters_optimized[i]-plusminus[i],self.cell_parameters_optimized[i]+plusminus[i]) for i in range(6)]
else:
self.cell_parameter_bounds = [(self.cell_parameters_guess[i]-plusminus[i],self.cell_parameters_guess[i]+plusminus[i]) for i in range(6)]
def RefineCellParameters(self, boolean):
self.refine_cell_parameters = boolean
def GetArea(self, FWHM=None, scaling=None, start_2theta=None, end_2theta=None):
total_area = 0.0
#Note: I assume that the area of the kbeta peaks is negligible
if FWHM is None:
FWHM = self.FWHM_optimized
if scaling is None:
scaling = self.scaling_optimized
xin = self.kalpha_average_2thetas
yin = self.peak_intensities
sigma = FWHM / 2.35482
if (start_2theta is not None) and (end_2theta is not None):
for i in range(len(xin)):
total_area += yin[i] * (norm.cdf(x=end_2theta,loc=xin[i],scale=sigma) - norm.cdf(x=start_2theta,loc=xin[i],scale=sigma))
if (start_2theta is not None) and (end_2theta is None):
for i in range(len(xin)):
total_area += yin[i] * (1.0 - norm.cdf(x=start_2theta,loc=xin[i],scale=sigma))
if (start_2theta is None) and (end_2theta is not None):
for i in range(len(xin)):
total_area += yin[i] * norm.cdf(x=end_2theta,loc=xin[i],scale=sigma)
if (start_2theta is None) and (end_2theta is None):
for i in range(len(xin)):
total_area += yin[i] # area under a Gaussian is 1
return total_area * scaling
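# A quick check of the partial-area logic above with assumed toy numbers: a
# unit-area Gaussian peak at 30 deg 2theta with FWHM 0.2 has, between 29.9 and
# 30.1 deg, an area of norm.cdf(30.1, 30.0, sigma) - norm.cdf(29.9, 30.0, sigma).
toy_sigma = 0.2 / 2.35482
toy_partial_area = norm.cdf(x=30.1, loc=30.0, scale=toy_sigma) - norm.cdf(x=29.9, loc=30.0, scale=toy_sigma)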
def GetBroadenedPattern(self,xValues,FWHM=None,scaling=None,kbeta_scaling=None,kbeta_FWHM=None,pseudovoigt_parameter=None,pearson_vii_parameter=None,true_for_optimized_false_for_guess=True):
#turns the peaks into full xy data for plotting
eta = pseudovoigt_parameter
m = pearson_vii_parameter
xin_alpha_avg = self.kalpha_average_2thetas
xin_alpha1 = self.kalpha1_2thetas
xin_alpha2 = self.kalpha2_2thetas
xin_beta = self.kbeta_2thetas
yin = self.peak_intensities
if (FWHM is None):
if true_for_optimized_false_for_guess:
FWHM = self.FWHM_optimized
else:
FWHM = self.FWHM_guess
if (scaling is None):
if true_for_optimized_false_for_guess:
scaling = self.scaling_optimized
else:
scaling = self.scaling_guess
if (self.implement_kbeta):
if (kbeta_scaling is None):
if true_for_optimized_false_for_guess:
kbeta_scaling = self.kbeta_scaling_optimized
else:
kbeta_scaling = self.kbeta_scaling_guess
if (kbeta_FWHM is None):
if true_for_optimized_false_for_guess:
kbeta_FWHM = self.kbeta_FWHM_optimized
else:
kbeta_FWHM = self.kbeta_FWHM_guess
if (m is None):
if true_for_optimized_false_for_guess:
m = self.pearson_vii_parameter_optimized
else:
m = self.pearson_vii_parameter_guess
if (eta is None):
if true_for_optimized_false_for_guess:
eta = self.pseudovoigt_parameter_optimized
else:
eta = self.pseudovoigt_parameter_guess
elif eta > 1.0 or eta < 0.0:
print("Pseudo-voigt parameter eta must be between 0 and 1.")
print("The given value of eta is "+str(eta))
return
k = 0.01
# convert FWHM to the Gaussian sigma: FWHM = 2*sqrt(2*ln 2)*sigma, so sigma = FWHM/2.35482
sigma = FWHM / 2.35482
tempG = 1/(sigma * np.sqrt(2 * np.pi))
if self.implement_kbeta:
kbeta_sigma = kbeta_FWHM / 2.35482
kbeta_tempG = 1/(kbeta_sigma *
|
np.sqrt(2 * np.pi)
|
numpy.sqrt
|
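# A standalone sketch (independent of the class above) of Gaussian peak
# broadening as used in GetBroadenedPattern: each peak at position x0 with
# intensity I contributes I*exp(-(x-x0)**2/(2*sigma**2))/(sigma*sqrt(2*pi)),
# with sigma = FWHM/2.35482. Peak positions, intensities and FWHM below are assumptions.
import numpy as np
def broaden_gaussian(x, peak_positions, peak_intensities, fwhm):
    sigma = fwhm / 2.35482
    y = np.zeros_like(x, dtype=float)
    for x0, intensity in zip(peak_positions, peak_intensities):
        y += intensity * np.exp(-(x - x0)**2 / (2.0*sigma**2)) / (sigma*np.sqrt(2.0*np.pi))
    return y
# Example: broaden_gaussian(np.linspace(20, 40, 2001), [26.6, 29.4], [100.0, 60.0], 0.2)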
#
# note: use python3
#
# import needed libraries
#
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from mpl_toolkits.axes_grid1.inset_locator import (inset_axes, InsetPosition, mark_inset)
import numpy as np
import scipy as sc
import glob
import pickle
import os
import sys as sys
#from dropFun import *
#
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
sys.setrecursionlimit(10000)
#
# definition of auxiliary functions
#
def readBin(filename):
f = open(filename,'rb')
data = np.fromfile(f,dtype='float64')
f.close()
return data
#
def readBinI(filename):
f = open(filename,'rb')
data = np.fromfile(f,dtype='int32')
f.close()
return data
#
def newTag(i,nd):
for j in range(6):
# print i,j
if drop['procShared'][i,j]>-1:
logCheck = np.logical_and((drop['myid']==drop['procShared'][i,j]), (drop['id']==drop['idShared'][i,j]))
lid = np.squeeze(logCheck.nonzero())
drop['procShared'][i,j]=-1
if lid.size>0 and newId[lid] ==0:
#print i,j, lid
newId[lid] = nd
newTag(lid,nd)
return
#
def cordCorr(xi,xavg,lx,d,corrF):
if corrF:
if abs(xavg-xi)>0.9*d: # NOTE!!!!!!! approx factor! NOT VALID FOR VERY LONG LIGAMENTS
sign = xavg-lx/2
sign = sign/abs(sign)
xf = xi+sign*lx
print(corrF)
else:
xf = xi
else:
xf = xi
return xf
#
# Parameters
#
nx = 32
ny = 32
nz = 32
d0 = 0.50
maxD = 0.70 # used to check for droplet across boundaries.
path = '../../src/data/post/tagging/' # to be modified accordingly
#
lx = 1
ly = 1
lz = 1
#
dx = lx/nx
dy = ly/ny
dz = lz/nz
#
vol0 = (np.pi/6.0)*d0**3
area0 = (np.pi/1.0)*d0**2
time = '0000000'
drop = {}
#
# quantities to be plotted
#
drop_files = np.sort(glob.glob(path+'/xposfld*'))
timeSeries = {}
#
for nfile in range(len(drop_files)):
#
time = drop_files[nfile][-11:-4]
print (time)
#
# read the binary files - volumetric
#
drop = {}
drop['x_dp'] = readBin(path+'xposfld'+time+'.bin') # position-x
drop['y_dp'] = readBin(path+'yposfld'+time+'.bin') # position-y
drop['z_dp'] = readBin(path+'zposfld'+time+'.bin') # position-z
drop['u_dp'] = readBin(path+'uvelfld'+time+'.bin') # velocity-x
drop['v_dp'] = readBin(path+'vvelfld'+time+'.bin') # velocity-y
drop['w_dp'] = readBin(path+'wvelfld'+time+'.bin') # velocity-z
drop['vold'] = readBin(path+'voldfld'+time+'.bin') # volume of the droplet
#
# read the binary files - processors
#
drop['id'] = readBinI(path+'dridfld'+time+'.bin')
drop['procShared'] = readBinI(path+'procfld'+time+'.bin')
drop['idShared'] = readBinI(path+'idshfld'+time+'.bin')
drop['order'] = readBinI(path+'ordefld'+time+'.bin')
drop['procShared'] = np.reshape(drop['procShared'],(len(drop['x_dp']),6),order='c')
drop['idShared'] = np.reshape(drop['idShared'],(len(drop['x_dp']),6),order='c')
drop['order'] = np.reshape(drop['order'],(len(drop['x_dp']),6),order='c')
drop['myid'] = readBinI(path+'idmyfld'+time+'.bin')
#
nd = 0
newId = np.zeros(len(drop['x_dp']))
#
# i index cycles over the disperse phase number
#
for i in range(len(drop['x_dp'])):
if newId[i]==0:
nd += 1
newId[i] = nd
if (drop['procShared'][i,:]>-1).any():
newTag(i,nd)
final = {}
for i in range(1,nd+1):
#
corrX=corrY=corrZ= False
#
# Determine if the droplet is crossing a boundary by checking the difference
# between the min and max coordinates
#
spacX = abs(np.min(drop['x_dp'][newId==i])-
np.max(drop['x_dp'][newId==i]))/lx
spacY = abs(np.min(drop['y_dp'][newId==i])-
np.max(drop['y_dp'][newId==i]))/ly
spacZ = abs(np.min(drop['z_dp'][newId==i])-
np.max(drop['z_dp'][newId==i]))/lz
#
# Set the correction flag accordingly
#
corrX = spacX>maxD
corrY = spacY>maxD
corrZ = spacZ>maxD
#print(corrZ)
#
# compute the approximate center and diameter
#
x_tmp = np.sum(drop['x_dp'][newId==i]*drop['vold'][newId==i])/np.sum(drop['vold'][newId==i])
y_tmp =
|
np.sum(drop['y_dp'][newId==i]*drop['vold'][newId==i])
|
numpy.sum
|
import numpy
from six import string_types
from netCDF4 import Dataset
from pyproj import Proj
from trefoil.netcdf.crs import get_crs, is_geographic, PROJ4_GEOGRAPHIC
from trefoil.netcdf.utilities import get_ncattrs
from trefoil.netcdf.variable import SpatialCoordinateVariable, SpatialCoordinateVariables, DateVariable
X_DIMENSION_STANDARD_NAMES = ('longitude', 'grid_longitude', 'projection_x_coordinate')
X_DIMENSION_COMMON_NAMES = ('longitude', 'lon', 'long', 'x')
Y_DIMENSION_STANDARD_NAMES = ('latitude', 'grid_latitude', 'projection_y_coordinate')
Y_DIMENSION_COMMON_NAMES = ('latitude', 'lat', 'y')
TIME_DIMENSION_STANDARD_NAMES = ('time',)
TIME_DIMENSION_COMMON_NAMES = ('time', 'year', 'years') # TODO: months?
def get_interval(data):
if data.shape[0] > 1:
unique_intervals =
|
numpy.unique(data[1:] - data[:-1])
|
numpy.unique
|
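# A hedged sketch of the interval check above: for a regularly spaced
# coordinate vector the pairwise differences collapse to a single unique value,
# which get_interval can then return as the grid interval.
import numpy
toy_coords = numpy.arange(0.0, 1.0, 0.25)
numpy.unique(toy_coords[1:] - toy_coords[:-1])   # -> array([0.25])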
import numpy
import pytest
import pynegf
from pynegf_test import utils
# Skip if libnegf is not available.
if pynegf.cdll_libnegf() is None:
pytest.skip(
"libnegf backengine not available on the system",
allow_module_level=True)
def _transmission_linear_chain_dephasing(coupling=None):
"""
Utility to calculate the transmission in presence of diagonal
dephasing for a nearest neighbor linear chain.
"""
negf = pynegf.PyNegf()
# Build the sparse hamiltonian for the nearest-neighbor linear chain.
mat_csr = utils.orthogonal_linear_chain(
nsites=100, contact_size=10, coupling=1.0)
negf.set_hamiltonian(mat_csr)
# Set an identity overlap matrix.
negf.set_identity_overlap(100)
# Initialize the system structure.
negf.init_structure(
2,
numpy.array([89, 99]),
numpy.array([79, 89]))
# Initialize parameters relevant for the transmission.
negf.params.g_spin = 1
negf.params.emin = -2.5
negf.params.emax = 2.5
negf.params.estep = 0.025
negf.params.mu[0] = 2.1
negf.params.mu[1] = -2.1
negf.verbosity = 0
negf.set_params()
if coupling is not None:
negf.set_diagonal_elph_dephasing(numpy.array([coupling]*80))
negf.solve_landauer()
# Get transmission, dos and energies as numpy object
energies = negf.energies()
if coupling is None:
transmission = negf.transmission()
else:
transmission = negf.energy_current()
return energies, transmission
def test_transmission_dephasing_linear_chain():
"""
Test that we can calculate the transmission with dephasing for an
ideal linear chain.
"""
energies, ballistic_transmission = _transmission_linear_chain_dephasing()
dephasing_transmissions = []
for coupling in [0.0, 0.01, 0.05]:
energies, transmission = _transmission_linear_chain_dephasing(
coupling=coupling)
dephasing_transmissions.append(transmission)
# The ballistic transmission should be equal to the dephasing
# case with zero coupling.
assert numpy.linalg.norm(
ballistic_transmission - dephasing_transmissions[0]
) == pytest.approx(0.)
# Increasing the coupling, the transmission should go lower.
tol = 0.001
assert (dephasing_transmissions[1] < dephasing_transmissions[0] + tol).all()
assert (dephasing_transmissions[2] < dephasing_transmissions[1] + tol).all()
# A quantitative check on the mid-point.
mid_point = energies.size // 2
assert dephasing_transmissions[0][0, mid_point] == pytest.approx(1.0)
assert dephasing_transmissions[1][0, mid_point] == pytest.approx(0.999, abs=1e-3)
assert dephasing_transmissions[2][0, mid_point] == pytest.approx(0.95, abs=1e-2)
def _density_linear_chain_dephasing(coupling=None, orthogonal=True):
"""
Utility to calculate the density matrix in presence of diagonal
dephasing for a nearest neighbor linear chain.
"""
negf = pynegf.PyNegf()
# Build the sparse hamiltonian for the nearest-neighbor linear chain.
mat_csr = utils.orthogonal_linear_chain(
nsites=50, contact_size=10, coupling=1.0)
if orthogonal:
negf.set_hamiltonian(mat_csr)
# Set an identity overlap matrix.
negf.set_identity_overlap(50)
else:
negf.set_hamiltonian(mat_csr)
mat_csr = utils.orthogonal_linear_chain(
nsites=50, contact_size=10, coupling=0.1, onsite=1.0)
# This is to make sure that S is positive definite.
numpy.linalg.cholesky(mat_csr.todense())
# Set an identity overlap matrix.
negf.set_overlap(mat_csr)
# Initialize the system structure.
negf.init_structure(
2,
numpy.array([39, 49]),
numpy.array([29, 39]))
# Initialize parameters relevant for the density matrix calculation.
negf.params.ec = -3.5
negf.params.mu[0] = -0.1
negf.params.mu[1] = 0.1
negf.params.kbt_dm[0] = 0.001
negf.params.kbt_dm[1] = 0.001
negf.params.np_real[0] = 50
negf.params.verbose = 100
negf.set_params()
if coupling is not None:
negf.set_diagonal_elph_dephasing(numpy.array([coupling]*30))
negf.solve_density()
# Get the density matrix.
density_matrix = negf.density_matrix()
return density_matrix
def test_density_matrix_dephasing_linear_chain():
"""
Test that we can calculate the density matrix with dephasing for an
ideal linear chain.
"""
ballistic_density_matrix = _density_linear_chain_dephasing()
ballistic_density_matrix = ballistic_density_matrix.todense()
dephasing_density_matrix = []
for coupling in [0.0, 0.05, 0.5]:
density_matrix = _density_linear_chain_dephasing(
coupling=coupling, orthogonal=True)
dephasing_density_matrix.append(density_matrix.todense())
# The ballistic density matrix should be equal to the dephasing
# case with zero coupling.
assert numpy.linalg.norm(
ballistic_density_matrix - dephasing_density_matrix[0]
) == pytest.approx(0.)
# In presence of dephasing the occupation should be a ramp,
# decreasing from the left to the right contact.
for dm in dephasing_density_matrix[1:]:
for i in range(1, 29):
assert numpy.diagonal(dm)[i - 1] > numpy.diagonal(dm)[i]
# The difference in density between first and last device site
# should increase with increasing dephasing.
dm1 = numpy.real(numpy.diagonal(dephasing_density_matrix[1]))
dm2 = numpy.real(numpy.diagonal(dephasing_density_matrix[2]))
assert ((dm1[0] - dm1[29]) < (dm2[0] - dm2[29]))
# Mid-value check. The density ramp should cross the ballistic
# one at half chain length.
assert (all(dm1[:15] > 1.0) and all(dm1[15:] < 1.0))
assert (all(dm2[:15] > 1.0) and all(dm2[15:] < 1.0))
# Approximate delta determined by inspection, for regression.
assert dm1[0] - dm1[29] == pytest.approx(0.00115, rel=0.01)
assert dm2[0] - dm2[29] == pytest.approx(0.0399, rel=0.01)
@pytest.mark.skip(reason="Not clear if the backengine is correct physically")
def test_density_matrix_dephasing_linear_chain_overlap():
"""
Test that we can calculate the density matrix with dephasing for an
ideal linear chain with overlap.
"""
ballistic_density_matrix = _density_linear_chain_dephasing(
orthogonal=False)
ballistic_density_matrix = ballistic_density_matrix.todense()
dephasing_density_matrix = []
for coupling in [0.0, 0.1, 0.5]:
density_matrix = _density_linear_chain_dephasing(
coupling=coupling, orthogonal=False)
dephasing_density_matrix.append(density_matrix.todense())
# The ballistic density matrix should be equal to the dephasing
# case with zero coupling.
assert numpy.linalg.norm(
ballistic_density_matrix - dephasing_density_matrix[0]
) == pytest.approx(0.)
# In presence of dephasing the occupation should be a ramp,
# decreasing from the left to the right contact.
for dm in dephasing_density_matrix[1:]:
for i in range(1, 29):
assert numpy.diagonal(dm)[i - 1] > numpy.diagonal(dm)[i]
# The difference in density between first and last device site
# should increase with increasing dephasing.
dm1 = numpy.real(numpy.diagonal(dephasing_density_matrix[1]))
dm2 = numpy.real(numpy.diagonal(dephasing_density_matrix[2]))
assert ((dm1[0] - dm1[29]) < (dm2[0] - dm2[29]))
# Mid-value check. The density ramp should cross the ballistic
# one at half chain length.
assert (all(dm1[:15] > 1.0) and all(dm1[15:] < 1.0))
assert (all(dm2[:15] > 1.0) and all(dm2[15:] < 1.0))
# Approximate delta determined by inspection, for regression.
# TODO: values to be determined
# assert dm1[0] - dm1[29] == pytest.approx(0.00115, rel=0.01)
# assert dm2[0] - dm2[29] == pytest.approx(0.0399, rel=0.01)
def test_current_conservation_dephasing(coupling=None):
""" Test that we have current conservation at the electrodes """
currents = []
for orthogonal in [True, False]:
for (ni, nf) in [(1, 2), (2, 1)]:
negf = pynegf.PyNegf()
# Build the sparse hamiltonian for the nearest-neighbor linear chain.
mat_csr = utils.orthogonal_linear_chain(
nsites=50, contact_size=10, coupling=1.0)
if orthogonal:
negf.set_hamiltonian(mat_csr)
# Set an identity overlap matrix.
negf.set_identity_overlap(50)
else:
negf.set_hamiltonian(mat_csr)
mat_csr = utils.orthogonal_linear_chain(
nsites=50, contact_size=10, coupling=0.1, onsite=1.0)
# This is to make sure that S is positive definite.
numpy.linalg.cholesky(mat_csr.todense())
                # Set the (non-identity) overlap matrix.
negf.set_overlap(mat_csr)
# Initialize the system structure.
negf.init_structure(
2,
numpy.array([39, 49]),
|
numpy.array([29, 39])
|
numpy.array
|
import pandas as pd
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import os.path
iris_dataset = pd.read_csv("iris.csv")
iris_grps = iris_dataset.groupby("species")
def getiris(p1):
iris = []
iris = pd.read_csv(p1)
return iris
def des():
#iris = []
#iris = pd.read_csv(p1)
data = iris_dataset.describe()
print(data)
return data.to_string()
#group by species
def grpsby(p1):
iris = []
iris = pd.read_csv(p1)
iris_grps = iris.groupby("species")
for name, data in iris_grps:
print(name)
print("----------------------------------------------------\n")
print(data.iloc[:, 0:5])
#print(data.iloc[0:4])
print("\n")
#keep
#group by species
def descgrpsby():
#iris_grps = iris_dataset.groupby("species")
data = iris_grps.describe()
return data.to_string()
def find_range(s):
return s.max() - s.min()
#find the range
def comprange():
data = iris_grps.aggregate(find_range)
return data.to_string()
def std_dev(p1,p2):
iris = []
iris = pd.read_csv(p1)
return
|
np.std(iris[p2])
|
numpy.std
|
import scipy.io
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy as sp
import scipy.interpolate as spi # scipy.interpolate.UnivariateSpline
from io import StringIO
from datetime import date, time, datetime, timedelta
from math import exp, log, sqrt, e
def extrema(in_data):
flag = 1
dsize = in_data.size
spmax_1 = []
spmax_2 = []
spmax_1.append(0)
spmax_2.append(in_data[0])
jj = 1
kk = 1
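    # Scan the interior samples for local maxima (points >= both neighbours);
    # the first and last samples are stored as boundary pseudo-extrema.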
while jj < dsize - 1:
if (in_data[jj-1] <= in_data[jj]) and (in_data[jj] >= in_data[jj+1]):
spmax_1.append(jj)
spmax_2.append(in_data[jj])
kk += 1
jj += 1
spmax_1.append(dsize-1)
spmax_2.append(in_data[-1])
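    # With at least two interior maxima (kk >= 3), linearly extrapolate the
    # line through the two outermost maxima to the boundary samples and keep
    # the larger value, so the upper envelope does not dip below the signal
    # at the edges; otherwise flag = -1 signals that too few maxima exist.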
if kk >= 3:
slope1 = (spmax_2[1] - spmax_2[2]) / (spmax_1[1] - spmax_1[2])
tmp1 = slope1*(spmax_1[0] - spmax_1[1]) + spmax_2[1]
if tmp1 > spmax_2[0]:
spmax_2[0] = tmp1
slope2 = (spmax_2[kk-1] - spmax_2[kk-2]) / (spmax_1[kk-1] - spmax_1[kk-2])
tmp2 = slope2*(spmax_1[kk] - spmax_1[kk-1]) + spmax_2[kk-1]
if tmp2 > spmax_2[kk]:
spmax_2[kk] = tmp2
else:
flag = -1
msize = in_data.size
dsize = np.max(msize)
xsize = dsize/3
xsize2 = 2*xsize
spmin_1 = []
spmin_2 = []
spmin_1.append(0)
spmin_2.append(in_data[0])
jj = 1
kk = 1
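    # Repeat the scan for local minima and extrapolate the lower envelope
    # at the boundaries in the same way (keeping the smaller value).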
while jj < dsize-1:
if (in_data[jj-1] >= in_data[jj]) and (in_data[jj] <= in_data[jj+1]):
spmin_1.append(jj)
spmin_2.append(in_data[jj])
kk += 1
jj += 1
spmin_1.append(dsize-1)
spmin_2.append(in_data[-1])
if kk >= 3:
slope1 = (spmin_2[1] - spmin_2[2]) / (spmin_1[1] - spmin_1[2])
tmp1 = slope1*(spmin_1[0] - spmin_1[1]) + spmin_2[1]
if tmp1 < spmin_2[0]:
spmin_2[0] = tmp1
slope2 = (spmin_2[kk-1] - spmin_2[kk-2]) / (spmin_1[kk-1] - spmin_1[kk-2])
tmp2 = slope2*(spmin_1[kk] - spmin_1[kk-1]) + spmin_2[kk-1]
if tmp2 < spmin_2[kk]:
spmin_2[kk] = tmp2
else:
flag = -1
flag = 1
return spmax_1, spmax_2, spmin_1, spmin_2, flag
def cubic_spline_3pts(x, y, T):
"""
    Apparently scipy.interpolate.interp1d does not support a
    cubic spline for fewer than 4 points.
"""
x0, x1, x2 = x
y0, y1, y2 = y
x1x0, x2x1 = x1-x0, x2-x1
y1y0, y2y1 = y1-y0, y2-y1
_x1x0, _x2x1 = 1./x1x0, 1./x2x1
m11, m12, m13= 2*_x1x0, _x1x0, 0
m21, m22, m23 = _x1x0, 2.*(_x1x0+_x2x1), _x2x1
m31, m32, m33 = 0, _x2x1, 2.*_x2x1
v1 = 3*y1y0*_x1x0*_x1x0
v3 = 3*y2y1*_x2x1*_x2x1
v2 = v1+v3
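    # Solve M @ k = (v1, v2, v3) for the knot first derivatives k = (k0, k1, k2)
    # of the interpolating cubic; the first and last rows correspond to
    # natural boundary conditions (zero second derivative at both ends).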
M =
|
np.array([[m11,m12,m13],[m21,m22,m23],[m31,m32,m33]])
|
numpy.array
|
import numpy as np
from matplotlib import pyplot as plt
import os.path as osp
import torch
import warnings
from omegaconf import DictConfig, OmegaConf
import pickle
import pandas as pd
import ruamel.yaml
from src import dataloader
def val_test_split(dataloader, val_ratio):
N = len(dataloader)
n_val = int(N * val_ratio)
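    # Deterministic split: the first n_val batches go to validation,
    # the remaining batches to test.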
val_idx = np.arange(n_val)
val_loader = [list(dataloader)[i] for i in val_idx]
test_loader = [list(dataloader)[i] for i in range(N) if i not in val_idx]
return val_loader, test_loader
def MSE_numpy(output, gt, mask):
diff = output - gt
diff2 = np.square(diff)
mse = np.sum(diff2 * mask) / np.sum(mask)
return mse
def MSE(output, gt, mask):
diff = torch.abs(output - gt)
diff2 = torch.square(diff)
mse = torch.sum(diff2 * mask) / torch.sum(mask)
return mse
def MSE_weighted(output, gt, mask, p=0.75):
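    # Masked MSE with per-element weights 1 + |gt|^p, so errors on
    # large-magnitude targets contribute more to the loss.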
diff = torch.abs(output - gt)
diff2 = torch.square(diff)
weight = 1 + torch.pow(torch.abs(gt), p)
mse = torch.sum(diff2 * weight * mask) / torch.sum(mask)
return mse
def MSE_root_transformed(output, gt, mask, root=3):
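    # Masked MSE on root-transformed values (cube root by default); predictions
    # are clamped to be non-negative via relu before taking the root.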
errors = (torch.pow(output.relu(), 1/root) - torch.pow(gt, 1/root))**2
errors = errors[mask]
mse = errors.mean()
return mse
def plot_training_curves(training_curves, val_curves, dir, log=True):
epochs = training_curves.shape[1]
fig, ax = plt.subplots()
with warnings.catch_warnings():
warnings.filterwarnings(action='ignore', message='Mean of empty slice')
warnings.filterwarnings(action='ignore', message='Degrees of freedom <= 0 for slice.')
train_line = ax.plot(range(1, epochs + 1), np.nanmean(training_curves, 0), label='training')
ax.fill_between(range(1, epochs + 1), np.nanmean(training_curves, 0) - np.nanstd(training_curves, 0),
np.nanmean(training_curves, 0) + np.nanstd(training_curves, 0), alpha=0.2,
color=train_line[0].get_color())
val_line = ax.plot(range(1, epochs + 1), np.nanmean(val_curves, 0), label='validation')
ax.fill_between(range(1, epochs + 1),
|
np.nanmean(val_curves, 0)
|
numpy.nanmean
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function
import os
import copy
import re
import collections
import numpy as np
import xml.etree.cElementTree as ElementTree
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.table import Table, Column
import fermipy
import fermipy.config
from fermipy import utils
from fermipy import wcs_utils
from fermipy import hpx_utils
from fermipy import catalog
from fermipy import defaults
from fermipy import model_utils
from fermipy import fits_utils
from fermipy.logger import Logger, log_level
from fermipy.model_utils import make_parameter_dict
from fermipy.model_utils import cast_pars_dict
from fermipy.model_utils import get_function_defaults
from fermipy.model_utils import get_spatial_type
from fermipy.model_utils import get_function_norm_par_name
from fermipy.model_utils import get_function_par_names
from fermipy.model_utils import extract_pars_from_dict
from fermipy.model_utils import create_pars_from_dict
def create_source_table(scan_shape):
"""Create an empty source table.
Returns
-------
tab : `~astropy.table.Table`
"""
cols_dict = collections.OrderedDict()
cols_dict['Source_Name'] = dict(dtype='S48', format='%s')
cols_dict['name'] = dict(dtype='S48', format='%s')
cols_dict['class'] = dict(dtype='S32', format='%s')
cols_dict['SpectrumType'] = dict(dtype='S32', format='%s')
cols_dict['SpatialType'] = dict(dtype='S32', format='%s')
cols_dict['SourceType'] = dict(dtype='S32', format='%s')
cols_dict['SpatialModel'] = dict(dtype='S32', format='%s')
cols_dict['RAJ2000'] = dict(dtype='f8', format='%.3f', unit='deg')
cols_dict['DEJ2000'] = dict(dtype='f8', format='%.3f', unit='deg')
cols_dict['GLON'] = dict(dtype='f8', format='%.3f', unit='deg')
cols_dict['GLAT'] = dict(dtype='f8', format='%.3f', unit='deg')
cols_dict['ts'] = dict(dtype='f8', format='%.3f')
cols_dict['loglike'] = dict(dtype='f8', format='%.3f')
cols_dict['npred'] = dict(dtype='f8', format='%.3f')
cols_dict['offset'] = dict(dtype='f8', format='%.3f', unit='deg')
cols_dict['offset_ra'] = dict(dtype='f8', format='%.3f', unit='deg')
cols_dict['offset_dec'] = dict(dtype='f8', format='%.3f', unit='deg')
cols_dict['offset_glon'] = dict(dtype='f8', format='%.3f', unit='deg')
cols_dict['offset_glat'] = dict(dtype='f8', format='%.3f', unit='deg')
cols_dict['offset_roi_edge'] = dict(dtype='f8', format='%.3f', unit='deg')
cols_dict['pivot_energy'] = dict(dtype='f8', format='%.3f', unit='MeV')
cols_dict['flux_scan'] = dict(dtype='f8', format='%.3f',
shape=scan_shape)
cols_dict['eflux_scan'] = dict(dtype='f8', format='%.3f',
shape=scan_shape)
cols_dict['norm_scan'] = dict(dtype='f8', format='%.3f',
shape=scan_shape)
cols_dict['dloglike_scan'] = dict(dtype='f8', format='%.3f',
shape=scan_shape)
cols_dict['loglike_scan'] = dict(dtype='f8', format='%.3f',
shape=scan_shape)
# Add source dictionary columns
for k, v in sorted(defaults.source_output.items()):
if not k in cols_dict.keys():
if v[2] == float:
cols_dict[k] = dict(dtype='f8', format='%f')
elif k == 'Spectrum_Filename' or k == 'Spatial_Filename':
cols_dict[k] = dict(dtype='S128', format='%s')
elif v[2] == str:
cols_dict[k] = dict(dtype='S32', format='%s')
cols_dict['param_names'] = dict(dtype='S32', format='%s', shape=(10,))
cols_dict['param_values'] = dict(dtype='f8', format='%f', shape=(10,))
cols_dict['param_errors'] = dict(dtype='f8', format='%f', shape=(10,))
# Catalog Parameters
cols_dict['Flux_Density'] = dict(
dtype='f8', format='%.5g', unit='1 / (MeV cm2 s)')
cols_dict['Spectral_Index'] = dict(dtype='f8', format='%.3f')
cols_dict['Pivot_Energy'] = dict(dtype='f8', format='%.3f', unit='MeV')
cols_dict['beta'] = dict(dtype='f8', format='%.3f')
cols_dict['Exp_Index'] = dict(dtype='f8', format='%.3f')
cols_dict['Cutoff'] = dict(dtype='f8', format='%.3f', unit='MeV')
cols_dict['Expfactor'] = dict(dtype='f8', format='%.3f')
cols_dict['Conf_68_PosAng'] = dict(dtype='f8', format='%.3f', unit='deg')
cols_dict['Conf_68_SemiMajor'] = dict(
dtype='f8', format='%.3f', unit='deg')
cols_dict['Conf_68_SemiMinor'] = dict(
dtype='f8', format='%.3f', unit='deg')
cols_dict['Conf_95_PosAng'] = dict(dtype='f8', format='%.3f', unit='deg')
cols_dict['Conf_95_SemiMajor'] = dict(
dtype='f8', format='%.3f', unit='deg')
cols_dict['Conf_95_SemiMinor'] = dict(
dtype='f8', format='%.3f', unit='deg')
for t in ['eflux', 'eflux100', 'eflux1000', 'eflux10000']:
cols_dict[t] = dict(dtype='f8', format='%.3f', unit='MeV / (cm2 s)')
cols_dict[t + '_err'] = dict(dtype='f8',
format='%.3f', unit='MeV / (cm2 s)')
for t in ['eflux_ul95', 'eflux100_ul95', 'eflux1000_ul95', 'eflux10000_ul95']:
cols_dict[t] = dict(dtype='f8', format='%.3f', unit='MeV / (cm2 s)')
for t in ['flux', 'flux100', 'flux1000', 'flux10000']:
cols_dict[t] = dict(dtype='f8', format='%.3f', unit='1 / (cm2 s)')
cols_dict[t + '_err'] = dict(dtype='f8',
format='%.3f', unit='1 / (cm2 s)')
for t in ['flux_ul95', 'flux100_ul95', 'flux1000_ul95', 'flux10000_ul95']:
cols_dict[t] = dict(dtype='f8', format='%.3f', unit='1 / (cm2 s)')
for t in ['dnde', 'dnde100', 'dnde1000', 'dnde10000']:
cols_dict[t] = dict(dtype='f8', format='%.3f', unit='1 / (MeV cm2 s)')
cols_dict[t + '_err'] = dict(dtype='f8',
format='%.3f', unit='1 / (MeV cm2 s)')
cols = [Column(name=k, **v) for k, v in cols_dict.items()]
tab = Table(cols)
return tab
def get_skydir_distance_mask(src_skydir, skydir, dist, min_dist=None,
square=False, coordsys='CEL'):
"""Retrieve sources within a certain angular distance of an
(ra,dec) coordinate. This function supports two types of
geometric selections: circular (square=False) and square
    (square=True). The circular selection finds all sources within a given
angular distance of the target position. The square selection
finds sources within an ROI-like region of size R x R where R
= 2 x dist.
Parameters
----------
src_skydir : `~astropy.coordinates.SkyCoord`
Array of sky directions.
skydir : `~astropy.coordinates.SkyCoord`
Sky direction with respect to which the selection will be applied.
dist : float
Maximum distance in degrees from the sky coordinate.
square : bool
Choose whether to apply a circular or square selection.
coordsys : str
Coordinate system to use when applying a selection with square=True.
"""
if dist is None:
dist = 180.
if not square:
dtheta = src_skydir.separation(skydir).rad
elif coordsys == 'CEL':
dtheta = get_linear_dist(skydir,
src_skydir.ra.rad,
src_skydir.dec.rad,
coordsys=coordsys)
elif coordsys == 'GAL':
dtheta = get_linear_dist(skydir,
src_skydir.galactic.l.rad,
src_skydir.galactic.b.rad,
coordsys=coordsys)
else:
raise Exception('Unrecognized coordinate system: %s' % coordsys)
msk = (dtheta < np.radians(dist))
if min_dist is not None:
msk &= (dtheta > np.radians(min_dist))
return msk
def get_linear_dist(skydir, lon, lat, coordsys='CEL'):
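    # Convert (lon, lat) to offset coordinates around skydir and return the
    # Chebyshev distance max(|x|, |y|) in radians (the metric used for the
    # square selection).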
xy = wcs_utils.sky_to_offset(skydir, np.degrees(lon), np.degrees(lat),
coordsys=coordsys)
x = np.radians(xy[:, 0])
y = np.radians(xy[:, 1])
delta = np.array([np.abs(x), np.abs(y)])
dtheta = np.max(delta, axis=0)
return dtheta
def get_dist_to_edge(skydir, lon, lat, width, coordsys='CEL'):
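    # Same offset projection, but return max(|x| - width, |y| - width):
    # negative inside the square region of half-width `width`, positive outside.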
xy = wcs_utils.sky_to_offset(skydir, np.degrees(lon), np.degrees(lat),
coordsys=coordsys)
x = np.radians(xy[:, 0])
y = np.radians(xy[:, 1])
delta_edge = np.array([np.abs(x) - width, np.abs(y) - width])
dtheta = np.max(delta_edge, axis=0)
return dtheta
def get_true_params_dict(pars_dict):
params = {}
for k, p in pars_dict.items():
val = float(p['value']) * float(p['scale'])
err = np.nan
if 'error' in p:
err = float(p['error']) * np.abs(float(p['scale']))
params[k] = {'value': val, 'error': err}
return params
def spatial_pars_from_catalog(cat):
if cat['Spatial_Function'] == 'RadialDisk':
rext = np.sqrt(cat['Model_SemiMajor'] * cat['Model_SemiMinor'])
return {'Radius': {'value': rext}}
elif cat['Spatial_Function'] == 'RadialGaussian':
sigma_to_r68 = np.sqrt(-2.0 * np.log(1.0 - 0.6827))
rext = np.sqrt(cat['Model_SemiMajor'] * cat['Model_SemiMinor'])
return {'Sigma': {'value': rext / sigma_to_r68}}
return {}
def spectral_pars_from_catalog(cat):
"""Create spectral parameters from 3FGL catalog columns."""
spectrum_type = cat['SpectrumType']
pars = get_function_defaults(cat['SpectrumType'])
par_idxs = {k: i for i, k in
enumerate(get_function_par_names(cat['SpectrumType']))}
for k in pars:
pars[k]['value'] = cat['param_values'][par_idxs[k]]
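    # The branches below flip the sign of the index parameter(s), storing them
    # with scale = -1, and wrap each parameter with make_parameter_dict to set
    # scales and bounds.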
if spectrum_type == 'PowerLaw':
pars['Index']['value'] *= -1.0
pars['Index']['scale'] = -1.0
pars['Scale']['scale'] = 1.0
pars['Index']['max'] = max(5.0, pars['Index']['value'] + 1.0)
pars['Index']['min'] = min(0.0, pars['Index']['value'] - 1.0)
pars['Prefactor'] = make_parameter_dict(pars['Prefactor'])
pars['Scale'] = make_parameter_dict(pars['Scale'], True, False)
pars['Index'] = make_parameter_dict(pars['Index'], False, False)
elif spectrum_type == 'LogParabola':
pars['norm'] = make_parameter_dict(pars['norm'], False, True)
pars['Eb'] = make_parameter_dict(pars['Eb'], True, False)
pars['alpha'] = make_parameter_dict(pars['alpha'], False, False)
pars['beta'] = make_parameter_dict(pars['beta'], False, False)
elif spectrum_type == 'PLSuperExpCutoff':
pars['Index1']['value'] *= -1.0
pars['Index1']['scale'] = -1.0
pars['Index2']['scale'] = 1.0
pars['Prefactor'] = make_parameter_dict(pars['Prefactor'])
pars['Scale'] = make_parameter_dict(pars['Scale'], True, False)
pars['Index1'] = make_parameter_dict(pars['Index1'], False, False)
pars['Index2'] = make_parameter_dict(pars['Index2'], False, False)
pars['Cutoff'] = make_parameter_dict(pars['Cutoff'], False, True)
elif spectrum_type == 'PLSuperExpCutoff2':
pars['Index1']['value'] *= -1.0
pars['Index1']['scale'] = -1.0
pars['Index2']['scale'] = 1.0
pars['Prefactor'] = make_parameter_dict(pars['Prefactor'])
pars['Scale'] = make_parameter_dict(pars['Scale'], True, False)
pars['Index1'] = make_parameter_dict(pars['Index1'], False, False)
pars['Index2'] = make_parameter_dict(pars['Index2'], False, False)
pars['Expfactor'] = make_parameter_dict(pars['Expfactor'], False, True)
elif spectrum_type == 'PLSuperExpCutoff3':
pars['IndexS']['value'] *= -1.0
pars['IndexS']['scale'] = -1.0
pars['Index2']['scale'] = 1.0
pars['Prefactor'] = make_parameter_dict(pars['Prefactor'])
pars['Scale'] = make_parameter_dict(pars['Scale'], True, False)
pars['IndexS'] = make_parameter_dict(pars['IndexS'], False, False)
pars['Index2'] = make_parameter_dict(pars['Index2'], False, False)
pars['Expfactor2'] = make_parameter_dict(pars['Expfactor2'], False, True)
elif spectrum_type == 'PLSuperExpCutoff4':
pars['IndexS']['value'] *= -1.0
pars['IndexS']['scale'] = -1.0
pars['Index2']['scale'] = 1.0
pars['Prefactor'] = make_parameter_dict(pars['Prefactor'])
pars['Scale'] = make_parameter_dict(pars['Scale'], True, False)
pars['IndexS'] = make_parameter_dict(pars['IndexS'], False, False)
pars['Index2'] = make_parameter_dict(pars['Index2'], False, False)
pars['ExpfactorS'] = make_parameter_dict(pars['ExpfactorS'], False, True)
else:
        raise Exception('Unsupported spectral type: ' + spectrum_type)
return pars
class Model(object):
"""Base class for point-like and diffuse source components. This
class is a container for spectral and spatial parameters as well
as other source properties such as TS, Npred, and location within
the ROI.
"""
def __init__(self, name, data):
self._data = defaults.make_default_dict(defaults.source_output)
self._data['spectral_pars'] = get_function_defaults(data['SpectrumType'])
if 'spectral_pars' in data:
for k, v in data['spectral_pars'].items():
if k in self._data['spectral_pars']:
self._data['spectral_pars'][k].update(v)
else:
self._data['spectral_pars'][k] = v.copy()
try:
self._data['spatial_pars'] = get_function_defaults(data['SpatialType'])
except:
            print(data)
            raise KeyError("Could not determine spatial parameter defaults "
                           "for SpatialType %r" % data.get('SpatialType'))
self._data.setdefault('catalog', data.pop('catalog', {}))
self._data.setdefault('assoc', data.pop('assoc', {}))
self._data.setdefault('class', '')
self._data['name'] = name
self._data.setdefault('psf_scale_fn', None)
self._data = utils.merge_dict(self._data, data)
self._names = [name]
catalog = self._data['catalog']
if 'CLASS1' in catalog:
self['class'] = catalog['CLASS1'].strip()
elif 'CLASS' in catalog:
self['class'] = catalog['CLASS'].strip()
for k in ROIModel.src_name_cols:
if k not in catalog:
continue
name = catalog[k].strip()
if name != '' and name not in self._names:
self._names.append(name)
self._data['assoc'][k] = name
self._sync_params()
def __contains__(self, key):
return key in self._data
def __getitem__(self, key):
return self._data[key]
def __setitem__(self, key, value):
self._data[key] = value
def __eq__(self, other):
return self.name == other.name
def __str__(self):
data = copy.deepcopy(self.data)
data['names'] = self.names
output = []
output += ['{:15s}:'.format('Name') + ' {name:s}']
output += ['{:15s}:'.format('TS') + ' {ts:.2f}']
output += ['{:15s}:'.format('Npred') + ' {npred:.2f}']
output += ['{:15s}:'.format('SpatialModel') + ' {SpatialModel:s}']
output += ['{:15s}:'.format('SpectrumType') + ' {SpectrumType:s}']
output += ['Spectral Parameters']
for i, p in enumerate(self['param_names']):
if not p:
break
val = self['param_values'][i]
err = self['param_errors'][i]
output += ['{:15s}: {:10.4g} +/- {:10.4g}'.format(p, val, err)]
return '\n'.join(output).format(**data)
def items(self):
return self._data.items()
@property
def data(self):
return self._data
@property
def spectral_pars(self):
return self._data['spectral_pars']
@property
def spatial_pars(self):
return self._data['spatial_pars']
@property
def params(self):
return get_true_params_dict(self._data['spectral_pars'])
@property
def is_free(self):
""" returns True if any of the spectral model parameters is set to free, else False
"""
return bool(np.array([int(value.get("free", False)) for key, value in self.spectral_pars.items()]).sum())
@property
def name(self):
return self._data['name']
@property
def names(self):
return self._names
@property
def assoc(self):
return self._data['assoc']
@property
def psf_scale_fn(self):
        return self._data['psf_scale_fn']
@staticmethod
def create_from_dict(src_dict, roi_skydir=None, rescale=False):
src_dict = copy.deepcopy(src_dict)
src_dict.setdefault('SpatialModel', 'PointSource')
src_dict.setdefault('SpatialType',
get_spatial_type(src_dict['SpatialModel']))
# Need this to handle old conventions for
# MapCubeFunction/ConstantValue sources
if src_dict['SpatialModel'] == 'DiffuseSource':
src_dict['SpatialModel'] = src_dict['SpatialType']
if 'filefunction' in src_dict:
src_dict['Spectrum_Filename'] = src_dict.pop('filefunction', str(''))
if 'mapcube' in src_dict:
src_dict['Spatial_Filename'] = src_dict.pop('mapcube', str(''))
if 'radialprofile' in src_dict:
src_dict['Spatial_Filename'] = src_dict.pop('radialprofile', str(''))
if 'spectral_pars' in src_dict:
src_dict['spectral_pars'] = cast_pars_dict(
src_dict['spectral_pars'])
if 'spatial_pars' in src_dict:
src_dict['spatial_pars'] = cast_pars_dict(src_dict['spatial_pars'])
if src_dict['SpatialModel'] == 'ConstantValue':
return IsoSource(src_dict['name'], src_dict)
elif src_dict['SpatialModel'] == 'CompositeSource':
return CompositeSource(src_dict['name'], src_dict)
elif src_dict['SpatialModel'] == 'MapCubeFunction':
return MapCubeSource(src_dict['name'], src_dict)
else:
return Source.create_from_dict(src_dict, roi_skydir,
rescale=rescale)
def _sync_params(self):
pars = model_utils.pars_dict_to_vectors(self['SpectrumType'],
self.spectral_pars)
self._data.update(pars)
def get_norm(self):
par_name = get_function_norm_par_name(self['SpectrumType'])
val = self.spectral_pars[par_name]['value']
scale = self.spectral_pars[par_name]['scale']
return float(val) * float(scale)
def add_to_table(self, tab):
row_dict = {}
row_dict['Source_Name'] = self['name']
row_dict['RAJ2000'] = self['ra']
row_dict['DEJ2000'] = self['dec']
row_dict['GLON'] = self['glon']
row_dict['GLAT'] = self['glat']
if not 'param_names' in self.data:
pars = model_utils.pars_dict_to_vectors(self['SpectrumType'],
self.spectral_pars)
row_dict.update(pars)
r68_semimajor = self['pos_err_semimajor'] * \
self['pos_r68'] / self['pos_err']
r68_semiminor = self['pos_err_semiminor'] * \
self['pos_r68'] / self['pos_err']
r95_semimajor = self['pos_err_semimajor'] * \
self['pos_r95'] / self['pos_err']
r95_semiminor = self['pos_err_semiminor'] * \
self['pos_r95'] / self['pos_err']
row_dict['Conf_68_PosAng'] = self['pos_angle']
row_dict['Conf_68_SemiMajor'] = r68_semimajor
row_dict['Conf_68_SemiMinor'] = r68_semiminor
row_dict['Conf_95_PosAng'] = self['pos_angle']
row_dict['Conf_95_SemiMajor'] = r95_semimajor
row_dict['Conf_95_SemiMinor'] = r95_semiminor
row_dict.update(self.get_catalog_dict())
for t in self.data.keys():
if t == 'params':
continue
if t in tab.columns:
row_dict[t] = self[t]
for k in ["Spatial_Filename", "Spectrum_Filename"]:
if row_dict[k] is None:
row_dict[k] = ''
row = [row_dict[k] for k in tab.columns]
tab.add_row(row)
def get_catalog_dict(self):
o = {'Spectral_Index': np.nan,
'Flux_Density': np.nan,
'Pivot_Energy': np.nan,
'beta': np.nan,
'Exp_Index': np.nan,
'Cutoff': np.nan,
'Expfactor': np.nan}
params = get_true_params_dict(self.spectral_pars)
if self['SpectrumType'] == 'PowerLaw':
o['Spectral_Index'] = -1.0 * params['Index']['value']
o['Flux_Density'] = params['Prefactor']['value']
o['Pivot_Energy'] = params['Scale']['value']
elif self['SpectrumType'] == 'LogParabola':
o['Spectral_Index'] = params['alpha']['value']
o['Flux_Density'] = params['norm']['value']
o['Pivot_Energy'] = params['Eb']['value']
o['beta'] = params['beta']['value']
elif self['SpectrumType'] == 'PLSuperExpCutoff':
o['Spectral_Index'] = -1.0 * params['Index1']['value']
o['Exp_Index'] = params['Index2']['value']
o['Flux_Density'] = params['Prefactor']['value']
o['Pivot_Energy'] = params['Scale']['value']
o['Cutoff'] = params['Cutoff']['value']
elif self['SpectrumType'] == 'PLSuperExpCutoff2':
o['Spectral_Index'] = -1.0 * params['Index1']['value']
o['Exp_Index'] = params['Index2']['value']
o['Flux_Density'] = params['Prefactor']['value']
o['Pivot_Energy'] = params['Scale']['value']
o['Expfactor'] = params['Expfactor']['value']
elif self['SpectrumType'] == 'PLSuperExpCutoff3':
o['Spectral_Index'] = -1.0 * params['IndexS']['value']
o['Exp_Index'] = params['Index2']['value']
o['Flux_Density'] = params['Prefactor']['value']
o['Pivot_Energy'] = params['Scale']['value']
o['Expfactor'] = params['Expfactor2']['value']
elif self['SpectrumType'] == 'PLSuperExpCutoff4':
o['Spectral_Index'] = -1.0 * params['IndexS']['value']
o['Exp_Index'] = params['Index2']['value']
o['Flux_Density'] = params['Prefactor']['value']
o['Pivot_Energy'] = params['Scale']['value']
o['Expfactor'] = params['ExpfactorS']['value']
return o
def check_cuts(self, cuts):
if cuts is None:
return True
if isinstance(cuts, tuple):
cuts = {cuts[0]: (cuts[1], cuts[2])}
elif isinstance(cuts, list):
cuts = {c[0]: (c[1], c[2]) for c in cuts}
for k, v in cuts.items():
# if not isinstance(c,tuple) or len(c) != 3:
# raise Exception('Wrong format for cuts tuple.')
if k in self._data:
if not utils.apply_minmax_selection(self[k], v):
return False
elif 'catalog' in self._data and k in self._data['catalog']:
if not utils.apply_minmax_selection(self['catalog'][k], v):
return False
else:
return False
return True
def set_psf_scale_fn(self, fn):
self._data['psf_scale_fn'] = fn
def set_spectral_pars(self, spectral_pars):
self._data['spectral_pars'] = copy.deepcopy(spectral_pars)
self._sync_params()
def update_spectral_pars(self, spectral_pars):
self._data['spectral_pars'] = utils.merge_dict(
self.spectral_pars, spectral_pars)
self._sync_params()
def set_name(self, name, names=None):
self._data['name'] = name
if names is None:
self._names = [name]
else:
self._names = names
def add_name(self, name):
if name not in self._names:
self._names.append(name)
def update_data(self, d):
self._data = utils.merge_dict(self._data, d, add_new_keys=True)
def update_from_source(self, src):
self._data['spectral_pars'] = {}
self._data['spatial_pars'] = {}
self._data = utils.merge_dict(self.data, src.data, add_new_keys=True)
self._name = src.name
self._names = list(set(self._names + src.names))
class IsoSource(Model):
def __init__(self, name, data):
data['SpectrumType'] = 'FileFunction'
data['SpatialType'] = 'ConstantValue'
data['SpatialModel'] = 'ConstantValue'
data['SourceType'] = 'DiffuseSource'
if not 'spectral_pars' in data:
data['spectral_pars'] = {
'Normalization': {'name': 'Normalization', 'scale': 1.0,
'value': 1.0,
'min': 0.001, 'max': 1000.0,
'free': False}}
super(IsoSource, self).__init__(name, data)
self._init_spatial_pars()
@property
def filefunction(self):
return self._data['Spectrum_Filename']
@property
def diffuse(self):
return True
def _init_spatial_pars(self):
self['spatial_pars'] = {
'Value': {'name': 'Value', 'scale': '1',
'value': '1', 'min': '0', 'max': '10',
'free': '0'}}
def write_xml(self, root, **kwargs):
filename = kwargs.get('Spectrum_Filename', self.filefunction)
        filename = utils.path_to_xmlpath(filename)
source_element = utils.create_xml_element(root, 'source',
dict(name=self.name,
type='DiffuseSource'))
spec_el = utils.create_xml_element(source_element, 'spectrum',
dict(file=filename,
type='FileFunction',
ctype='-1'))
spat_el = utils.create_xml_element(source_element, 'spatialModel',
dict(type='ConstantValue'))
for k, v in self.spectral_pars.items():
utils.create_xml_element(spec_el, 'parameter', v)
for k, v in self.spatial_pars.items():
utils.create_xml_element(spat_el, 'parameter', v)
class MapCubeSource(Model):
def __init__(self, name, data):
data.setdefault('SpectrumType', 'PowerLaw')
data['SpatialType'] = 'MapCubeFunction'
data['SpatialModel'] = 'MapCubeFunction'
data['SourceType'] = 'DiffuseSource'
if not 'spectral_pars' in data:
data['spectral_pars'] = {
'Prefactor': {'name': 'Prefactor', 'scale': 1.0,
'value': 1.0, 'min': 0.1, 'max': '10.0',
'free': False},
'Index': {'name': 'Index', 'scale': -1.0,
'value': 0.0, 'min': -1.0, 'max': 1.0,
'free': False},
'Scale': {'name': 'Scale', 'scale': 1.0,
'value': 1000.0,
'min': 1000.0, 'max': 1000.0,
'free': False},
}
super(MapCubeSource, self).__init__(name, data)
self._init_spatial_pars()
@property
def mapcube(self):
return self._data['Spatial_Filename']
@property
def diffuse(self):
return True
def _init_spatial_pars(self):
self['spatial_pars'] = {
'Normalization':
{'name': 'Normalization', 'scale': '1',
'value': '1', 'min': '0', 'max': '10',
'free': '0'}}
def write_xml(self, root, **kwargs):
filename = kwargs.get('Spatial_Filename', self.mapcube)
filename = utils.path_to_xmlpath(filename)
source_element = utils.create_xml_element(root, 'source',
dict(name=self.name,
type='DiffuseSource'))
spec_el = utils.create_xml_element(source_element, 'spectrum',
dict(type=self.data['SpectrumType']))
spat_el = utils.create_xml_element(source_element, 'spatialModel',
dict(type='MapCubeFunction',
file=filename))
for k, v in self.spectral_pars.items():
utils.create_xml_element(spec_el, 'parameter', v)
for k, v in self.spatial_pars.items():
utils.create_xml_element(spat_el, 'parameter', v)
class Source(Model):
"""Class representation of a source (non-diffuse) model component.
A source object serves as a container for the properties of that
source (position, spatial/spectral parameters, TS, etc.) as
derived in the current analysis. Most properties of a source
object can be accessed with the bracket operator:
# Return the TS of this source
>>> src['ts']
# Get a skycoord representation of the source position
>>> src.skydir
"""
def __init__(self, name, data, radec=None):
data.setdefault('SpatialModel', 'PointSource')
data.setdefault('SpectrumType', 'PowerLaw')
data.setdefault(
'SpatialType', model_utils.get_spatial_type(data['SpatialModel']))
data.setdefault(
'SourceType', model_utils.get_source_type(data['SpatialType']))
super(Source, self).__init__(name, data)
catalog = self.data.get('catalog', {})
if radec is not None:
self._set_radec(radec)
elif 'ra' in self.data and 'dec' in self.data:
self._set_radec([self.data['ra'], self.data['dec']])
elif 'RAJ2000' in catalog and 'DEJ2000' in catalog:
self._set_radec([catalog['RAJ2000'], catalog['DEJ2000']])
else:
raise Exception('Failed to infer RADEC for source: %s' % name)
self._init_spatial_pars(SpatialWidth=self['SpatialWidth'])
def __str__(self):
data = copy.deepcopy(self.data)
data['names'] = self.names
output = []
output += ['{:15s}:'.format('Name') + ' {name:s}']
output += ['{:15s}:'.format('Associations') + ' {names}']
output += ['{:15s}:'.format('RA/DEC') + ' {ra:10.3f}/{dec:10.3f}']
output += ['{:15s}:'.format('GLON/GLAT') +
' {glon:10.3f}/{glat:10.3f}']
output += ['{:15s}:'.format('TS') + ' {ts:.2f}']
output += ['{:15s}:'.format('Npred') + ' {npred:.2f}']
output += ['{:15s}:'.format('Flux') +
' {flux:9.4g} +/- {flux_err:8.3g}']
output += ['{:15s}:'.format('EnergyFlux') +
' {eflux:9.4g} +/- {eflux_err:8.3g}']
output += ['{:15s}:'.format('SpatialModel') + ' {SpatialModel:s}']
output += ['{:15s}:'.format('SpectrumType') + ' {SpectrumType:s}']
output += ['Spectral Parameters']
for i, p in enumerate(self['param_names']):
if not p:
break
val = self['param_values'][i]
err = self['param_errors'][i]
output += ['{:15s}: {:10.4g} +/- {:10.4g}'.format(p, val, err)]
return '\n'.join(output).format(**data)
def _set_radec(self, radec):
self['radec'] = np.array(radec, ndmin=1)
self['RAJ2000'] = radec[0]
self['DEJ2000'] = radec[1]
self['ra'] = radec[0]
self['dec'] = radec[1]
glonlat = utils.eq2gal(radec[0], radec[1])
self['glon'], self['glat'] = glonlat[0][0], glonlat[1][0]
if 'RA' in self.spatial_pars:
self.spatial_pars['RA']['value'] = radec[0]
self.spatial_pars['DEC']['value'] = radec[1]
def _set_spatial_width(self, spatial_width):
self.data['SpatialWidth'] = spatial_width
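        # SpatialWidth corresponds to the 68% containment radius:
        # r68 = sqrt(-2 ln(0.32)) * sigma ~ 1.50959 * sigma for a 2D Gaussian,
        # r68 = sqrt(0.68) * R ~ 0.82462 * R for a uniform disk, hence the
        # conversion factors below.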
if self['SpatialType'] in ['RadialGaussian']:
self.spatial_pars['Sigma'][
'value'] = spatial_width / 1.5095921854516636
elif self['SpatialType'] in ['RadialDisk']:
self.spatial_pars['Radius'][
'value'] = spatial_width / 0.8246211251235321
def _init_spatial_pars(self, **kwargs):
spatial_pars = copy.deepcopy(kwargs)
spatial_width = spatial_pars.pop('SpatialWidth', None)
if self['SpatialType'] == 'SkyDirFunction':
self._extended = False
self._data['SourceType'] = 'PointSource'
else:
self._extended = True
self._data['SourceType'] = 'DiffuseSource'
spatial_pars.setdefault('RA', spatial_pars.pop('ra', self['ra']))
spatial_pars.setdefault('DEC', spatial_pars.pop('dec', self['dec']))
for k, v in spatial_pars.items():
if not isinstance(v, dict):
spatial_pars[k] = {'name': k, 'value': v}
if k in self.spatial_pars:
self.spatial_pars[k].update(spatial_pars[k])
if spatial_width is not None:
self._set_spatial_width(spatial_width)
elif self['SpatialType'] == 'RadialDisk':
self['SpatialWidth'] = self.spatial_pars[
'Radius']['value'] * 0.8246211251235321
elif self['SpatialType'] == 'RadialGaussian':
self['SpatialWidth'] = self.spatial_pars[
'Sigma']['value'] * 1.5095921854516636
if 'RA' in spatial_pars or 'DEC' in spatial_pars:
self._set_radec([spatial_pars['RA']['value'],
spatial_pars['DEC']['value']])
def update_data(self, d):
self._data = utils.merge_dict(self._data, d, add_new_keys=True)
if 'ra' in d and 'dec' in d:
self._set_radec([d['ra'], d['dec']])
def set_radec(self, ra, dec):
self._set_radec(np.array([ra, dec]))
def set_position(self, skydir):
"""
Set the position of the source.
Parameters
----------
skydir : `~astropy.coordinates.SkyCoord`
"""
if not isinstance(skydir, SkyCoord):
skydir = SkyCoord(ra=skydir[0], dec=skydir[1], unit=u.deg)
if not skydir.isscalar:
skydir = np.ravel(skydir)[0]
radec = np.array([skydir.icrs.ra.deg, skydir.icrs.dec.deg])
self._set_radec(radec)
def set_roi_direction(self, roidir):
offset = roidir.separation(self.skydir).deg
offset_cel = wcs_utils.sky_to_offset(
roidir, self['ra'], self['dec'], 'CEL')
offset_gal = wcs_utils.sky_to_offset(
roidir, self['glon'], self['glat'], 'GAL')
self['offset'] = offset
self['offset_ra'] = offset_cel[0, 0]
self['offset_dec'] = offset_cel[0, 1]
self['offset_glon'] = offset_gal[0, 0]
self['offset_glat'] = offset_gal[0, 1]
def set_roi_geom(self, geom):
if geom is None:
return
self['offset_roi_edge'] = float(
wcs_utils.distance_to_edge(geom, self.skydir))
def set_spatial_model(self, spatial_model, spatial_pars):
update_pars = False
if spatial_model != self['SpatialModel']:
update_pars = True
self._data['SpatialModel'] = spatial_model
self._data['SpatialType'] = get_spatial_type(self['SpatialModel'])
if update_pars:
self._data['spatial_pars'] = get_function_defaults(
self['SpatialType'])
if spatial_model == 'PointSource':
self._data['SpatialWidth'] = None
self._init_spatial_pars(**spatial_pars)
def separation(self, src):
if isinstance(src, Source):
return self.radec.separation(src.skydir)
else:
return self.radec.separation(src)
@property
def diffuse(self):
return False
@property
def extended(self):
return self._extended
@property
def associations(self):
return self._names
@property
def radec(self):
return self['radec']
@property
def skydir(self):
"""Return a SkyCoord representation of the source position.
Returns
-------
skydir : `~astropy.coordinates.SkyCoord`
"""
return SkyCoord(self.radec[0] * u.deg, self.radec[1] * u.deg)
@property
def data(self):
return self._data
@classmethod
def create_from_dict(cls, src_dict, roi_skydir=None, rescale=False):
"""Create a source object from a python dictionary.
Parameters
----------
src_dict : dict
Dictionary defining the properties of the source.
"""
src_dict = copy.deepcopy(src_dict)
src_dict.setdefault('SpatialModel', 'PointSource')
src_dict.setdefault('Spectrum_Filename', None)
src_dict.setdefault('SpectrumType', 'PowerLaw')
src_dict['SpatialType'] = get_spatial_type(src_dict['SpatialModel'])
spectrum_type = src_dict['SpectrumType']
spatial_type = src_dict['SpatialType']
spectral_pars = src_dict.pop('spectral_pars', {})
spatial_pars = src_dict.pop('spatial_pars', {})
if not spectral_pars:
spectral_pars = extract_pars_from_dict(spectrum_type, src_dict)
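            # Leave the normalization parameter free by default.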
norm_par_name = get_function_norm_par_name(spectrum_type)
if norm_par_name is not None:
spectral_pars[norm_par_name].setdefault('free', True)
if not spatial_pars:
spatial_pars = extract_pars_from_dict(spatial_type, src_dict)
for k in ['RA', 'DEC', 'Prefactor']:
if k in spatial_pars:
del spatial_pars[k]
spectral_pars = create_pars_from_dict(spectrum_type, spectral_pars,
rescale)
#raise ValueError("%s %s" % (spatial_type, spatial_pars))
spatial_pars = create_pars_from_dict(spatial_type, spatial_pars,
False)
if 'file' in src_dict:
src_dict['Spectrum_Filename'] = src_dict.pop('file')
if spectrum_type == 'DMFitFunction' and src_dict['Spectrum_Filename'] is None:
src_dict['Spectrum_Filename'] = os.path.join('$FERMIPY_DATA_DIR',
'gammamc_dif.dat')
src_dict['spectral_pars'] = cast_pars_dict(spectral_pars)
src_dict['spatial_pars'] = cast_pars_dict(spatial_pars)
if 'name' in src_dict:
name = src_dict['name']
src_dict['Source_Name'] = src_dict.pop('name')
elif 'Source_Name' in src_dict:
name = src_dict['Source_Name']
else:
raise Exception('Source name undefined. %s' % src_dict)
skydir = wcs_utils.get_target_skydir(src_dict, roi_skydir)
src_dict['RAJ2000'] = skydir.ra.deg
src_dict['DEJ2000'] = skydir.dec.deg
radec = np.array([skydir.ra.deg, skydir.dec.deg])
return cls(name, src_dict, radec=radec)
@classmethod
def create_from_xmlfile(cls, xmlfile, extdir=None):
"""Create a Source object from an XML file.
Parameters
----------
xmlfile : str
Path to XML file.
extdir : str
Path to the extended source archive.
"""
root = ElementTree.ElementTree(file=xmlfile).getroot()
srcs = root.findall('source')
if len(srcs) == 0:
raise Exception('No sources found.')
return cls.create_from_xml(srcs[0], extdir=extdir)
@staticmethod
def create_from_xml(root, extdir=None):
"""Create a Source object from an XML node.
Parameters
----------
root : `~xml.etree.ElementTree.Element`
XML node containing the source.
extdir : str
Path to the extended source archive.
"""
src_type = root.attrib['type']
spec = utils.load_xml_elements(root, 'spectrum')
spectral_pars = utils.load_xml_elements(root, 'spectrum/parameter')
spectral_type = spec['type']
spectral_pars = cast_pars_dict(spectral_pars)
spat = {}
spatial_pars = {}
nested_sources = []
if src_type == 'CompositeSource':
spatial_type = 'CompositeSource'
source_library = root.findall('source_library')[0]
for node in source_library.findall('source'):
nested_sources += [Source.create_from_xml(node, extdir=extdir)]
else:
spat = utils.load_xml_elements(root, 'spatialModel')
spatial_pars = utils.load_xml_elements(
root, 'spatialModel/parameter')
spatial_pars = cast_pars_dict(spatial_pars)
spatial_type = spat['type']
xml_dict = copy.deepcopy(root.attrib)
src_dict = {'catalog': xml_dict}
src_dict['Source_Name'] = xml_dict['name']
src_dict['SpectrumType'] = spectral_type
src_dict['SpatialType'] = spatial_type
src_dict['SourceType'] = src_type
src_dict['Spatial_Filename'] = ''
src_dict['Spectrum_Filename'] = ''
if 'file' in spat:
src_dict['Spatial_Filename'] = utils.xmlpath_to_path(spat['file'])
if not os.path.isfile(src_dict['Spatial_Filename']) \
and extdir is not None:
src_dict['Spatial_Filename'] = \
os.path.join(extdir, 'Templates',
src_dict['Spatial_Filename'])
if 'file' in spec:
src_dict['Spectrum_Filename'] = utils.xmlpath_to_path(spec['file'])
if src_type == 'PointSource':
src_dict['SpatialModel'] = 'PointSource'
elif src_type == 'CompositeSource':
src_dict['SpatialModel'] = 'CompositeSource'
elif spatial_type == 'SpatialMap':
src_dict['SpatialModel'] = 'SpatialMap'
else:
src_dict['SpatialModel'] = spatial_type
if src_type == 'PointSource' or \
spatial_type in ['SpatialMap', 'RadialGaussian', 'RadialDisk']:
if 'RA' in xml_dict:
src_dict['RAJ2000'] = float(xml_dict['RA'])
src_dict['DEJ2000'] = float(xml_dict['DEC'])
elif 'RA' in spatial_pars:
src_dict['RAJ2000'] = float(spatial_pars['RA']['value'])
src_dict['DEJ2000'] = float(spatial_pars['DEC']['value'])
else:
try:
skydir = wcs_utils.get_map_skydir(os.path.expandvars(
src_dict['Spatial_Filename']))
except Exception:
skydir = hpx_utils.get_map_skydir(os.path.expandvars(
src_dict['Spatial_Filename']))
src_dict['RAJ2000'] = skydir.ra.deg
src_dict['DEJ2000'] = skydir.dec.deg
radec = np.array([src_dict['RAJ2000'], src_dict['DEJ2000']])
src_dict['spectral_pars'] = spectral_pars
src_dict['spatial_pars'] = spatial_pars
return Source(src_dict['Source_Name'],
src_dict, radec=radec)
elif src_type == 'DiffuseSource' and spatial_type == 'ConstantValue':
return IsoSource(src_dict['Source_Name'],
{'Spectrum_Filename': spec['file'],
'spectral_pars': spectral_pars,
'spatial_pars': spatial_pars})
elif src_type == 'DiffuseSource' and spatial_type == 'MapCubeFunction':
return MapCubeSource(src_dict['Source_Name'],
{'Spatial_Filename': spat['file'],
'SpectrumType': spectral_type,
'spectral_pars': spectral_pars,
'spatial_pars': spatial_pars})
elif src_type == 'CompositeSource':
return CompositeSource(src_dict['Source_Name'],
{'SpectrumType': spectral_type,
'nested_sources': nested_sources})
else:
raise Exception(
'Unrecognized type for source: %s %s' % (src_dict['Source_Name'], src_type))
def write_xml(self, root):
"""Write this source to an XML node."""
if not self.extended:
try:
source_element = utils.create_xml_element(root, 'source',
dict(name=self['Source_Name'],
type='PointSource'))
except TypeError as msg:
print (self['Source_Name'], self)
raise TypeError(msg)
spat_el = ElementTree.SubElement(source_element, 'spatialModel')
spat_el.set('type', 'SkyDirFunction')
elif self['SpatialType'] == 'SpatialMap':
source_element = utils.create_xml_element(root, 'source',
dict(name=self['Source_Name'],
type='DiffuseSource'))
filename = utils.path_to_xmlpath(self['Spatial_Filename'])
spat_el = utils.create_xml_element(source_element, 'spatialModel',
dict(map_based_integral='True',
type='SpatialMap',
file=filename))
else:
source_element = utils.create_xml_element(root, 'source',
dict(name=self['Source_Name'],
type='DiffuseSource'))
spat_el = utils.create_xml_element(source_element, 'spatialModel',
dict(type=self['SpatialType']))
for k, v in self.spatial_pars.items():
utils.create_xml_element(spat_el, 'parameter', v)
el = ElementTree.SubElement(source_element, 'spectrum')
stype = self['SpectrumType'].strip()
el.set('type', stype)
if self['Spectrum_Filename'] is not None:
filename = utils.path_to_xmlpath(self['Spectrum_Filename'])
el.set('file', filename)
for k, v in self.spectral_pars.items():
utils.create_xml_element(el, 'parameter', v)
class CompositeSource(Model):
def __init__(self, name, data):
data.setdefault('SpectrumType', 'ConstantValue')
data['SpatialType'] = 'CompositeSource'
data['SpatialModel'] = 'CompositeSource'
data['SourceType'] = 'CompositeSource'
if not 'spectral_pars' in data:
data['spectral_pars'] = {
'Value': {'name': 'Value', 'scale': 1.0,
'value': 1.0, 'min': 0.1, 'max': '10.0',
'free': False},
}
super(CompositeSource, self).__init__(name, data)
self._build_nested_sources(data)
@property
def nested_sources(self):
return self._nested_sources
@property
def diffuse(self):
return True
def _build_nested_sources(self, data):
self._nested_sources = []
for nested_source in data.get('nested_sources', []):
if isinstance(nested_source, Model):
self._nested_sources.append(copy.deepcopy(nested_source))
elif isinstance(nested_source, dict):
self._nested_sources.append(
Source.create_from_dict(nested_source))
def write_xml(self, root):
source_element = utils.create_xml_element(root, 'source',
dict(name=self.name,
type='CompositeSource'))
spec_el = utils.create_xml_element(source_element, 'spectrum',
dict(type=self.data['SpectrumType']))
for k, v in self.spectral_pars.items():
utils.create_xml_element(spec_el, 'parameter', v)
spat_el = utils.create_xml_element(
source_element, 'source_library', dict(title=self.name))
for nested_source in self._nested_sources:
nested_source.write_xml(spat_el)
class ROIModel(fermipy.config.Configurable):
"""This class is responsible for managing the ROI model (both sources
and diffuse components). Source catalogs can be read
from either FITS or XML files. Individual components are
represented by instances of `~fermipy.roi_model.Model` and can be
accessed by name using the bracket operator.
* Create an ROI with all 3FGL sources and print a summary of its contents:
>>> skydir = astropy.coordinates.SkyCoord(0.0,0.0,unit='deg')
>>> roi = ROIModel({'catalogs' : ['3FGL'],'src_roiwidth' : 10.0},skydir=skydir)
>>> print(roi)
name SpatialModel SpectrumType offset ts npred
--------------------------------------------------------------------------------
3FGL J2357.3-0150 PointSource PowerLaw 1.956 nan 0.0
3FGL J0006.2+0135 PointSource PowerLaw 2.232 nan 0.0
3FGL J0016.3-0013 PointSource PowerLaw 4.084 nan 0.0
3FGL J0014.3-0455 PointSource PowerLaw 6.085 nan 0.0
* Print a summary of an individual source
>>> print(roi['3FGL J0006.2+0135'])
Name : 3FGL J0006.2+0135
Associations : ['3FGL J0006.2+0135']
RA/DEC : 1.572/ 1.585
GLON/GLAT : 100.400/ -59.297
TS : nan
Npred : nan
Flux : nan +/- nan
EnergyFlux : nan +/- nan
SpatialModel : PointSource
SpectrumType : PowerLaw
Spectral Parameters
Index : -2 +/- nan
Scale : 1000 +/- nan
Prefactor : 1e-12 +/- nan
* Get the SkyCoord for a source
>>> dir = roi['SourceA'].skydir
* Loop over all sources and print their names
>>> for s in roi.sources: print(s.name)
3FGL J2357.3-0150
3FGL J0006.2+0135
3FGL J0016.3-0013
3FGL J0014.3-0455
"""
defaults = dict(defaults.model.items(),
fileio=defaults.fileio)
src_name_cols = ['Source_Name',
'ASSOC', 'ASSOC1', 'ASSOC2', 'ASSOC_GAM',
'1FHL_Name', '2FGL_Name', '3FGL_Name',
'ASSOC_GAM1', 'ASSOC_GAM2', 'ASSOC_TEV']
def __init__(self, config=None, **kwargs):
# Coordinate for ROI center (defaults to 0,0)
self._skydir = kwargs.pop('skydir', SkyCoord(0.0, 0.0, unit=u.deg))
self._geom = kwargs.get('geom', None)
coordsys = kwargs.pop('coordsys', 'CEL')
srcname = kwargs.pop('srcname', None)
super(ROIModel, self).__init__(config, **kwargs)
self._src_radius = self.config['src_radius']
if self.config['src_roiwidth'] is not None:
self._config['src_radius_roi'] = self.config['src_roiwidth'] * 0.5
self._srcs = []
self._diffuse_srcs = []
self._src_dict = collections.defaultdict(list)
self._src_radius = []
self.load(coordsys=coordsys, srcname=srcname)
def __contains__(self, key):
key = key.replace(' ', '').lower()
return key in self._src_dict.keys()
def __getitem__(self, key):
return self.get_source_by_name(key)
def __iter__(self):
return iter(self._srcs + self._diffuse_srcs)
def __str__(self):
o = ''
o += '%-20s%-15s%-15s%8s%10s%12s\n' % (
'name', 'SpatialModel', 'SpectrumType', 'offset',
'ts', 'npred')
o += '-' * 80 + '\n'
for s in sorted(self.sources, key=lambda t: t['offset']):
if s.diffuse:
continue
o += '%-20.19s%-15.14s%-15.14s%8.3f%10.2f%12.1f\n' % (
s['name'], s['SpatialModel'],
s['SpectrumType'],
s['offset'], s['ts'], s['npred'])
for s in sorted(self.sources, key=lambda t: t['offset']):
if not s.diffuse:
continue
o += '%-20.19s%-15.14s%-15.14s%8s%10.2f%12.1f\n' % (
s['name'], s['SpatialModel'],
s['SpectrumType'],
'-----', s['ts'], s['npred'])
return o
@property
def skydir(self):
"""Return the sky direction corresponding to the center of the
ROI."""
return self._skydir
@property
def geom(self):
return self._geom
@property
def sources(self):
return self._srcs + self._diffuse_srcs
@property
def point_sources(self):
return self._srcs
@property
def diffuse_sources(self):
return self._diffuse_srcs
@property
def extdir(self):
extdir = self.config['extdir']
if extdir is not None and not os.path.isdir(os.path.expandvars(extdir)):
return os.path.join('$FERMIPY_DATA_DIR',
'catalogs', extdir)
else:
return extdir
def set_geom(self, geom):
self._geom = geom
for s in self._srcs:
s.set_roi_geom(geom)
def clear(self):
"""Clear the contents of the ROI."""
self._srcs = []
self._diffuse_srcs = []
self._src_dict = collections.defaultdict(list)
self._src_radius = []
def load_diffuse_srcs(self):
srcs = self.create_diffuse_srcs(self.config)
for src in srcs:
self.load_source(src, False, self.config['merge_sources'])
def create_diffuse_srcs(self, config):
srcs = []
srcs += self._create_diffuse_src('isodiff', config)
srcs += self._create_diffuse_src('galdiff', config)
srcs += self._create_diffuse_src('limbdiff', config)
srcs += self._create_diffuse_src('diffuse', config)
srcs += self._create_diffuse_src_from_xml(config)
return srcs
def _create_diffuse_src(self, name, config, src_type='FileFunction'):
if 'FERMI_DIR' in os.environ and 'FERMI_DIFFUSE_DIR' not in os.environ:
os.environ['FERMI_DIFFUSE_DIR'] = \
os.path.expandvars('$FERMI_DIR/refdata/fermi/galdiffuse')
search_dirs = []
if config.get('diffuse_dir', []):
search_dirs += config.get('diffuse_dir', [])
search_dirs += [self.config['fileio']['outdir'],
'$FERMI_DIFFUSE_DIR',
'$FERMIPY_DATA_DIR']
srcs = []
if config is not None:
srcs = config[name]
elif self.config[name] is not None:
srcs = self.config[name]
srcs_out = []
for i, t in enumerate(srcs):
if utils.isstr(t):
src_dict = {'file': t}
elif isinstance(t, dict):
src_dict = copy.deepcopy(t)
else:
raise Exception(
'Invalid type in diffuse mode list: %s' % str(type(t)))
src_dict['file'] = \
utils.resolve_file_path(src_dict['file'],
search_dirs=search_dirs)
if 'name' not in src_dict:
if len(srcs) == 1:
src_dict['name'] = name
else:
src_dict['name'] = name + '%02i' % i
if re.search(r'(\.txt$)', src_dict['file']):
src_type = 'FileFunction'
elif re.search(r'(\.fits$|\.fit$|\.fits.gz$|\.fit.gz$)',
src_dict['file']):
src_type = 'MapCubeFunction'
else:
raise Exception(
'Unrecognized file format for diffuse model: %s' % src_dict[
'file'])
# Extract here
if src_type == 'FileFunction':
src = IsoSource(src_dict['name'], {
'Spectrum_Filename': src_dict['file']})
altname = os.path.basename(src_dict['file'])
altname = re.sub(r'(\.txt$)', '', altname)
else:
# src = MapCubeSource(src_dict['name'], {
# 'Spatial_Filename': src_dict['file'],
sp_filename = src_dict.pop('file')
src_dict['Spatial_Filename'] = sp_filename
src = MapCubeSource(src_dict['name'], src_dict)
altname = os.path.basename(sp_filename)
altname = re.sub(r'(\.fits$|\.fit$|\.fits.gz$|\.fit.gz$)',
'', altname)
src.add_name(altname)
srcs_out += [src]
return srcs_out
def _create_diffuse_src_from_xml(self, config, src_type='FileFunction'):
"""Load sources from an XML file.
"""
diffuse_xmls = config.get('diffuse_xml')
srcs_out = []
for diffuse_xml in diffuse_xmls:
srcs_out += self.load_xml(diffuse_xml, coordsys=config.get('coordsys', 'CEL'))
return srcs_out
def create_source(self, name, src_dict, build_index=True,
merge_sources=True, rescale=True):
"""Add a new source to the ROI model from a dictionary or an
existing source object.
Parameters
----------
name : str
src_dict : dict or `~fermipy.roi_model.Source`
Returns
-------
src : `~fermipy.roi_model.Source`
"""
src_dict = copy.deepcopy(src_dict)
if isinstance(src_dict, dict):
src_dict['name'] = name
src = Model.create_from_dict(src_dict, self.skydir,
rescale=rescale)
else:
src = src_dict
src.set_name(name)
if isinstance(src, Source):
src.set_roi_direction(self.skydir)
src.set_roi_geom(self.geom)
self.load_source(src, build_index=build_index,
merge_sources=merge_sources)
return self.get_source_by_name(name)
def copy_source(self, name):
src = self.get_source_by_name(name)
return copy.deepcopy(src)
def load_sources(self, sources):
"""Delete all sources in the ROI and load the input source list."""
self.clear()
for s in sources:
if isinstance(s, dict):
s = Model.create_from_dict(s)
self.load_source(s, build_index=False)
self._build_src_index()
def _add_source_alias(self, name, src):
if src not in self._src_dict[name]:
self._src_dict[name] += [src]
def load_source(self, src, build_index=True, merge_sources=True,
**kwargs):
"""
Load a single source.
Parameters
----------
src : `~fermipy.roi_model.Source`
Source object that will be added to the ROI.
merge_sources : bool
            When a source matches an existing source in the model,
            update that source with the properties of the new source.
build_index : bool
Re-make the source index after loading this source.
"""
src = copy.deepcopy(src)
name = src.name.replace(' ', '').lower()
min_sep = kwargs.get('min_separation', None)
if min_sep is not None:
sep = src.skydir.separation(self._src_skydir).deg
if len(sep) > 0 and np.min(sep) < min_sep:
return
match_srcs = self.match_source(src)
if len(match_srcs) == 1:
# self.logger.debug('Found matching source for %s : %s',
# src.name, match_srcs[0].name)
if merge_sources:
match_srcs[0].update_from_source(src)
else:
match_srcs[0].add_name(src.name)
self._add_source_alias(src.name.replace(' ', '').lower(),
match_srcs[0])
return
        elif len(match_srcs) > 1:
raise Exception('Multiple sources with name %s' % name)
self._add_source_alias(src.name, src)
for name in src.names:
self._add_source_alias(name.replace(' ', '').lower(), src)
if isinstance(src, Source):
self._srcs.append(src)
else:
self._diffuse_srcs.append(src)
if build_index:
self._build_src_index()
def match_source(self, src):
"""Look for source or sources in the model that match the
given source. Sources are matched by name and any association
columns defined in the assoc_xmatch_columns parameter.
"""
srcs = []
names = [src.name]
for col in self.config['assoc_xmatch_columns']:
if col in src.assoc and src.assoc[col]:
names += [src.assoc[col]]
for name in names:
name = name.replace(' ', '').lower()
if name not in self._src_dict:
continue
srcs += [s for s in self._src_dict[name] if s not in srcs]
return srcs
def load(self, **kwargs):
"""Load both point source and diffuse components."""
coordsys = kwargs.get('coordsys', 'CEL')
extdir = kwargs.get('extdir', self.extdir)
srcname = kwargs.get('srcname', None)
self.clear()
self.load_diffuse_srcs()
for c in self.config['catalogs']:
if isinstance(c, catalog.Catalog):
self.load_existing_catalog(c)
continue
extname = os.path.splitext(c)[1]
if extname != '.xml':
self.load_fits_catalog(c, extdir=extdir, coordsys=coordsys,
srcname=srcname)
elif extname == '.xml':
self.load_xml(c, extdir=extdir, coordsys=coordsys)
else:
raise Exception('Unrecognized catalog file extension: %s' % c)
for c in self.config['sources']:
if 'name' not in c:
raise Exception(
'No name field in source dictionary:\n ' + str(c))
self.create_source(c['name'], c, build_index=False)
self._build_src_index()
def delete_sources(self, srcs):
to_del = []
for k, v in self._src_dict.items():
for s in srcs:
if s in v:
self._src_dict[k].remove(s)
if not v:
to_del.append(k)
while to_del:
ss = to_del.pop()
self._src_dict.pop(ss)
del ss
self._srcs = [s for s in self._srcs if s not in srcs]
self._diffuse_srcs = [s for s in self._diffuse_srcs if s not in srcs]
self._build_src_index()
@classmethod
def create_from_roi_data(cls, datafile):
"""Create an ROI model."""
data = np.load(datafile).flat[0]
roi = cls()
roi.load_sources(data['sources'].values())
return roi
@classmethod
def create(cls, selection, config, **kwargs):
"""Create an ROIModel instance."""
if selection['target'] is not None:
return cls.create_from_source(selection['target'],
config, **kwargs)
else:
target_skydir = wcs_utils.get_target_skydir(selection)
return cls.create_from_position(target_skydir, config, **kwargs)
@classmethod
def create_from_position(cls, skydir, config, **kwargs):
"""Create an ROIModel instance centered on a sky direction.
Parameters
----------
skydir : `~astropy.coordinates.SkyCoord`
Sky direction on which the ROI will be centered.
config : dict
Model configuration dictionary.
"""
coordsys = kwargs.pop('coordsys', 'CEL')
roi = cls(config, skydir=skydir, coordsys=coordsys, **kwargs)
return roi
@classmethod
def create_from_source(cls, name, config, **kwargs):
"""Create an ROI centered on the given source."""
coordsys = kwargs.pop('coordsys', 'CEL')
roi = cls(config, src_radius=None, src_roiwidth=None,
srcname=name, **kwargs)
src = roi.get_source_by_name(name)
return cls.create_from_position(src.skydir, config,
coordsys=coordsys, **kwargs)
@classmethod
def create_roi_from_ft1(cls, ft1file, config):
"""Create an ROI model by extracting the sources coordinates
form an FT1 file."""
raise NotImplementedError
def has_source(self, name):
index_name = name.replace(' ', '').lower()
if index_name in self._src_dict:
return True
else:
return False
def get_source_by_name(self, name):
"""Return a single source in the ROI with the given name. The
input name string can match any of the strings in the names
property of the source object. Case and whitespace are
        ignored when matching name strings. If no source is found or
        if multiple sources match, an exception is thrown.
Parameters
----------
name : str
Name string.
Returns
-------
srcs : `~fermipy.roi_model.Model`
A source object.
"""
srcs = self.get_sources_by_name(name)
if len(srcs) == 1:
return srcs[0]
elif len(srcs) == 0:
raise Exception('No source matching name: ' + name)
elif len(srcs) > 1:
raise Exception('Multiple sources matching name: ' + name)
def get_sources_by_name(self, name):
"""Return a list of sources in the ROI matching the given
name. The input name string can match any of the strings in
the names property of the source object. Case and whitespace
are ignored when matching name strings.
Parameters
----------
name : str
Returns
-------
srcs : list
A list of `~fermipy.roi_model.Model` objects.
"""
index_name = name.replace(' ', '').lower()
if index_name in self._src_dict:
return list(self._src_dict[index_name])
else:
raise Exception('No source matching name: ' + name)
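    # Minimal usage sketch (hypothetical `roi` instance and source name):
    # lookups are case- and whitespace-insensitive, so these calls resolve to
    # the same index key and return the same sources:
    #     roi.get_sources_by_name('3FGL J0534.5+2201')
    #     roi.get_sources_by_name('3fglj0534.5+2201')
    #     roi.get_source_by_name('3FGL J0534.5+2201')  # raises unless exactly one match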
def get_nearby_sources(self, name, distance, min_dist=None,
square=False):
src = self.get_source_by_name(name)
return self.get_sources_by_position(src.skydir,
distance, min_dist,
square)
def get_sources(self, skydir=None, distance=None, cuts=None,
minmax_ts=None, minmax_npred=None,
exclude=None, square=False, coordsys='CEL',
names=None):
"""Retrieve list of source objects satisfying the following
selections:
* Angular separation from ``skydir`` or ROI center (if
``skydir`` is None) less than ``distance``.
* Cuts on source properties defined in ``cuts`` list.
* TS and Npred in range specified by ``minmax_ts`` and ``minmax_npred``.
* Name matching a value in ``names``
Sources can be excluded from the selection by adding their
name to the ``exclude`` list. ``exclude`` can be a str or a list of str.
Returns
-------
srcs : list
List of source objects.
"""
if skydir is None:
skydir = self.skydir
if exclude is None:
exclude = []
exclude = utils.arg_to_list(exclude)
rsrc, srcs = self.get_sources_by_position(skydir,
distance,
square=square,
coordsys=coordsys)
o = []
for s in srcs + self.diffuse_sources:
if names and s.name not in names:
continue
if s.name in exclude:
continue
if not s.check_cuts(cuts):
continue
ts = s['ts']
npred = s['npred']
if not utils.apply_minmax_selection(ts, minmax_ts):
continue
if not utils.apply_minmax_selection(npred, minmax_npred):
continue
o.append(s)
return o
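    # Minimal usage sketch (hypothetical `roi` instance; argument values are
    # assumptions): select sources within 3 deg of the ROI center that have
    # TS >= 25 and Npred >= 100, skipping one named source:
    #     srcs = roi.get_sources(distance=3.0,
    #                            minmax_ts=[25.0, None],
    #                            minmax_npred=[100.0, None],
    #                            exclude='sourceA')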
def get_sources_by_property(self, pname, pmin, pmax=None):
srcs = []
for i, s in enumerate(self._srcs):
if pname not in s:
continue
if pmin is not None and s[pname] < pmin:
continue
if pmax is not None and s[pname] > pmax:
continue
srcs.append(s)
return srcs
def get_sources_by_position(self, skydir, dist, min_dist=None,
square=False, coordsys='CEL'):
"""Retrieve sources within a certain angular distance of a sky
coordinate. This function supports two types of geometric
selections: circular (square=False) and square (square=True).
        The circular selection finds all sources within a given angular
        distance of the target position. The square selection finds
        sources within an ROI-like region of size R x R where R = 2 x
        dist.
Parameters
----------
skydir : `~astropy.coordinates.SkyCoord`
Sky direction with respect to which the selection will be applied.
dist : float
Maximum distance in degrees from the sky coordinate.
square : bool
Choose whether to apply a circular or square selection.
coordsys : str
Coordinate system to use when applying a selection with square=True.
"""
msk = get_skydir_distance_mask(self._src_skydir, skydir, dist,
min_dist=min_dist, square=square,
coordsys=coordsys)
radius = self._src_skydir.separation(skydir).deg
radius = radius[msk]
srcs = [self._srcs[i] for i in np.nonzero(msk)[0]]
isort = np.argsort(radius)
radius = radius[isort]
srcs = [srcs[i] for i in isort]
return radius, srcs
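    # Minimal usage sketch (hypothetical `roi` and `skydir`): square=False keeps
    # sources within a circle of radius `dist` around `skydir`, while
    # square=True keeps sources inside an ROI-like box of width 2 x dist in
    # projected offsets; both return sources sorted by angular separation:
    #     radius, srcs = roi.get_sources_by_position(skydir, 2.0)
    #     radius, srcs = roi.get_sources_by_position(skydir, 2.0, square=True)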
def load_fits_catalog(self, name, **kwargs):
"""Load sources from a FITS catalog file.
Parameters
----------
name : str
Catalog name or path to a catalog FITS file.
"""
# EAC split this function to make it easier to load an existing catalog
cat = catalog.Catalog.create(name)
self.load_existing_catalog(cat, **kwargs)
def load_existing_catalog(self, cat, **kwargs):
"""Load sources from an existing catalog object.
Parameters
----------
cat : `~fermipy.catalog.Catalog`
Catalog object.
"""
coordsys = kwargs.get('coordsys', 'CEL')
extdir = kwargs.get('extdir', self.extdir)
srcname = kwargs.get('srcname', None)
m0 = get_skydir_distance_mask(cat.skydir, self.skydir,
self.config['src_radius'])
m1 = get_skydir_distance_mask(cat.skydir, self.skydir,
self.config['src_radius_roi'],
square=True, coordsys=coordsys)
m = (m0 & m1)
if srcname is not None:
m &= utils.find_rows_by_string(cat.table, [srcname],
self.src_name_cols)
offset = self.skydir.separation(cat.skydir).deg
offset_cel = wcs_utils.sky_to_offset(self.skydir,
cat.radec[:, 0], cat.radec[:, 1],
'CEL')
offset_gal = wcs_utils.sky_to_offset(self.skydir,
cat.glonlat[
:, 0], cat.glonlat[:, 1],
'GAL')
for i, (row, radec) in enumerate(zip(cat.table[m],
cat.radec[m])):
catalog_dict = catalog.row_to_dict(row)
src_dict = {'catalog': catalog_dict}
src_dict['Source_Name'] = row['Source_Name']
src_dict['SpectrumType'] = row['SpectrumType']
if row['extended']:
src_dict['SourceType'] = 'DiffuseSource'
src_dict['SpatialType'] = str(row['Spatial_Function'])
src_dict['SpatialModel'] = str(row['Spatial_Function'])
search_dirs = []
if extdir is not None:
search_dirs += [extdir, os.path.join(extdir, 'Templates')]
search_dirs += [row['extdir'],
os.path.join(row['extdir'], 'Templates')]
if src_dict['SpatialType'] == 'SpatialMap':
try:
src_dict['Spatial_Filename'] = utils.resolve_file_path(
row['Spatial_Filename'],
search_dirs=search_dirs)
except:
print("Failed to find %s" % row['Spatial_Filename'])
else:
src_dict['SourceType'] = 'PointSource'
src_dict['SpatialType'] = 'SkyDirFunction'
src_dict['SpatialModel'] = 'PointSource'
src_dict['spectral_pars'] = spectral_pars_from_catalog(
catalog_dict)
src_dict['spatial_pars'] = spatial_pars_from_catalog(catalog_dict)
src = Source(src_dict['Source_Name'], src_dict, radec=radec)
src.data['offset'] = offset[m][i]
src.data['offset_ra'] = offset_cel[:, 0][m][i]
src.data['offset_dec'] = offset_cel[:, 1][m][i]
src.data['offset_glon'] = offset_gal[:, 0][m][i]
src.data['offset_glat'] = offset_gal[:, 1][m][i]
self.load_source(src, False,
merge_sources=self.config['merge_sources'])
self._build_src_index()
def load_xml(self, xmlfile, **kwargs):
"""Load sources from an XML file."""
extdir = kwargs.get('extdir', self.extdir)
coordsys = kwargs.get('coordsys', 'CEL')
if not os.path.isfile(xmlfile):
xmlfile = os.path.join(fermipy.PACKAGE_DATA, 'catalogs', xmlfile)
root = ElementTree.ElementTree(file=xmlfile).getroot()
diffuse_srcs = []
srcs = []
ra, dec = [], []
for s in root.findall('source'):
src = Source.create_from_xml(s, extdir=extdir)
if src.diffuse:
diffuse_srcs += [src]
else:
srcs += [src]
ra += [src['RAJ2000']]
dec += [src['DEJ2000']]
src_skydir = SkyCoord(ra=np.array(ra) * u.deg,
dec=
| np.array(dec) | numpy.array |
"""
This is sample code showing how TensorFlow code can be instrumented in MCenter.
"""
import argparse
import numpy as np
import tensorflow as tf
from parallelm.mlops import mlops as mlops
from parallelm.mlops.predefined_stats import PredefinedStats
from parallelm.mlops.stats.bar_graph import BarGraph
"""
Function to add the arguments that are provided as arguments to the component.
"""
def add_parameters(parser):
parser.add_argument("--output_file", dest="output_file", type=str, required=False, default="tmp/image_predictions",
help='Prediction directory')
parser.add_argument("--model_dir", dest="model_dir", type=str, required=False, help='Model Directory',
default="/tmp/tf_log")
def main():
# Parse arguments
parser = argparse.ArgumentParser()
add_parameters(parser)
args = parser.parse_args()
# Initialize MLOps Library
mlops.init()
# Create synthetic data (Gaussian Distribution, Poisson Distribution and Beta Distribution)
num_samples = 50
num_features = 20
np.random.seed(0)
g =
| np.random.normal(0, 1, (num_samples, num_features)) | numpy.random.normal |
from typing import Dict, List
import random
import tensorflow as tf
import tensorflow_hub as hub
import numpy as np
from utils import keep_keys_with_greater_than_equal_k_items
class BaseDataLoader(object):
def __init__(
self,
database,
val_database,
test_database,
n,
k_ml,
k_val_ml,
k_val,
k_val_val,
k_test,
k_val_test,
meta_batch_size,
num_tasks_val,
val_seed
):
self.database = database
self.val_database = val_database
self.test_database = test_database
self.n = n
self.k_ml = k_ml
self.k_val_ml = k_val_ml
self.k_val = k_val
self.k_val_val = k_val_val
self.k_test = k_test
self.k_val_test = k_val_test
self.meta_batch_size = meta_batch_size
self.num_tasks_val = num_tasks_val
self.val_seed = val_seed
def get_train_dataset(self):
dataset = self.get_supervised_meta_learning_dataset(
self.database.train_folders,
n=self.n,
k=self.k_ml,
k_validation=self.k_val_ml,
meta_batch_size=self.meta_batch_size,
instance_parse_function=self.get_parse_function(),
)
return dataset
def get_val_dataset(self):
val_dataset = self.get_supervised_meta_learning_dataset(
self.val_database.val_folders,
n=self.n,
k=self.k_val,
k_validation=self.k_val_val,
meta_batch_size=1,
seed=self.val_seed,
instance_parse_function=self.get_val_parse_function(),
)
val_dataset = val_dataset.repeat(-1)
val_dataset = val_dataset.take(self.num_tasks_val)
return val_dataset
def get_test_dataset(self, num_tasks, seed=-1):
test_dataset = self.get_supervised_meta_learning_dataset(
self.test_database.test_folders,
n=self.n,
k=self.k_test,
k_validation=self.k_val_test,
meta_batch_size=1,
seed=seed,
instance_parse_function=self.get_test_parse_function(),
)
test_dataset = test_dataset.repeat(-1)
test_dataset = test_dataset.take(num_tasks)
return test_dataset
def make_labels_dataset(self, n: int, k: int, k_validation: int, one_hot_labels: bool) -> tf.data.Dataset:
"""
Creates a tf.data.Dataset which generates corresponding labels to meta-learning inputs.
This method just creates this dataset for one task and repeats it. You can use zip to combine this dataset
with your desired dataset.
Note that the repeat is set to -1 so that the dataset will repeat itself. This will allow us to
zip it with any other dataset and it will generate labels as much as needed.
Also notice that this dataset is not batched into meta batch size, so this will just generate labels for one
task.
"""
tr_labels_ds = tf.data.Dataset.from_tensor_slices(np.expand_dims(np.repeat(np.arange(n), k), 0))
val_labels_ds = tf.data.Dataset.from_tensor_slices(np.expand_dims(np.repeat(np.arange(n), k_validation), 0))
if one_hot_labels:
tr_labels_ds = tr_labels_ds.map(lambda example: tf.one_hot(example, depth=n))
val_labels_ds = val_labels_ds.map(lambda example: tf.one_hot(example, depth=n))
labels_dataset = tf.data.Dataset.zip((tr_labels_ds, val_labels_ds))
labels_dataset = labels_dataset.repeat(-1)
return labels_dataset
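        # Minimal usage sketch (values are assumptions, not part of the original
        # class): for a 5-way task with k=1 training and k_validation=3 shots,
        #     labels_ds = self.make_labels_dataset(5, 1, 3, one_hot_labels=True)
        # yields per task a (train, val) pair of one-hot label tensors with
        # shapes (5, 5) and (15, 5); because of repeat(-1) it can be zipped
        # with an image dataset of matching task structure.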
def get_unsupervised_dataset(
self,
folders: Dict[str, List[str]],
n: int,
meta_batch_size: int,
one_hot_labels: bool = True,
reshuffle_each_iteration: bool = True,
seed: int = -1,
instance_parse_function=None
):
        """This function generates a dataset that uses the same image for both training and validation."""
        k = 1
if instance_parse_function is None:
instance_parse_function = self.get_parse_function()
# TODO handle seed
if seed != -1:
| np.random.seed(seed) | numpy.random.seed |
# -*- coding: utf-8 -*-
"""
This library contains all functions needed to produce the spatial files
of a LARSIM raster model (tgb.dat, utgb.dat, profile.dat).
It uses functions from the TATOO core library.
Author: <NAME>
Chair for Hydrology and River Basin Management
Technical University of Munich
Requires the following ArcGIS licenses:
- Conversion Toolbox
- Spatial Analyst
System requirements:
- Processor: no special requirements
tested with Intel(R) Xeon(R) CPU E5-1650 v4 @ 3.60 GHz
- Memory/RAM: depending on the size of the DEM to be processed
tested with 32,0 GB RAM
- Python IDE for Python 3
- ArcGIS Pro 2.5
Version: v1.0.0, 2021-05-02
"""
__author__ = '<NAME>'
__copyright__ = 'Copyright 2021'
__credits__ = ''
__license__ = 'CC BY-NC-ND 3.0 DE'
__version__ = '1.0.0'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Production'
# load modules
import os
import sys
import copy
import arcpy
import numpy.matlib
import numpy.lib.recfunctions
import numpy as np
import pandas as pd
import tatoo_common as tc
# check out ArcGIS spatial analyst license
class LicenseError(Exception):
pass
try:
if arcpy.CheckExtension("Spatial") == "Available":
arcpy.CheckOutExtension("Spatial")
print ("Checked out \"Spatial\" Extension")
else:
raise LicenseError
except LicenseError:
print("Spatial Analyst license is unavailable")
except:
print(arcpy.GetMessages(2))
# allow overwriting the outputs
arcpy.env.overwriteOutput = True
# %% function to preprocess a high-resolution digital elevation model
def preprocess_dem(path_dem_hr, path_fnw, cellsz, h_burn,
path_gdb_out, name_dem_mr_f='dem_mr_f',
print_out=False):
"""
    Aggregates a high-resolution digital elevation raster, converts the river
    network to a model-resolution raster, burns the flow network raster into
    the digital elevation raster, and fills the sinks of the resulting raster.
JM 2021
Arguments:
-----------
path_dem_hr: str
path of the high-resolution digital elevation raster
(e.g., 'c:\model_creation.gdb\dem_hr')
path_fnw: str
path of the flow network feature class or shape file (e.g., 'c:\fnw.shp')
cellsz: integer
edge length of the resulting model cells in [m] (e.g., 100)
h_burn: integer
depth of river network burning in digital elevation model
path_gdb_out: str
path of the output file geodatabase (e.g., 'c:\model_creation.gdb')
name_dem_mr_f: str
        name of the filled model-resolution digital elevation raster (e.g., 'dem_mr_f')
    print_out: boolean (optional, default: False)
        true if work progress shall be printed to the command line (default: False)
Returns:
-----------
Saves the following files:
- filled model-resolution digital elevation raster
- model-resolution raster representation of the flow network
"""
# arcpy field names (ONLY CHANGE IF ARCPY FUNCTION CHANGES FORCE YOU!)
f_oid = 'OBJECTID'
# paths for intermediates
path_dem_mr_sr = path_gdb_out + 'dem_mr_sr'
path_dem_mr = path_gdb_out + 'dem_mr'
path_fnw_mr = path_gdb_out + 'fnw_mr'
path_dem_mr_cfnw = path_gdb_out + 'dem_mr_cfnw'
# paths for outputs
path_dem_mr_f = path_gdb_out + name_dem_mr_f
# Aggregate high resolution digital elevation model to model resolution
if print_out: print('...aggregate high resolution digital elevation model...')
# create snap raster at origin of coordinate system
dem_mr_sr = arcpy.sa.CreateConstantRaster(
1, 'INTEGER', cellsz, arcpy.Extent(
0.5 * cellsz, 0.5 * cellsz,
cellsz + 0.5 * cellsz, cellsz + 0.5 * cellsz))
dem_mr_sr.save(path_dem_mr_sr)
# save default and set environments
default_env_snr = arcpy.env.snapRaster
default_env_ext = arcpy.env.extent
arcpy.env.snapRaster = path_dem_mr_sr
arcpy.env.extent = path_dem_hr
# aggregate high resolution DEM to model resolution
if arcpy.Exists(path_dem_mr):
arcpy.management.Delete(path_dem_mr)
dem_mr = arcpy.sa.Aggregate(path_dem_hr, cellsz, 'MEAN', 'EXPAND', 'DATA')
dem_mr.save(path_dem_mr)
# cut rivers
if print_out: print('...cut rivers...')
# convert polylines to raster in model grid size
arcpy.conversion.PolylineToRaster(path_fnw, f_oid, path_fnw_mr,
'MAXIMUM_LENGTH', 'NONE', path_dem_mr)
# decrease model resolution elevation raster values at flow network raster cells
dem_mr_cfnw = arcpy.sa.Con(arcpy.sa.IsNull(path_fnw_mr), path_dem_mr,
dem_mr - h_burn)
dem_mr_cfnw.save(path_dem_mr_cfnw)
# reset environment parameters
arcpy.env.snapRaster = default_env_snr
arcpy.env.extent = default_env_ext
# fill cut model resolution digital elevation raster sinks
if print_out: print('...fill cut model resolution digital elevation raster sinks...')
# fill sinks
dem_mr_cfnw_f = arcpy.sa.Fill(path_dem_mr_cfnw, '')
dem_mr_cfnw_f.save(path_dem_mr_f)
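# %% usage sketch: preprocess_dem (illustrative, not part of the original library)
# A minimal example of how preprocess_dem might be called; all paths, the cell
# size and the burn depth below are assumptions chosen for demonstration only.
def _example_preprocess_dem():
    path_gdb = 'c:\\model_creation.gdb\\'
    preprocess_dem(path_dem_hr=path_gdb + 'dem_hr',  # high-resolution DEM
                   path_fnw='c:\\data\\fnw.shp',     # flow network polylines
                   cellsz=100,                       # model cell size [m]
                   h_burn=10,                        # burn depth [m]
                   path_gdb_out=path_gdb,
                   name_dem_mr_f='dem_mr_f',
                   print_out=True)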
# %% function to calculate the model watershed
def calc_watershed(path_dem_mr_cfnw_f, path_pp,
path_gdb_out, name_fd_mr='fd_mr', name_fd_mr_corr='fd_mr_corr',
name_fa_mr='fa_mr', name_ws_s='ws_s',
initial=True, name_fd_p_corr='fd_p_corr', path_fd_p_corr='',
print_out=False):
"""
Creates the model watershed from a filled digital elevation raster using
pour points. The correction point feature class is necessary for the initial
calculation of the model watershed calculation.
JM 2021
Arguments:
-----------
path_dem_mr_cfnw_f: str
path of the filled model-resolution digital elevation raster
(e.g., 'c:\model_creation.gdb\dem_mr_cfnw_f')
path_pp: str
path of the pour point feature class
(e.g., 'c:\model_creation.gdb\pp')
path_gdb_out: str
path of the output file geodatabase (e.g., 'c:\model_creation.gdb')
name_fd_mr: str
name of the output model resolution flow direction raster (e.g., 'fd_mr')
    name_fd_mr_corr: str
name of the corrected output model resolution flow direction raster
(e.g., 'fd_mr_corr')
name_fa_mr: str
name of the output model resolution flow accumulation raster (e.g., 'fa_mr')
name_ws_s: str
name of the output watershed polygon feature class (e.g., 'ws_s')
initial: boolean (optional)
true if it is the initial run to calculate the model watershed
name_fd_p_corr: str (optional)
name of the output flow direction correction point feature class
(e.g., 'fd_p_corr')
path_fd_p_corr: str (optional)
path of the output flow direction correction point feature class
needed for case initial=False (e.g., 'fd_p_corr')
print_out: boolean (optional, default: False)
        true if work progress shall be printed to the command line
Returns:
-----------
Saves the following outputs:
- model resolution flow direction raster
- watershed polygon feature class
- flow direction correction point feature class (optional)
"""
# check inputs
if not initial and not path_fd_p_corr:
sys.exit('With initial=False path_fd_p_corr must not be an empty string!')
# define internal field names
f_pp = 'pp'
f_pp_ws = 'ModelWatershed'
# arcpy field names (ONLY CHANGE IF ARCPY FUNCTION CHANGES FORCE YOU!)
f_oid = 'OBJECTID'
f_val = 'Value'
f_VAL = 'VALUE'
# feature class names from input
name_pp = os.path.split(path_pp)[1]
# define paths of intermediates in working geodatabase
path_ppc = path_gdb_out + 'ppc'
path_spp = path_gdb_out + 'spp'
path_ws_r = path_gdb_out + 'ws_r'
path_ws_sr = path_gdb_out + 'ws_sr'
path_ws_s_sj = path_gdb_out + 'ws_s_sj'
if not initial: path_fd_r_corr = path_gdb_out + 'fd_r_corr'
# paths for outputs
path_fd_mr = path_gdb_out + name_fd_mr
path_fd_mr_corr = path_gdb_out + name_fd_mr_corr
path_fa_mr = path_gdb_out + name_fa_mr
path_ws_s = path_gdb_out + name_ws_s
if initial: path_fd_p_corr = path_gdb_out + name_fd_p_corr
# calculate flow direction
if print_out: print('...calculate flow direction raster...')
if arcpy.Exists(path_fd_mr): arcpy.management.Delete(path_fd_mr)
fd_mr = arcpy.sa.FlowDirection(path_dem_mr_cfnw_f, 'NORMAL', '', 'D8')
fd_mr.save(path_fd_mr)
# if run is initial, create correction flow direction point feature class
# and copy flow direction raster.
field_fd_corr = 'D8'
if initial:
# create flow direction correction feature class and add flow direction
# binary field
sr = arcpy.Describe(path_pp).spatialReference
arcpy.CreateFeatureclass_management(path_gdb_out, name_fd_p_corr,
'POINT', '', 'DISABLED', 'DISABLED',
sr, '', '0', '0', '0', '')
arcpy.AddField_management(path_fd_p_corr, field_fd_corr, 'SHORT', '', '',
'', '', 'NULLABLE', 'NON_REQUIRED', '')
if arcpy.Exists(path_fd_mr_corr): arcpy.management.Delete(path_fd_mr_corr)
arcpy.CopyRaster_management(path_fd_mr, path_fd_mr_corr, '', '', '255',
'NONE', 'NONE', '8_BIT_UNSIGNED', 'NONE',
'NONE', 'GRID', 'NONE', 'CURRENT_SLICE',
'NO_TRANSPOSE')
# else, correct flow direction raster using correction point features
else:
        # check whether any flow direction correction point features exist
        # (the extent of an empty feature class has NaN coordinates)
        fd_p_corr_descr = arcpy.Describe(path_fd_p_corr)
        fd_p_nb = fd_p_corr_descr.extent.XMin
        # if correction point features exist, apply the correction
if not np.isnan(fd_p_nb):
# set environments
default_env_snr = arcpy.env.snapRaster
default_env_csz = arcpy.env.cellSize
arcpy.env.snapRaster = path_fd_mr
arcpy.env.cellSize = path_fd_mr
# convert flow direction correction points to raster
arcpy.PointToRaster_conversion(path_fd_p_corr, field_fd_corr,
path_fd_r_corr, 'MOST_FREQUENT',
'NONE', path_fd_mr)
# change environments
default_env_ext = arcpy.env.extent
default_env_mask = arcpy.env.mask
arcpy.env.extent = 'MAXOF'
arcpy.env.mask = path_fd_mr
# replace flow direction values, where correction points are defined
fd_mr_corr = arcpy.ia.Con(arcpy.ia.IsNull(path_fd_r_corr), path_fd_mr,
path_fd_r_corr)
fd_mr_corr.save(path_fd_mr_corr)
# reset environments
arcpy.env.snapRaster = default_env_snr
arcpy.env.cellSize = default_env_csz
arcpy.env.extent = default_env_ext
arcpy.env.mask = default_env_mask
# else, copy uncorrected flow direction raster
else:
            print(('INFO: Flow direction correction point feature '
                   'class is empty. Original flow direction is used instead.'))
if arcpy.Exists(path_fd_mr_corr): arcpy.management.Delete(path_fd_mr_corr)
arcpy.CopyRaster_management(path_fd_mr, path_fd_mr_corr, '', '', '255',
'NONE', 'NONE', '8_BIT_UNSIGNED', 'NONE',
'NONE', 'GRID', 'NONE', 'CURRENT_SLICE',
'NO_TRANSPOSE')
if print_out: print('...calculate flow accumulation...')
# calculate flow accumulation raster
if arcpy.Exists(path_fa_mr): arcpy.management.Delete(path_fa_mr)
fa_mr = arcpy.sa.FlowAccumulation(path_fd_mr_corr, '', 'DOUBLE', 'D8')
fa_mr.save(path_fa_mr)
# copy pour point feature class
if arcpy.Exists(path_ppc): arcpy.management.Delete(path_ppc)
arcpy.management.CopyFeatures(path_pp, path_ppc, '', '', '', '')
    # add and calculate a field using the object ID
arcpy.AddField_management(path_ppc, f_pp, 'LONG', '', '', '', '', 'NULLABLE',
'NON_REQUIRED', '')
arcpy.CalculateField_management(path_ppc, f_pp, '!{0}!'.format(f_oid), 'PYTHON3', '')
# snap pour points to flow accumulation raster
if arcpy.Exists(path_spp): arcpy.management.Delete(path_spp)
spp = arcpy.sa.SnapPourPoint(path_ppc, fa_mr, '40', f_pp)
spp.save(path_spp)
if print_out: print('...calculate watershed...')
# calculate watershed raster
if arcpy.Exists(path_ws_r): arcpy.management.Delete(path_ws_r)
ws_r = arcpy.sa.Watershed(path_fd_mr_corr, spp, f_val)
ws_r.save(path_ws_r)
# set environments
arcpy.env.outputZFlag = 'Same As Input'
arcpy.env.outputMFlag = 'Same As Input'
# convert watershed raster to polygon features
if arcpy.Exists(path_ws_sr): arcpy.management.Delete(path_ws_sr)
arcpy.RasterToPolygon_conversion(path_ws_r, path_ws_sr, 'NO_SIMPLIFY', f_VAL,
'SINGLE_OUTER_PART', '')
if print_out: print('...select model watersheds...')
pp_fieldnames = [field.name for field in arcpy.ListFields(path_pp)]
    # if a field exists that identifies a polygon as model watershed, keep only
    # the watersheds whose field value is >= 1
if f_pp_ws in pp_fieldnames:
# join created watershed polygons to pour points
arcpy.SpatialJoin_analysis(
path_ws_sr, path_pp, path_ws_s_sj, 'JOIN_ONE_TO_ONE', 'KEEP_ALL',
"{0} '{0}' true true false 2 Short 0 0,First,#,{1},{0},-1,-1".format(
f_pp_ws, name_pp), 'CONTAINS', '', '')
# select and copy model watersheds marked with a positive integer
sel_sql = f_pp_ws + ' >= 1'
path_ws_s_sj_sel = arcpy.management.SelectLayerByAttribute(
path_ws_s_sj, 'NEW_SELECTION', sel_sql)
if arcpy.Exists(path_ws_s): arcpy.management.Delete(path_ws_s)
arcpy.management.CopyFeatures(path_ws_s_sj_sel, path_ws_s, '', '', '', '')
else:
if arcpy.Exists(path_ws_s): arcpy.management.Delete(path_ws_s)
arcpy.management.CopyFeatures(path_ws_sr, path_ws_s, '', '', '', '')
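# %% usage sketch: calc_watershed (illustrative, not part of the original library)
# The watershed is typically derived in two passes: an initial run that creates
# an empty flow direction correction point feature class, and a second run that
# applies correction points digitized by the user. Paths are assumptions.
def _example_calc_watershed(path_gdb):
    # first pass: derive flow direction, flow accumulation and the watershed
    calc_watershed(path_gdb + 'dem_mr_f', path_gdb + 'pp', path_gdb,
                   initial=True, print_out=True)
    # ...the user may now digitize points in 'fd_p_corr' and repeat the run...
    calc_watershed(path_gdb + 'dem_mr_f', path_gdb + 'pp', path_gdb,
                   initial=False, path_fd_p_corr=path_gdb + 'fd_p_corr',
                   print_out=True)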
# %% function to calculate the model cell network
def calc_model_network(path_ws_s, path_fd_mr, path_fa_mr,
path_gdb_out, path_files_out, name_fl_mr='fl_mr',
name_tgb_p='tgb_p', name_mnw='mwn',
print_out=False):
"""
Creates a point feature class representing the center of model cells
as well as a polyline feature class representing the model network between
the model cells (upstream-downstream-relation).
JM 2021
Arguments:
-----------
path_ws_s: str
path of the output watershed polygon feature class
(e.g., 'ws_s')
path_fd_mr: str
path of the output model resolution flow direction raster
(e.g., 'fd_mr')
path_fa_mr: str
path of the output model resolution flow accumulation raster
(e.g., 'fa_mr')
path_gdb_out: str
path of the output file geodatabase (e.g., 'c:\model_creation.gdb')
path_files_out: str
storage path for intermediate data (e.g., 'c:\tmp_model_data\')
    name_fl_mr: str (optional)
        name of the extracted output model resolution flow length raster
        (e.g., 'fl_mr')
    name_tgb_p: str (optional)
        name of the output model cell point feature class (e.g., 'tgb_p')
    name_mnw: str (optional)
        name of the output model network polyline feature class (e.g., 'mwn')
    print_out: boolean (optional)
        true if work progress shall be printed to the command line
Returns:
-----------
df_data_tgb_p: pd.DataFrame
- tgb: model element ID number (int)
- tgb_down: downstream model element ID number (int)
- tgb_type: model element type (str)
- tgb_dtgb: real representative model element ID for dummy elements (int)
- tgb_a: inflowing catchment area of each model element [km²]
- x, y: x- and y-coordinates of element center [m]
df_tgb_up: pd.DataFrame
tgb_up1, tgb_up2: upstream model element ID numbers (int)
Saves the following outputs:
- extracted model resolution flow length raster
- model cell point feature class
- downstream model cell point feature class
- model network polyline feature class
"""
# define internal variables
def_val_dtgb = -1
# define internal field names
f_tgb = 'tgb'
f_tgb_down = 'tgb_down'
f_tgb_type = 'tgb_type'
f_tgb_dtgb = 'tgb_dtgb'
f_tgb_a = 'tgb_a'
f_x = 'x'
f_y = 'y'
f_nrflv = 'nrflv'
f_tgb_up1 = 'up1'
f_tgb_up2 = 'up2'
# define key-words to identify element types
str_headw = 'headwater'
str_routing = 'routing'
str_dummy = 'dummy'
# arcpy field names (ONLY CHANGE IF ARCPY FUNCTION CHANGES FORCE YOU!)
f_p_x = 'POINT_X'
f_p_y = 'POINT_Y'
# define paths of intermediates in working geodatabase
path_fd_mr_e = path_gdb_out + 'fd_mr_e'
path_fa_mr_e = path_gdb_out + 'fa_mr_e'
name_tgb_down_p = 'tgb_down_p'
# paths for outputs
path_fl_mr_e = path_gdb_out + name_fl_mr
path_mnw = path_gdb_out + name_mnw
# create real representative index list for dummy subcatchments
def real_repr_idx(df_tgb, str_dummy, print_out=False):
if print_out: print(('...create representative index list for '
'dummy subcatchments tgb_dtgb...'))
# Preallocate arrays
        ser_tgb_dtgb = pd.Series(np.ones(df_tgb.shape[0]) * def_val_dtgb,
                                 index=df_tgb.index, name=f_tgb_dtgb).astype(np.int64)
# Iterate over all final index values
for tgb in df_tgb.index:
# if cell is a dummy, find the connected real cell
if df_tgb.at[tgb, f_tgb_type] == str_dummy:
# follow dummy cascade downwards until real cell and set index
mm = copy.deepcopy(tgb)
while df_tgb.at[mm, f_tgb_type] == str_dummy:
mm = df_tgb.at[mm, f_tgb_down]
ser_tgb_dtgb.at[tgb] = mm
return ser_tgb_dtgb
# calculations
# (de-)activate additional debugging command line output
debug = False # (False/True)
# set workspace
arcpy.env.workspace = path_gdb_out
# clip flow direction raster to watershed polygon
if print_out: print('...clip flow direction raster...')
if arcpy.Exists(path_fd_mr_e): arcpy.management.Delete(path_fd_mr_e)
fd_mr_e = arcpy.sa.ExtractByMask(path_fd_mr, path_ws_s)
fd_mr_e.save(path_fd_mr_e)
# clip flow accumulation raster to watershed polygon
if print_out: print('...clip flow accumulation raster...')
if arcpy.Exists(path_fa_mr_e): arcpy.management.Delete(path_fa_mr_e)
fa_mr_e = arcpy.sa.ExtractByMask(path_fa_mr, path_ws_s)
fa_mr_e.save(path_fa_mr_e)
# calculate downstream flow length
if print_out: print('...calculate flow length...')
if arcpy.Exists(path_fl_mr_e): arcpy.management.Delete(path_fl_mr_e)
fl_mr_e = arcpy.sa.FlowLength(fd_mr_e, 'DOWNSTREAM', '')
fl_mr_e.save(path_fl_mr_e)
if print_out: print('...import flow rasters...')
# define paths of intermediates in working folder
path_fd_c_tif = path_files_out + 'fd_c.tif'
path_fa_c_tif = path_files_out + 'fa_c.tif'
path_fl_c_tif = path_files_out + 'fl_c.tif'
# import flow direction, accumulation and length as numpy rasters
fd, ncols, nrows, cellsz, xll, yll, ctrl_tif_export = tc.fdal_raster_to_numpy(
path_fd_mr_e, 'fd', path_fd_c_tif, True)
fa, _, _, _, _, _, _ = tc.fdal_raster_to_numpy(
path_fa_mr_e, 'fa', path_fa_c_tif, False)
fl, _, _, _, _, _, _ = tc.fdal_raster_to_numpy(
path_fl_mr_e, 'fl', path_fl_c_tif, True)
# add a NaN boundary to all gis input data sets
empty_row = np.zeros((1, ncols)) * np.nan
empty_col = np.zeros((nrows + 2, 1)) * np.nan
fa = np.concatenate((empty_row, fa, empty_row), axis=0)
fa = np.concatenate((empty_col, fa, empty_col), axis=1)
fd = np.concatenate((empty_row, fd, empty_row), axis=0)
fd = np.concatenate((empty_col, fd, empty_col), axis=1)
fl = np.concatenate((empty_row, fl, empty_row), axis=0)
fl = np.concatenate((empty_col, fl, empty_col), axis=1)
# adjust gis parameters for new sizes
ncols = ncols + 2
nrows = nrows + 2
xll = xll - cellsz
yll = yll - cellsz
# set default data type for calculations for efficient RAM usage
if ncols * nrows <= 32767: np_type = np.int32
else: np_type = np.int64
# get indices and number of not-nan-data
gis_notnans = np.nonzero(~np.isnan(fd))
gis_notnans_x = gis_notnans[0]
gis_notnans_y = gis_notnans[1]
gis_notnans_count = gis_notnans_x.shape[0]
# create lookup table connecting flow direction int-values to array indices
fd_lu = np.array([[ 1, 0, 1], [ 2, 1, 1], [ 4, 1, 0],
[ 8, 1,-1], [ 16, 0,-1], [ 32,-1,-1],
[ 64,-1, 0], [128,-1, 1]])
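    # (each row of the lookup table is [D8 code, row offset, column offset];
    #  e.g. code 1 (east) maps to the same row and the next column (0, +1),
    #  code 4 (south) to the next row and the same column (+1, 0))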
# pre-allocate flow direction arrays
fd_xd = np.empty((gis_notnans_count, 1), dtype=np_type)
fd_yd = np.empty((gis_notnans_count, 1), dtype=np_type)
# iterate flow direction int-values
for ii in range(fd_lu.shape[0]):
# get indices of not-nan flow direction values with fitting int-value
fd_notnans_ii = fd[~np.isnan(fd)] == fd_lu[ii, 0]
# set array x and y index at found indices
fd_xd[fd_notnans_ii] = fd_lu[ii, 1]
fd_yd[fd_notnans_ii] = fd_lu[ii, 2]
# create vector of combined not-nan array and converted flow direction indices
Jtm_down_xd = gis_notnans_x + np.int64(fd_xd[:, 0])
Jtm_down_yd = gis_notnans_y + np.int64(fd_yd[:, 0])
if print_out: print('...initialize arrays for iteration...')
# create temporal index array with continuous number Jtm
Jtm = np.ones((nrows, ncols), dtype=np_type) * -1
Jtm[gis_notnans_x, gis_notnans_y] = range(1, gis_notnans_count+1)
# calculate temporal downstream cell array Jtm_down using flow direction indices.
Jtm_down = np.ones((nrows, ncols),dtype=np_type) * -1
Jtm_down[gis_notnans] = Jtm[Jtm_down_xd, Jtm_down_yd]
# find the catchment outlet where no downstream index is set
OFr = np.nonzero(np.logical_and(Jtm != -1, Jtm_down == -1))
# mark the outlet cell in Jtm_down with a zero
Jtm_down[OFr] = 0
# preallocate list for temporal upstream index calculation Jt_up
Jt_up = np.ones((gis_notnans_count, 7), dtype=np_type) * -1
# iterate temporal upstream list
for jt_ii, jt in enumerate(range(1, Jt_up.shape[0] + 1)):
# find all rows in Jtm_down which do have jt as downstream cell
verw = np.nonzero(Jtm_down == jt)
# print subset in temporal upstream list Jt_up
Jt_up[jt_ii, 0:verw[0].shape[0]] = Jtm[verw]
# convert list to int
Jt_up = np.int32(Jt_up)
# calculate sum of necessary dummy cells (which have >2 upstream cells)
D_count = np.nansum(Jt_up[:, 2:7] != -1)
# calculate number of temporal index numbers jt
Jt_count = Jt_up.shape[0]
# calculate number of final indices j as sum of dummy and real cells
J_count = Jt_count + D_count
# preallocate temporal downstream list Jt_down
Jt_down = np.ones((Jt_count, 1), dtype=np_type) * -1
# iterate over temporal index jt and fill list
for jt_ii, jt in enumerate(range(1, Jt_count+1)):
# look for downstream cell from matrix
Jt_down[jt_ii] = Jtm_down[Jtm == jt]
# preallocate lists for final indices J, J_type and J_jt, final upstream
# and downstream lists J_up and J_down, and protocol list Done
J_type = J_count * [None]
J = np.array(range(1, J_count+1))
J_up = np.ones((J_count, 2), dtype=np_type) * -1
J_down = np.ones((J_count, ), dtype=np_type) * -1
J_jt = np.ones((J_count, 1), dtype=np_type) * -1
Done = np.ones((np.nanmax(Jtm)), dtype=np_type) * -1
# calculate protocol list D_contr
D_contr = np.nansum(Jt_up[:, 2:] != -1, 1)
# calculate final flow network index lists J, J_down, J_up, J_type, X and Y
# iterating from largest flow length downstream to outlet (tree-climbing algorithm)
if print_out: print('''...calculate final flow network index lists...''')
# find cell with largest flow length and its temporal index
jt = Jtm[fl == np.nanmax(fl)][0]
jti = jt - 1
# preset upstream subset (ss)
ss = Jt_up[jti, :]
ss = ss[ss != -1]
ssi = ss - 1
# calculate not done subset of upstream cell subset
ssnotdone = ss[Done[ssi] == -1]
# pre-set final index variable (0)
jj = 0
# debug protocol
if debug and print_out:
im_pos = np.nonzero(Jtm == jt)
x = im_pos[0]
y = im_pos[1]
print(' Initial cell at pos: {0:d}/{1:d} ({2:d})'.format(x, y, jt))
# while either outlet is not reached or not all upstream members are processed
while jt != Jtm[OFr] or ssnotdone.shape[0] != 0:
# case 1: HEADWATER CELL as ssnotnan is empty
# -> create new index for headwater and move downwards
if ss.shape[0] == 0:
# increment final index, fill type and link lists
jj += 1
jji = jj - 1
J_type[jji] = str_headw
J_jt[jji, 0] = jt
# debug protocol
if debug and print_out:
print('j: {0:d}, pos: {1:d}/{2:d} = {3:d} -> {4:d}, {5:s} cell'.format(
jj, x, y, jt, Jt_down[jti, 0], str_headw))
# set upstream cell to 0, mark cell as done and go downwards
J_up[jji, 0] = 0
Done[jti] = 1
jt = Jt_down[jti, 0]
jti = jt - 1
# debug protocol
if debug and print_out:
im_pos = np.nonzero(Jtm == jt)
x = im_pos[0]
y = im_pos[1]
print(' -> down to {0:d}/{1:d} = {2:d}'.format(x, y, jt))
else:
# case 2: ROUTING CELL as all upstream cells are done
# -> create new index for routing cell and move downwards
if all(Done[ssi] == 1):
# increment final index, fill type and link lists
jj += 1
jji = jj - 1
J_type[jji] = str_routing
J_jt[jji, 0] = jt
# define upstream cell subset and give position indices
ssj = np.flatnonzero(np.any(J_jt == ss, 1))
# if one or two upstream cells exist:
# connect two real cells in Jt_up and Jt_down
if ssj.shape[0] <= 2:
ssjl = ssj.shape[0]
ssjtu = Jt_up[jti, :ssjl]
ssjtu = ssjtu[ssjtu != -1]
J_up[jji, :ssjl] = J[np.flatnonzero(np.any(J_jt == ssjtu, 1))]
J_down[ssj] = jj
# else if > 2 upstream cells exist:
                # connect 1 real and 1 dummy cell in Jt_up and Jt_down
else:
real = J[np.amax(ssj)]
dummy = np.amax(J_down[ssj])
J_up[jji, :] = [dummy, real]
J_down[[dummy-1, real-1]] = jj
# debug protocol
if debug and print_out:
pr_up = Jt_up[jti, :]
pr_up = pr_up[pr_up != -1]
print('''j: {0:d}, Pos: {1:d}/{2:d} = {3:d} -> {4:d},
Jt_up: {5:s}, {6:s} cell'''.format(
jj, x, y, jt, Jt_down[jt-1],
str(pr_up[~np.isnan(pr_up)])[1:-1], str_routing))
# mark cell as done and go downwards
Done[jti] = 1
jt = Jt_down[jti, 0]
jti = jt - 1
# debug protocol
if debug and print_out:
im_pos = np.nonzero(Jtm == jt)
x = im_pos[0]
y = im_pos[1]
print(' -> down to {0:d}/{1:d} = {2:d}'.format(x, y, jt))
else:
# case 3: DUMMY CELL as not all required dummy cells are
# done but >= 2 upstream cells are done
# -> create new index for dummy cell and move upwards to
# the cell with the largest flow accumulation
if np.sum(Done[ssi] != -1) >= 2:
# increment final index, fill type and link lists
jj += 1
jji = jj - 1
J_type[jji] = str_dummy
J_jt[jji,0] = 0
# define upstream cell subset and give position indices
ssj = np.flatnonzero(J_down[0:jji] == -1)
# preallocate testing matrix (all are false)
ssjt = np.zeros((ssj.shape[0], ), dtype=bool)
# iterate upstream cell subset
for ii, ssji in enumerate(ssj):
jtupi = Jt_up[jti, :]
jtupi = jtupi[jtupi != -1]
# ssj exists in Jt_up -> test is TRUE
if np.any(np.isin(jtupi, J_jt[ssji, 0])):
ssjt[ii] = True
# ssj does not exist in Jt_up but is dummy
# -> test is TRUE
elif J_type[ssji] == str_dummy:
ssjt[ii] = True
# reduce subset with testing matrix
ssj = ssj[ssjt]
# 'wrong neighbours'
# (loose, not finished dummy strings) are removed
if ssj.shape[0] > 2:
ssj = ssj[-2:]
# connect upstream cells in Jt_up and Jt_down
J_up[jji, :] = J[ssj]
J_down[ssj] = jj
# debug protocol
if debug and print_out:
pr_up = Jt_up[jti, :]
pr_up = pr_up[pr_up != -1]
print('''j: {0:d}, Pos: {1:d}/{2:d} = {3:d} -> {4:d},
Jt_up: {5:s}, {6:s} cell'''.format(
jj, x, y, jt, Jt_down[jti,0],
str(pr_up[~np.isnan(pr_up)])[1:-1], str_dummy))
# decrement dummy protocol variable
D_contr[jti] = D_contr[jti] - 1
# case 4 (else): UPWARDS MOVEMENT as not all required dummy
# cells are done and < 2 upstream cells are done
# -> do not create new index
# calculate not done subset of upstream cells and its largest
# flow accumulation cell preallocate subset for flow
# accumulation calculation
ssflowacc = np.zeros((ssnotdone.shape[0]), dtype=np_type)
# iterate not done subset of upstream cells and find flow
# accumulation
for ii, iiv in enumerate(ssflowacc):
ssflowacc[ii] = fa[Jtm == ssnotdone[ii]]
# calculate temporal index of max. flow accumulation
ssmaxind = ssnotdone[ssflowacc == np.amax(ssflowacc)]
# go upstream to max flow acc or first cell if more than one
# solutions exist
jt = ssmaxind[0]
jti = jt - 1
# debug protocol
if debug and print_out:
im_pos = np.nonzero(Jtm == jt)
x = im_pos[0]
y = im_pos[1]
print(' -> up to {0:d}/{1:d} = {2:d}'.format(x, y, jt))
# find upstream cells and create subset (ss)
ss = Jt_up[jti, :]
ss = ss[ss != -1]
ssi = ss - 1
# calculate not done subset of upstream cell subset
ssnotdone = ss[Done[ssi] == -1]
# Calculate values for catchment outlet
if print_out: print('...calculate outlet...')
# fill lists
jj += 1
jji = jj - 1
J_jt[jji, 0] = jt
J_type[jji] = str_routing
# debug protocol
if debug and print_out:
pr_up = Jt_up[jti, :]
pr_up = pr_up[pr_up != -1]
print('''j: {0:d}, Pos: {1:d}/{2:d} = {3:d} -> {4:d},
Jt_up: {5:s}, {6:s} cell'''.format(
jj, x, y, jt, Jt_down[jt-1],
str(pr_up[~np.isnan(pr_up)])[1:-1], str_routing))
# define upstream cell subset and give position indices
ssj = np.flatnonzero(np.any(J_jt == ss, 1))
# one or two upstream cells: connect two real cells in Jt_up and Jt_down
if ssj.shape[0] <= 2:
ssjl = ssj.shape[0]
ssjtu = Jt_up[jti, :ssjl]
ssjtu = ssjtu[ssjtu != -1]
J_up[jji, :ssjl] = J[np.flatnonzero(np.any(J_jt == ssjtu, 1))]
J_down[ssj] = jj
    # > 2 upstream cells: connect 1 real and 1 dummy cell in Jt_up and Jt_down
else:
real = J[np.amax(ssj)]
dummy = np.amax(J_down[ssj])
J_up[jji, :] = [dummy, real]
J_down[[dummy-1, real-1]] = jj
# Define downstream cell as 0
J_down[jji] = Jt_down[jti]
# create final index array Jm and final dummy index list J_dj
if print_out: print('...create final index array and final dummy index list...')
# preallocate arrays
Jm = np.ones(Jtm.shape, dtype=np_type) * -1
J_dj = np.ones(J_up.shape[0], dtype=np_type) * def_val_dtgb
# iterate all cells
Jtm_it = np.nditer(Jtm, flags=['multi_index'])
while not Jtm_it.finished:
# if cell is a valid ID, find cell in list
if Jtm_it[0] != -1:
Jm[Jtm_it.multi_index] = J[np.flatnonzero(J_jt == Jtm_it[0])]
Jtm_it.iternext()
# create real representative index list for dummy cells iterating all
# final indices
for jj in range(1, J_up.shape[0]+1):
jji = jj - 1
# if cell is a dummy, find the connected real cell
if J_type[jji] == str_dummy:
# follow dummy cascade downwards until real cell and set index
mmi = jji
while J_type[mmi] == str_dummy:
mm = J_down[mmi]
mmi = mm - 1
J_dj[jji] = mm
# calculate cell name and coordinates
if print_out: print('...calculate coordinates...')
# preallocate variable
X = []
Y = []
# iterate final index
for jj in range(1, J_down.shape[0]+1):
jji = jj - 1
# if jj is a dummy, insert X and Y coordinates using dummy list
if J_type[jji] == str_dummy:
# calculate coordinate indices
xy = np.nonzero(Jm == J_dj[jji])
# if it is a head water or routing cell, insert X and Y coordinates
# using index array
else:
# calculate coordinate indices
xy = np.nonzero(Jm == jj)
# if jj is no dummy, insert X and Y coordinates
X.append(xll + (xy[1][0] + 1 - 0.5) * cellsz)
Y.append(yll + (nrows - xy[0][0] - 1 + 0.5) * cellsz)
# calculate upstream inflow catchment area of each routing cell
# pre-allocate variable
J_A = np.zeros(J.shape)
# iterate all cells
for jj_ii, jj in enumerate(J):
# if it is a routing or the outflow cell, calculate area
if J_type[jj_ii] == str_routing:
J_A[jj_ii] = fa[Jm == jj] * ((cellsz / 1000)**2)
# export model cell to point feature classes
if print_out: print('...create model cell point feature classes...')
# create pandas data frames
structarr_tgb_in = list(zip(J_down, J_type, J_A, X, Y))
df_mn = pd.DataFrame(structarr_tgb_in, index=J,
columns=[f_tgb_down, f_tgb_type, f_tgb_a, f_x, f_y])
df_tgb_up = pd.DataFrame(J_up, index=J, columns=[f_tgb_up1, f_tgb_up2])
# create real representative index list for dummy subcatchments
ser_tgb_dtgb = real_repr_idx(df_mn, str_dummy, print_out=print_out)
# create names of model subcatchments
ser_nrflv = pd.Series(df_mn.shape[0] * '', index=df_mn.index, name=f_nrflv)
for tgb, el_type in df_mn.loc[:, f_tgb_type].iteritems():
        ser_nrflv.at[tgb] = '{0:s}{1:05d}'.format(el_type[0].upper(), tgb)
# summarize DataFrames
df_tgb = pd.concat([df_mn, ser_tgb_dtgb, ser_nrflv], axis=1)
# summarize information for export
ser_tgb = df_tgb.index.to_series(name=f_tgb)
df_data_tgb_p = pd.concat(
[ser_tgb, df_tgb.loc[:, [f_tgb_down, f_tgb_type,
f_tgb_dtgb, f_tgb_a, f_x, f_y]]], axis=1)
# create spatial reference object
sr_obj = arcpy.Describe(path_fd_mr_e).spatialReference
# export to point feature classes
tc.tgb_to_points(df_data_tgb_p, sr_obj, path_gdb_out, name_tgb_p,
geometry_fields=(f_x, f_y))
tc.tgb_to_points(df_data_tgb_p, sr_obj, path_gdb_out, name_tgb_down_p,
geometry_fields=(f_x, f_y))
# create model network polyline feature class
if print_out: print('...create model network polyline feature class...')
# import cell information
arcpy.AddIndex_management(name_tgb_p, f_tgb_down, f_tgb_down,
'NON_UNIQUE', 'NON_ASCENDING')
# delete non-relevant fields of downstream feature class
arcpy.DeleteField_management(name_tgb_down_p, '{0}; {1}; {2}; {3}'.format(
f_tgb_dtgb, f_tgb_down, f_tgb_type, f_tgb_a))
# add coordinates to both feature classes
arcpy.AddXY_management(name_tgb_p)
arcpy.AddXY_management(name_tgb_down_p)
# alter coordinate fields of downstream feature class
f_p_xd = 'POINT_Xd'
f_p_yd = 'POINT_Yd'
arcpy.AlterField_management(name_tgb_down_p, f_p_x, f_p_xd, f_p_xd,
'', '4', 'NULLABLE', 'DO_NOT_CLEAR')
arcpy.AlterField_management(name_tgb_down_p, f_p_y, f_p_yd, f_p_yd,
'', '4', 'NULLABLE', 'DO_NOT_CLEAR')
# join information from downstream cells
tgb_l_join = arcpy.management.AddJoin(name_tgb_p, f_tgb_down, name_tgb_down_p,
f_tgb, 'KEEP_COMMON')
# calculate line features
if arcpy.Exists(path_mnw): arcpy.management.Delete(path_mnw)
arcpy.XYToLine_management(tgb_l_join, path_mnw, f_p_x, f_p_y,
f_p_xd, f_p_yd, 'GEODESIC', f_tgb, sr_obj)
# delete downstream neighbour model cell point feature class
arcpy.Delete_management(name_tgb_down_p)
return df_data_tgb_p, df_tgb_up
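# %% usage sketch: calc_model_network (illustrative, not part of the original library)
# The returned DataFrames describe the model element tree: df_data_tgb_p holds
# one row per element with its downstream element, type ('headwater', 'routing'
# or 'dummy'), the representative real element for dummies, the inflowing
# catchment area and coordinates; df_tgb_up lists up to two upstream elements.
# Paths below are assumptions chosen for demonstration.
def _example_calc_model_network(path_gdb, path_tmp):
    df_tgb, df_tgb_up = calc_model_network(path_gdb + 'ws_s',
                                           path_gdb + 'fd_mr_corr',
                                           path_gdb + 'fa_mr',
                                           path_gdb, path_tmp,
                                           print_out=True)
    # e.g. list the upstream neighbours of all routing elements
    routing = df_tgb[df_tgb['tgb_type'] == 'routing']
    print(df_tgb_up.loc[routing.index])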
# %% function to preprocess raster files, which are used for routing parameters
def prepr_routing_rasters(path_dem_hr, path_fnw, path_ws_s, path_fa_mr,
path_gdb_out, name_fa_hr='fa_hr',
name_dem_hr_ws='dem_hr_ws', name_fl_fnw_mr='fl_fnw_mr',
name_dem_max_mr='dem_max_mr', name_dem_min_mr='dem_min_mr',
initial=True, print_out=False):
"""
Preprocesses raster files, which are used to calculate routing parameters.
JM 2021
Arguments:
-----------
path_dem_hr: str (e.g., 'c:\model_creation\dem_hr')
        path of the high-resolution digital elevation raster
path_fnw: str (e.g., 'c:\model_creation\fnw')
path of the flow network polyline feature class or shape file
path_ws_s: str (e.g., 'c:\model_creation\ws_s')
path of the model watershed polygon feature class
path_fa_mr: str (e.g., 'c:\model_creation\fa_mr')
path of the model resolution flow accumulation raster
path_gdb_out: str (e.g., 'c:\model_creation.gdb')
path of the output file geodatabase
name_fa_hr: str (optional, default: 'fa_hr')
name of the output extracted high resolution flow accumulation raster
name_dem_hr_ws: str (optional, default: 'dem_hr_ws')
name of the output extracted high resolution digital elevation raster
name_fl_fnw_mr: str (optional, default: 'fl_fnw_mr')
name of the output model resolution flow length at flow network location
name_dem_max_mr: str (optional, default: 'dem_max_mr')
name of the output model resolution maximum value of the high resolution DEM
name_dem_min_mr: str (optional, default: 'dem_min_mr')
name of the output model resolution minimum value of the high resolution DEM
initial: boolean (optional, default: True)
        true if it is the first run and all steps have to be calculated from scratch
    print_out: boolean (optional, default: False)
        true if work progress shall be printed to the command line
Returns:
-----------
Saves the following outputs:
- extracted high resolution digital elevation raster (model domain)
- model resolution flow length at flow network location (else: NaN)
- model resolution maximum value of the high resolution elevation raster
- model resolution minimum value of the high resolution elevation raster
"""
# arcpy field names (ONLY CHANGE IF ARCPY FUNCTION CHANGES FORCE YOU!)
f_oid = 'OBJECTID'
f_val = 'Value'
f_cellsz_x = 'CELLSIZEX'
method_mean = 'MEAN'
method_max = 'MAXIMUM'
method_min = 'MINIMUM'
# paths for intermediates
path_fnw_r = path_gdb_out + 'fnw_r'
path_fnw_mr = path_gdb_out + 'fnw_mr'
path_dem_hr_f = path_gdb_out + 'dem_hr_f'
path_fd_hr = path_gdb_out + 'fd_hr'
path_fl_hr = path_gdb_out + 'fl_hr'
path_fl_snfnw = path_gdb_out + 'fl_snfnw'
path_fl_snfa_mr = path_gdb_out + 'fl_snfa_mr'
if initial: path_fl_aggr_mr = path_gdb_out + 'fl_aggr_mr'
# paths for outputs
path_dem_hr_ws = path_gdb_out + name_dem_hr_ws
path_fa_hr = path_gdb_out + name_fa_hr
path_fl_fnw_mr = path_gdb_out + name_fl_fnw_mr
if initial:
path_dem_max_mr = path_gdb_out + name_dem_max_mr
path_dem_min_mr = path_gdb_out + name_dem_min_mr
# set workspace
arcpy.env.workspace = path_gdb_out
# if it is the first calculation run, calculate high-resolution flow length
if initial:
if print_out: print('...calculate high-resolution flow length...')
# save default environments
default_env_snr = arcpy.env.snapRaster
default_env_ext = arcpy.env.extent
# set environments
arcpy.env.extent = 'MAXOF'
arcpy.env.snapRaster = path_dem_hr
# clip high resolution digital elevation raster to model domain
if print_out: print(' step 1/7: clip high resolution DEM to model domain...')
if arcpy.Exists(path_dem_hr_ws): arcpy.management.Delete(path_dem_hr_ws)
dem_hr_ws = arcpy.sa.ExtractByMask(path_dem_hr, path_ws_s)
dem_hr_ws.save(path_dem_hr_ws)
# fill corrected high resolution digital elevation raster
if print_out: print(' step 2/7: fill clipped high resolution DEM...')
if arcpy.Exists(path_dem_hr_f): arcpy.management.Delete(path_dem_hr_f)
dem_hr_f = arcpy.sa.Fill(path_dem_hr_ws, None)
dem_hr_f.save(path_dem_hr_f)
# calculate flow direction for filled digital elevation raster
if print_out: print(' step 3/7: calculate high resolution flow direction...')
if arcpy.Exists(path_fd_hr): arcpy.management.Delete(path_fd_hr)
fd_hr = arcpy.sa.FlowDirection(path_dem_hr_f, 'NORMAL', None, 'D8')
fd_hr.save(path_fd_hr)
# calculate flow accumulation
if print_out: print(' step 4/7: calculate high resolution flow accumulation...')
if arcpy.Exists(path_fa_hr): arcpy.management.Delete(path_fa_hr)
fa_hr = arcpy.sa.FlowAccumulation(path_fd_hr)
fa_hr.save(path_fa_hr)
# calculate flow length for flow direction
if print_out: print(' step 5/7: calculate high resolution flow length...')
if arcpy.Exists(path_fl_hr): arcpy.management.Delete(path_fl_hr)
fl_hr = arcpy.sa.FlowLength(path_fd_hr, 'DOWNSTREAM', None)
fl_hr.save(path_fl_hr)
# convert flow network polyline feature class to high resolution raster
if print_out: print((' step 6/7: convert flow network polyline feature '
'class to high resolution raster...'))
if arcpy.Exists(path_fnw_r): arcpy.management.Delete(path_fnw_r)
arcpy.conversion.PolylineToRaster(path_fnw, 'OBJECTID', path_fnw_r,
'MAXIMUM_LENGTH', 'NONE', path_dem_hr_ws)
# set flow length to nan if flow network raster is nan
if print_out: print((' step 7/7: set flow length to nan if flow network '
'raster is nan...'))
if arcpy.Exists(path_fl_snfnw): arcpy.management.Delete(path_fl_snfnw)
setn_expr = '{0} IS NULL'.format(f_val)
fl_snfnw = arcpy.ia.SetNull(path_fnw_r, path_fl_hr, setn_expr)
fl_snfnw.save(path_fl_snfnw)
# reset environments
arcpy.env.snapRaster = default_env_snr
arcpy.env.extent = default_env_ext
# Aggregate flow length to model resolution
if print_out: print('...aggregate flow length to model resolution...')
# save default environments
default_env_snr = arcpy.env.snapRaster
default_env_ext = arcpy.env.extent
default_env_mask = arcpy.env.mask
# set environments
arcpy.env.snapRaster = path_fa_mr
arcpy.env.extent = path_fa_mr
arcpy.env.mask = path_fa_mr
# get high resolution and model resolution cell size
cell_sz_x_obj = arcpy.GetRasterProperties_management(path_dem_hr_ws, f_cellsz_x)
cell_sz_x = np.int32(cell_sz_x_obj.getOutput(0))
cellsz_obj = arcpy.GetRasterProperties_management(path_fa_mr, f_cellsz_x)
cellsz = np.int32(cellsz_obj.getOutput(0))
# aggregate flow length to final cell size
if initial:
fl_aggr_mr = arcpy.sa.Aggregate(
path_fl_snfnw, str(np.int32(cellsz/cell_sz_x)),
method_mean, 'EXPAND', 'DATA')
fl_aggr_mr.save(path_fl_aggr_mr)
# set aggregated flow length at flow accumulation areas < 0.1 km² to nan
expr_sql = '{0:s} < {1:.0f}'.format(f_val, 1000/cellsz)
fl_snfa_mr = arcpy.ia.SetNull(path_fa_mr, path_fl_aggr_mr, expr_sql)
fl_snfa_mr.save(path_fl_snfa_mr)
# convert polylines to raster in model grid size
arcpy.conversion.PolylineToRaster(path_fnw, f_oid, path_fnw_mr,
'MAXIMUM_LENGTH', 'NONE', path_fa_mr)
# set aggregated flow length to nan if aggregated flow network is nan as well
if arcpy.Exists(path_fl_fnw_mr): arcpy.management.Delete(path_fl_fnw_mr)
fl_fnw_mr = arcpy.ia.SetNull(arcpy.ia.IsNull(path_fnw_mr), path_fl_snfa_mr)
fl_fnw_mr.save(path_fl_fnw_mr)
# Aggregate high-resolution DEM to model resolution extracting min and max values
if initial:
if print_out: print(('...calculate min and max high resolution DEM values '
'in model resolution...'))
if arcpy.Exists(path_dem_max_mr): arcpy.management.Delete(path_dem_max_mr)
if arcpy.Exists(path_dem_min_mr): arcpy.management.Delete(path_dem_min_mr)
dem_max_mr = arcpy.sa.Aggregate(path_dem_hr_ws, str(cellsz),
method_max, 'EXPAND', 'DATA')
dem_min_mr = arcpy.sa.Aggregate(path_dem_hr_ws, str(cellsz),
method_min, 'EXPAND', 'DATA')
dem_max_mr.save(path_dem_max_mr)
dem_min_mr.save(path_dem_min_mr)
# reset environments
arcpy.env.snapRaster = default_env_snr
arcpy.env.extent = default_env_ext
arcpy.env.mask = default_env_mask
# %% function to create point feature class indicating elements, where no
# high-resolution flow length shall be calculated
def create_fl_ind_point(sr_obj,
path_gdb_out, name_no_fnw_fl='no_fnw_fl',
print_out=False):
"""
Creates a point feature class in the defined file geodatabase to be filled
by the user with points. These points indicate cells, for which no high
resolution flow length shall be calculated, but the model resolution is used
instead. The point feature class has neither Z- nor M-values.
JM 2021
Arguments:
-----------
sr_obj: arcpy.SpatialReferenceObject
arcpy.Object containing the spatial reference of the final feature class
path_gdb_out: str
path of the output file geodatabase (e.g., 'c:\model_creation.gdb')
name_no_fnw_fl: str (optional)
name of the output indication point feature class (e.g., 'no_fnw_fl')
print_out: boolean
        true if work progress shall be printed to the command line
Returns:
-----------
Saves the output pour point feature class
"""
if print_out: print('...create indication point feature class...')
# set path for output
path_no_fnw_fl = path_gdb_out + name_no_fnw_fl
# prepare indication point feature class
if arcpy.Exists(path_no_fnw_fl): arcpy.management.Delete(path_no_fnw_fl)
arcpy.CreateFeatureclass_management(path_gdb_out, name_no_fnw_fl, 'POINT', '',
'DISABLED', 'DISABLED', sr_obj, '', '0',
'0', '0', '')
# %% Create polygon feature class representing the model elements
def create_element_polyg(path_tgb_p, path_sn_raster, path_gdb_out,
name_tgb_s='tgb_s', print_out=False):
"""
Creates a polygon feature class in the defined file geodatabase, which includes
all values of the input point feature class and represents the model element
raster structure. The feature class only includes model elements, which are no
dummy elements and covers the whole model domain.
JM 2021
Arguments:
-----------
path_tgb_p: str
        path of the model cell point feature class
(e.g., 'c:\model_creation.gdb\tgb_p')
path_sn_raster: str
path of the raster, which represents the model raster
path_gdb_out: str
path of the output file geodatabase (e.g., 'c:\model_creation.gdb')
name_tgb_s: str (optional, default: 'tgb_s')
name of the output model element polygon feature class
print_out: boolean
        true if work progress shall be printed to the command line
Returns:
-----------
Saves a polygon feature class representing the model elements
"""
# define internal variables
def_val_dtgb = -1
# define internal field names
f_tgb = 'tgb'
f_tgb_dtgb = 'tgb_dtgb'
# arcpy field names (ONLY CHANGE IF ARCPY FUNCTION CHANGES FORCE YOU!)
f_val = 'VALUE'
f_cellsz_x = 'CELLSIZEX'
f_gridcode = 'gridcode'
# define paths of intermediates in working geodatabase
name_tgb_p_nodum = 'tgb_p_nodum'
name_tgb_p_sel = 'tgb_p_sel'
name_tgb_r = 'tgb_r'
# set workspace
arcpy.env.workspace = path_gdb_out
# Calculate model element polygons in raster structure
if print_out: print('...Calculate model element polygons in raster structure...')
# save original environment settings
default_env_snr = arcpy.env.snapRaster
# select elements which are not dummys and copy features to a new layer
sel_expr = '{0:s} = {1:d}'.format(f_tgb_dtgb, def_val_dtgb)
name_tgb_p_sel = arcpy.management.SelectLayerByAttribute(
path_tgb_p, 'NEW_SELECTION', sel_expr, '')
arcpy.CopyFeatures_management(name_tgb_p_sel, name_tgb_p_nodum, '', '', '', '')
arcpy.management.SelectLayerByAttribute(path_tgb_p, 'CLEAR_SELECTION', '', None)
# set environment
arcpy.env.snapRaster = path_sn_raster
# get model cell size
cellsz_obj = arcpy.GetRasterProperties_management(path_sn_raster, f_cellsz_x)
cellsz = np.int32(cellsz_obj.getOutput(0))
    # create element features from the point layer by converting it to a raster
arcpy.PointToRaster_conversion(name_tgb_p_nodum, f_tgb, name_tgb_r,
'MOST_FREQUENT', 'NONE', str(cellsz))
arcpy.RasterToPolygon_conversion(name_tgb_r, name_tgb_s, 'NO_SIMPLIFY',
f_val, 'SINGLE_OUTER_PART', '')
arcpy.AlterField_management(name_tgb_s, f_gridcode, f_tgb, f_tgb)
# restore environment
arcpy.env.snapRaster = default_env_snr
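# %% usage sketch: create_element_polyg (illustrative, not part of the original library)
# Converts the non-dummy model element points back into a polygon feature class
# aligned with the model raster; a model resolution raster (e.g. the flow
# accumulation raster) serves as snap raster. Paths are assumptions.
def _example_create_element_polyg(path_gdb):
    create_element_polyg(path_gdb + 'tgb_p', path_gdb + 'fa_mr', path_gdb,
                         name_tgb_s='tgb_s', print_out=True)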
# %% summarize GIS data for runoff concentration and routing parameter calculation
def summar_gisdata_for_roandrout(
path_tgb_p, path_dem_max_mr, path_dem_min_mr,
path_fl_mr, path_fl_fnw_mr, path_no_fnw_fl,
path_gdb_out, name_tgb_par_p='tgb_par_p',
field_dem_max_mr='dem_max_mr', field_dem_min_mr='dem_min_mr',
field_fl_fnw_mean_mr='fl_fnw_mean_mr', field_fl_mr='fl_mr',
print_out=False):
"""
Creates a point feature class in the defined file geodatabase, which includes
values of the maximum and minimum elevation as well as the flow network and
the model resolution flow length within each model element.
JM 2021
Arguments:
-----------
    path_tgb_p: str
        path of the model cell point feature class
        (e.g., 'c:\model_creation.gdb\tgb_p')
    path_dem_max_mr: str
        path of the model resolution maximum elevation raster
        (e.g., 'c:\model_creation.gdb\dem_max_mr')
    path_dem_min_mr: str
        path of the model resolution minimum elevation raster
        (e.g., 'c:\model_creation.gdb\dem_min_mr')
    path_fl_mr: str
        path of the model resolution flow length raster
        (e.g., 'c:\model_creation.gdb\fl_mr')
    path_fl_fnw_mr: str
        path of the model resolution flow length raster at flow network locations
        (e.g., 'c:\model_creation.gdb\fl_fnw_mr')
    path_no_fnw_fl: str
        path of the point feature class marking cells where no flow network
        flow length shall be used
        (e.g., 'c:\model_creation.gdb\no_fnw_fl')
path_gdb_out: str
path of the output file geodatabase (e.g., 'c:\model_creation.gdb')
name_tgb_par_p: str (optional, default: 'tgb_par_p')
name of the output model element point feature class with extracted
parameters
field_dem_max_mr: str (optional, default: 'dem_max_mr')
name of the field in name_tgb_par_p containing max elevation value
field_dem_min_mr: str (optional, default: 'dem_min_mr')
name of the field in name_tgb_par_p containing min elevation value
field_fl_fnw_mean_mr: str (optional, default: 'fl_fnw_mean_mr')
name of the field in name_tgb_par_p containing flow network flow length
field_fl_mr: str (optional, default: 'fl_mr')
name of the field in name_tgb_par_p containing model resolution flow length
print_out: boolean
        true if work progress shall be printed to the command line
Returns:
-----------
Saves model element point feature class with extracted parameters:
- minimum elevation
- maximum elevation
- model resolution flow length
- flow network flow length
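    Example:
    -----------
    A minimal call sketch (paths taken from the argument examples above; it is
    assumed that arcpy with the required extensions is licensed and that the
    geodatabase and all input datasets already exist):

        summar_gisdata_for_roandrout(
            r'c:\model_creation.gdb\tgb_p',
            r'c:\model_creation.gdb\dem_max_mr',
            r'c:\model_creation.gdb\dem_min_mr',
            r'c:\model_creation.gdb\fl_mr',
            r'c:\model_creation.gdb\fl_fnw_mr',
            r'c:\model_creation.gdb\no_fnw_fl',
            r'c:\model_creation.gdb', print_out=True)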
"""
# define internal variables
f_tgb = 'tgb'
# set workspace
arcpy.env.workspace = path_gdb_out
# define paths of intermediates in working geodatabase
name_fl_fnw_mr_corr = 'fl_fnw_mr_corr'
name_no_fnw_fl_r = 'no_fnwfl_r'
# save original environment settings
default_env_snr = arcpy.env.snapRaster
default_env_ext = arcpy.env.extent
# If field fl_mr (model resolution flow length) does not exist,
# add field while extracting flow length values
if not arcpy.ListFields(path_tgb_p, field_fl_mr):
if print_out: print('...extract flow length values...')
arcpy.gp.ExtractMultiValuesToPoints_sa(
path_tgb_p, path_fl_mr + ' ' + field_fl_mr, 'NONE')
# If there are any flow length correction points
# remove values of fl_mr at TGBs marked with a feature point in no_fnw_fl
if arcpy.management.GetCount(path_no_fnw_fl)[0] != '0':
if print_out: print('...correct marked flow length values...')
# set environments
arcpy.env.extent = 'MAXOF'
arcpy.env.snapRaster = path_fl_mr
# convert correction points to raster and remove flow network flow length values
arcpy.PointToRaster_conversion(path_no_fnw_fl, 'OBJECTID', name_no_fnw_fl_r,
'MOST_FREQUENT', 'NONE', path_fl_mr)
fl_fnw_mr_corr = arcpy.ia.Con(arcpy.ia.IsNull(name_no_fnw_fl_r), path_fl_fnw_mr)
fl_fnw_mr_corr.save(name_fl_fnw_mr_corr)
# restore environments
arcpy.env.extent = default_env_ext
arcpy.env.snapRaster = default_env_snr
# Extract min and max elevation and flow length values to model point features
if print_out: print('...extract raster values to model point features...')
# copy model element point features to a new feature class
arcpy.management.CopyFeatures(path_tgb_p, name_tgb_par_p)
# if there are any flow length correction points, add information from corrected
if arcpy.management.GetCount(path_no_fnw_fl)[0] != '0':
arcpy.sa.ExtractMultiValuesToPoints(name_tgb_par_p, [
[path_dem_max_mr, field_dem_max_mr],
[path_dem_min_mr, field_dem_min_mr],
[name_fl_fnw_mr_corr, field_fl_fnw_mean_mr]], 'NONE')
# else use original files
else:
arcpy.sa.ExtractMultiValuesToPoints(name_tgb_par_p, [
[path_dem_max_mr, field_dem_max_mr],
[path_dem_min_mr, field_dem_min_mr],
[path_fl_fnw_mr, field_fl_fnw_mean_mr]], 'NONE')
# delete identical (Workaround for Bug in ExtractMultiValuesToPoints)
arcpy.management.DeleteIdentical(name_tgb_par_p, f_tgb, None, 0)
# %% calculate parameters for tgb.dat
def calc_roandrout_params(cellsz, q_spec_ch, name_tgb_par_p,
field_dem_max_mr='dem_max_mr', field_dem_min_mr='dem_min_mr',
field_fl_mr='fl_mr', field_fl_fnw_mean_mr='fl_fnw_mean_mr',
def_fl_upper_lim=np.inf, def_fl_strct_mism=2, def_sl_min=0.0001,
def_sl_excl_quant=None, def_zmin_rout_fac=0.5, def_zmax_fac=1,
ser_q_in_corr=None, ch_est_method='combined', def_bx=0, def_bbx_fac=1,
def_bnm=1.5, def_bnx=100, def_bnvrx=4, def_skm=30, def_skx=20,
print_out=False):
"""
    Calculates the runoff concentration and routing parameters for every model
    element, which are needed for the LARSIM tgb.dat file, and returns them
    together with further characteristic values as pandas objects.
JM 2021
Arguments:
-----------
cellsz: integer
edge length of the model elements in [m] (e.g., 100)
q_spec_ch: float
channel forming specific flood discharge value [m3s-1km-2] (e.g., 0.21)
name_tgb_par_p: str
        path of the input model element point feature class with the following
parameters for each element except dummy elements:
- maximum elevation value
- minimum elevation value
- channel model resolution flow length
- channel flow network flow length
(e.g., 'c:\model_creation.gdb\tgb_p_fl')
field_dem_max_mr: str (optional, default: 'dem_max_mr')
name of the field in name_tgb_par_p containing max elevation value
field_dem_min_mr: str (optional, default: 'dem_min_mr')
name of the field in name_tgb_par_p containing min elevation value
field_fl_mr: str (optional, default: 'fl_mr')
name of the field in name_tgb_par_p containing model resolution flow length
field_fl_fnw_mean_mr: str (optional, default: 'fl_fnw_mean_mr')
name of the field in name_tgb_par_p containing flow network flow length
def_sl_min: float (optional, default: 0.0001)
minimum channel slope value to be maintained due to LARSIM-internal
restrictions
def_fl_strct_mism: int (optional, default: 2)
default flow length for structural mismatch and negative transition
deviations [m]. attention: 1 [m] is interpreted by LARSIM as 'no routing'!
def_fl_upper_lim: int (optional, default: inf)
upper threshold for realistic flow length [m]
def_sl_excl_quant: float (optional, default: None)
quantile of slope values to be set constant to quantile value
(e.g., 0.999 sets the upper 0.1% of the slope values to the
0.1% quantile value)
def_zmin_rout_fac: float (optional, default: 0.5)
Factor to vary the lower elevation of runoff concentration between
the minimum (0) and maximum (1) channel elevation of the element.
By default, the factor is set to the average elevation (0.5) [-]
def_zmax_fac: float (optional, default: 1)
Factor to vary the upper elevation of runoff concentration between
        the minimum (0) and maximum (1) elevation of the element. By default,
        the factor is set to the maximum elevation (1) [-]
    ser_q_in_corr: pandas.Series (optional, default: None)
Series of channel-forming inflow (e.g., HQ2) at the corresponding
model element ID in the serie's index.
(e.g., pd.Series(np.array([2.8, 5.3]), index=[23, 359], name='q_in'))
ch_est_method: string (optional, default: 'combined')
String defining channel estimation function. Possible values:
- 'Allen': Allen et al. (1994)
- 'Krauter': Krauter (2006)
- 'combined': Allen et al.(1994) for small and Krauter (2006) for
large areas
def_bx: float (optional, default: 0)
Float defining the flat foreland width left and right [m]
def_bbx_fac: float (optional, default: 1)
Float factor defining the slopy foreland width left and right,
which is calculated multiplying the channel width with this factor [-]
def_bnm: float (optional, default: 1.5 = 67%)
Float defining the channel embankment slope left and right [mL/mZ]
def_bnx: float (optional, default: 100 = nearly flat foreland)
Float defining the slopy foreland slope left and right [mL/mZ]
def_bnvrx: float (optional, default: 4 = 25%)
Float defining the outer foreland slope left and right [mL/mZ]
def_skm: float (optional, default: 30 = natural channel, vegetated river bank)
Float defining the Strickler roughness values in the channel [m1/3s-1]
def_skx: float (optional, default: 20 = uneven vegetated foreland)
Float defining the Strickler roughness values of the left and right
foreland [m1/3s-1]
print_out: boolean (optional, default: False)
        true if work progress shall be printed to the command line
Returns:
-----------
df_data_tgbdat: pandas.DataFrame
DataFrame of all parameters, which are needed in the resulting file.
The DataFrame includes the model element ID as index and the following
columns:
- 'TGB': element ID number (int)
        - 'NRFLV': element name (str)
- 'FT': element area (float)
- 'HUT': lower elevation of runoff concentration [m]
- 'HOT': upper elevation of runoff concentration [m]
- 'TAL': maximum flow length for runoff concentration [km]
- 'X': x-coordinate of element center [m]
- 'Y': y-coordinate of element center [m]
- 'KMU': lower stationing of routing [m]
- 'KMO': upper stationing of routing [m]
- 'GEF': channel slope for routing [m]
- 'HM': channel depth [m]
- 'BM': channel width [m]
- 'BL': flat foreland width left [m]
- 'BR': flat foreland width right [m]
- 'BBL': slopy foreland width left [m]
- 'BBR': slopy foreland width right [m]
- 'BNM': channel embankment slope left and right [mL/mZ]
- 'BNL': slopy foreland slope left [mL/mZ]
- 'BNR': slopy foreland slope right [mL/mZ]
- 'BNVRL': outer foreland slope left [mL/mZ]
- 'BNVRR': outer foreland slope right [mL/mZ]
        - 'SKM': Strickler roughness values in the channel [m1/3s-1]
        - 'SKL': Strickler roughness values at the left foreland [m1/3s-1]
        - 'SKR': Strickler roughness values at the right foreland [m1/3s-1]
ser_tgb_down_nd: pandas.Series
Series of corresponding downstream model element indices ignoring dummy
elements. Model outlet remains -1 and dummy elements are represented as 0.
ser_ft: pandas.Series
Series of corresponding model element areas. Dummy elements have an
area of 0. [km²]
ser_area_outfl: pandas.Series
Series of corresponding model element inflow catchment areas.
Dummy elements have an area of 0. [km²]
ser_ch_form_q: pandas.Series
Series of elements' channel-forming discharge at the corresponding
model element ID in the serie's index.
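    Example:
    -----------
    Illustrative call sketch (cell size, specific discharge and the feature
    class path are the documented examples above; the feature class has to
    exist with all required fields):

        df_data_tgbdat, ser_tgb_down_nd, ser_ft, ser_area_outfl, ser_ch_form_q = \
            calc_roandrout_params(
                100, 0.21, r'c:\model_creation.gdb\tgb_p_fl',
                ch_est_method='combined', print_out=True)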
"""
# %% Redistribute flow length values at confluence points
def redistr_flowl_at_conflp(ser_fl, ser_tgb_down_nd, ser_tgb_up_nd,
ser_tgb_type_headw, ser_tgb_type_dummy):
"""
        This function redistributes flow length values at confluence points.
        Remember: the D8 flow length calculation yields flow length values that
        are multiples of 1 and sqrt(2) of the model resolution. The LARSIM
        convention assumes the confluence point of cells upstream of the routing
        element. Therefore, the resulting discrepancies at confluence points have
        to be balanced in upstream routing elements.
JM 2021
Arguments:
-----------
ser_fl: pandas.Series
Series of model element raster flow length corresponding to the serie's
ascending index. The flow length is calculated using the D8-flow direction
based on the model resolution digital elevation raster and using the
'DOWNSTREAM' option (outlet = 0). It may be clipped from a larger raster,
whereby the outlet is not zero anymore.
(e.g., pd.Series([300, 341, 200, 100], index=[1, 2, 3, 4], name='ser_fl'))
ser_tgb_down_nd: pandas.Series
Series of corresponding downstream model element indices ignoring dummy
elements. Model outlet remains -1 and dummy elements are represented as 0.
ser_tgb_up_nd: pandas.Series
Series of corresponding upstream model element indices ignoring dummy
elements. These are represented as empty array (e.g., []).
ser_tgb_type_headw: pandas.Series
Boolean Series, which identifies the headwater cells corresponding to the
serie's ascending index with True.
(e.g., pd.Series(data=[1, 1, 0, 0], index=[1, 2, 3, 4], name='headwater',
dtype='bool'))
ser_tgb_type_dummy: pandas.Series
Boolean Series, which identifies the dummy cells corresponding to the
serie's ascending index with True.
(e.g., pd.Series(data=[0, 0, 0, 0], index=[1, 2, 3, 4], name='dummy',
dtype='bool'))
Returns:
-----------
df_fl: pandas.DataFrame
DataFrame of corresponding model resolution flow length values. The DataFrame
includes the following columns:
- downstream share of flow length within cell ('down')
- downstream share confluence correction value ('corr_conf_down')
- corrected downstream share of flow length within cell ('corr_down')
- upstream share of flow length within cell ('up')
- corrected upstream share of flow length within cell ('corr_up')
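        Example:
        -----------
        Worked numbers (the topology is an assumption for illustration): with
        the example flow lengths above, headwaters 1 and 2 draining into
        routing cell 3 and cell 3 draining into outlet 4, the downstream shares
        are (300-200)/2 = 50 m (cell 1), (341-200)/2 = 70.5 m (cell 2) and
        (200-100)/2 = 50 m (cell 3). The upstream share of cell 3 is the mean
        of (300-200)/2 and (341-200)/2, i.e. 60.25 m; the residuals -10.25 m
        and +10.25 m are written to 'corr_conf_down' of cells 1 and 2, so their
        corrected downstream shares become 39.75 m and 80.75 m.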
"""
# define internal string variables
f_up = 'up'
f_down = 'down'
f_corr_up = 'corr_up'
f_corr_down = 'corr_down'
f_corr_conf_down = 'corr_conf_down'
# pre-allocate variable
df_fl = pd.DataFrame(np.zeros((ser_fl.shape[0], 5)) * np.nan,
index=ser_fl.index,
columns=[f_down, f_corr_conf_down, f_corr_down,
f_up, f_corr_up])
# copy model resolution flow length (GIS raster calculation)
# (dummy cells are nan, outflow not)
ser_fl.at[ser_tgb_type_dummy] = np.nan
df_fl.at[ser_tgb_type_dummy, :] = np.nan
# iterate elements to calculate flow length to downstream cell
# (only head water and routing cells)
for tgb, fl_sum_tgb in ser_fl.iteritems():
# if it is a head water or routing cell and not the outflow:
if not ser_tgb_type_dummy.at[tgb] and tgb != np.max(df_fl.index):
# get flow length of downstream cell
fl_sum_down = ser_fl.loc[ser_tgb_down_nd.at[tgb]]
# calculate flow length difference between recent and downstream cell
df_fl.at[tgb, f_down] = (fl_sum_tgb - fl_sum_down) / 2
# iterate elements to calculate flow length to upstream cells and correct it
for tgb, fl_sum_tgb in ser_fl.iteritems():
# if it is a head water cell set upstream flow length to zero
if ser_tgb_type_headw.at[tgb]:
df_fl.at[tgb, f_up] = 0
# if it is a routing cell allocate mean residuals to upstream cells
elif not ser_tgb_type_dummy.at[tgb]:
# get values of upstream cells
fl_sum_up = ser_fl.loc[ser_tgb_up_nd.at[tgb]]
# calculate mean of differences between recent and upstream cells
fl_dif_up = np.nanmean(fl_sum_up - fl_sum_tgb) / 2
df_fl.at[tgb, f_up] = fl_dif_up
# calculate mean downstream residuals and allocate it to upstream cells
fl_dif_up_rest = (fl_sum_up - fl_sum_tgb) / 2 - fl_dif_up
df_fl.loc[fl_dif_up_rest.index, f_corr_conf_down] = fl_dif_up_rest
# calculate sums of flow length shares
df_fl.loc[:, f_corr_down] = np.sum(
df_fl.loc[:, [f_down, f_corr_conf_down]], axis=1)
df_fl.loc[:, f_corr_up] = df_fl.loc[:, f_up].values
return df_fl
# %% Redistribute flow network flow length values at confluence points
def redistr_flowl_polyl_at_conflp(ser_fl_fnw, ser_tgb_down_nd, ser_tgb_up_nd,
ser_tgb_type_headw, ser_tgb_type_dummy, cellsz):
"""
This function redistributes the model resolution flow length values calculated
based on existing flow path polyline features.
Remember: The LARSIM convention assumes the confluence point of cells
upstream of the routing element. Therefore, the resulting discrepancies at
confluence points have to be balanced in upstream routing elements.
        Furthermore, the flow network balances might become negative due to
        unavoidable influences of neighbouring flow network elements. This is
        handled by setting such discrepancies to a symbolic value of 1 to prevent
        LARSIM from assuming a dummy cell. As it remains unclear which flow
        network element causes the influence, the (rather small) discrepancy has
        to stay unbalanced upstream.
Additionally, to prevent instabilities in the water routing calculation, a
correction and redistribution of very small flow lengths is introduced. If
the flow length is smaller than 10% of the model's cell size, the difference
to the actual flow length at the recent cell is redistributed from upstream
cells to the recent one.
JM 2021
Arguments:
-----------
ser_fl_fnw: pandas.Series
Series of model element polyline flow length corresponding to the serie's
ascending index. The flow length is calculated using the accumulative
lengths of polyline elements intersected with model raster polygons.
The outlet is the minimum value, but has not to be zero.
(e.g., pd.Series([308.4, 341.0, 204.5, 133.8], index=[1, 2, 3, 4],
name='ser_fl_fnw'))
ser_tgb_down_nd: pandas.Series
Series of corresponding downstream model element indices ignoring dummy
elements. Model outlet remains -1 and dummy elements are represented as 0.
ser_tgb_up_nd: pandas.Series
Series of corresponding upstream model element indices ignoring dummy
elements. These are represented as empty array (e.g., []).
ser_tgb_type_headw: pandas.Series
Boolean Series, which identifies the headwater cells corresponding to the
serie's ascending index with True.
(e.g., pd.Series(data=[1, 1, 0, 0], index=[1, 2, 3, 4], name='headwater',
dtype='bool'))
ser_tgb_type_dummy: pandas.Series
Boolean Series, which identifies the dummy cells corresponding to the
serie's ascending index with True.
(e.g., pd.Series(data=[0, 0, 0, 0], index=[1, 2, 3, 4], name='dummy',
dtype='bool'))
cellsz: int
Integer, which defines the model element edge length in [m]
Returns:
-----------
df_fl_fnw: pandas.DataFrame
DataFrame of corresponding model resolution flow length values.
The DataFrame includes the following columns:
- original downstream share of flow length within cell ('down')
- downstream correction value of confluences ('corr_conf_down')
- downstream correction value of redistribution ('corr_red_down')
- corrected downstream share of flow length within cell ('corr_down')
- upstream share of flow length within cell ('up')
- upstream correction value of redistribution ('corr_red_up')
- corrected upstream share of flow length within cell ('corr_up')
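        Example:
        -----------
        Worked numbers for the small-value redistribution (values assumed for
        illustration): with cellsz = 100 m, a routing cell whose summed flow
        network shares ('down' + 'corr_conf_down' + 'up') amount to only 4 m
        falls below the 10 % threshold of 10 m. The difference of 6 m is added
        to the cell as 'corr_red_up' and subtracted from its upstream cells as
        'corr_red_down', so the cell keeps at least 10 m of flow length.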
"""
# define internal string variables
f_up = 'up'
f_down = 'down'
f_corr_up = 'corr_up'
f_corr_down = 'corr_down'
f_corr_conf_down = 'corr_conf_down'
f_corr_red_up = 'corr_red_up'
f_corr_red_down = 'corr_red_down'
# pre-allocate variable
df_fl_fnw = pd.DataFrame(np.zeros((ser_fl_fnw.shape[0], 7))*np.nan,
index=ser_fl_fnw.index,
columns=[f_down, f_corr_conf_down,
f_corr_red_down, f_corr_down,
f_up, f_corr_red_up, f_corr_up])
# first column = high resolution flow length (GIS raster calculation)
# (dummy cells are nan, outflow not)
ser_fl_fnw.at[ser_tgb_type_dummy] = np.nan
df_fl_fnw.at[ser_tgb_type_dummy, :] = np.nan
# calculate flow distances
for tgb, fl_sum in ser_fl_fnw.iteritems():
# if high resolution flow length is not nan...
if not np.isnan(fl_sum):
# if it is a head water cell only calculate downstream part
if ser_tgb_type_headw.at[tgb]:
# find downstream cell and get flow length
fl_down = ser_fl_fnw.loc[ser_tgb_down_nd.at[tgb]]
# calculate flow length difference between recent and
# downstream cell
df_fl_fnw.at[tgb, f_down] = (fl_sum - fl_down) / 2
# set difference between recent and upstream cell to zero
df_fl_fnw.at[tgb, f_up] = 0
# if it is a routing cell...
elif not ser_tgb_type_dummy.at[tgb]:
# if it is not outflow
if tgb != np.max(ser_fl_fnw.index):
# find downstream cell and get flow length
fl_down = ser_fl_fnw.loc[ser_tgb_down_nd.loc[tgb]]
# downstream value is difference between recent and
# downstream cell or 1 [m] if it would be smaller
df_fl_fnw.at[tgb, f_down] \
= np.max([(fl_sum - fl_down) / 2, 1])
else:
# downstream difference is 0
df_fl_fnw.at[tgb, f_down] = 0
# find upstream cells and get flow lengths
jjnd_up = ser_tgb_up_nd.at[tgb]
# calculate flow length difference between recent
# and upstream cells
fl_dif_up = (ser_fl_fnw.loc[jjnd_up] - fl_sum) / 2
                    # correct negative upstream difference values and record them
                    fl_dif_up_ii = np.logical_and(~np.isnan(fl_dif_up),
                                                  fl_dif_up < 0)
fl_dif_up[fl_dif_up_ii] = 1
# calculate mean of difference between recent
# and upstream cells
if np.any(~np.isnan(fl_dif_up)):
fl_difmean_up = np.nanmean(fl_dif_up)
else:
fl_difmean_up = np.nan
df_fl_fnw.at[tgb, f_up] = fl_difmean_up
# calculate residual from mean calculation
df_fl_fnw.at[jjnd_up, f_corr_conf_down] \
= fl_dif_up - fl_difmean_up
# iterate cells in reversed calculation order from outflow to most
# upstream point and redistribute very small network values
for tgb in reversed(ser_fl_fnw.index):
# if high resolution flow length is not nan and it is a routing cell...
fl_sum = ser_fl_fnw[tgb]
            if not np.isnan(fl_sum) \
            and not (ser_tgb_type_headw.at[tgb] or ser_tgb_type_dummy.at[tgb]):
# add downstream, upstream and remaining flow length part of
# recent element
fl_fnw = np.nansum(
df_fl_fnw.loc[tgb, [f_down, f_corr_conf_down, f_up]])
# if the flow length is smaller than 10% of the cell size...
if fl_fnw < cellsz / 10:
# allocate the difference to 10% of cell size to the
# recent element
fl_fnw_dif_corr = cellsz / 10 - fl_fnw
df_fl_fnw.at[tgb, f_corr_red_up] = fl_fnw_dif_corr
# redistribute correction length to upstream cells
df_fl_fnw.at[ser_tgb_up_nd.at[tgb], f_corr_red_down] \
= - fl_fnw_dif_corr
# calculate sums of flow length shares
        df_fl_fnw.loc[:, f_corr_down] = np.sum(
                df_fl_fnw.loc[:, [f_down, f_corr_conf_down, f_corr_red_down]], axis=1)
        df_fl_fnw.loc[:, f_corr_up] = np.sum(
                df_fl_fnw.loc[:, [f_up, f_corr_red_up]], axis=1)
return df_fl_fnw
# %% Merge flow length from model resolution raster and flow network polylines
def merge_fnw_and_mr_fl(df_fl_mr, df_fl_fnw, ser_j_down, ser_tgb_down_nd,
ser_tgb_type_headw, ser_tgb_type_dummy,
def_fl_upper_lim=np.inf, def_fl_strct_mism=2):
"""
This function merges both model resolution flow length sources (1)
calculated based on existing flow path polyline features and (2) using
the D8-flow direction based on the model resolution digital elevation
raster and using the 'DOWNSTREAM' option (outlet = 0).
The flow length calculated from flow network polylines and model
resolution are potentially referenced to a different outflow point as
the extent of the DEM is different. The extent of the flow network
usually is larger, as the calculation of the model domain is based on
        the underlying high-resolution DEM. Therefore, flow length references
have to be reset at the outflow point of the model. Consequently, the
flow network and model resolution flow length values are merged.
JM 2021
Arguments:
-----------
df_fl_mr: pandas.DataFrame
DataFrame of corresponding model resolution flow length values. The
DataFrame includes the model element ID as index and the following columns:
- downstream share of flow length within cell ('down')
- corrected downstream share of flow length within cell ('corr_down')
- corrected upstream share of flow length within cell ('corr_up')
df_fl_fnw: pandas.DataFrame
DataFrame of corresponding model resolution flow length values. The
DataFrame includes the model element ID as index and the following columns:
- corrected downstream share of flow length within cell ('corr_down')
- corrected upstream share of flow length within cell ('corr_up')
ser_j_down: pandas.Series
Series of corresponding downstream model element indices.
Model outlet remains -1 and dummy elements are represented as 0.
ser_tgb_down_nd: pandas.Series
Series of corresponding downstream model element indices ignoring dummy
elements. Model outlet remains -1 and dummy elements are represented as 0.
ser_tgb_up_nd: pandas.Series
Series of corresponding upstream model element indices ignoring dummy
elements. These are represented as empty array (e.g., []).
ser_tgb_type_headw: pandas.Series
Boolean Series, which identifies the headwater cells corresponding to the
serie's ascending index with True.
(e.g., pd.Series(data=[1, 1, 0, 0], index=[1, 2, 3, 4], name='headwater',
dtype='bool'))
ser_tgb_type_dummy: pandas.Series
Boolean Series, which identifies the dummy cells corresponding to the
serie's ascending index with True.
(e.g., pd.Series(data=[0, 0, 0, 0], index=[1, 2, 3, 4], name='dummy',
dtype='bool'))
def_fl_upper_lim: int (optional, default: inf)
upper threshold for realistic flow length [m]
def_fl_strct_mism: int (optional, default: 2)
default flow length for structural mismatch and negative transition
deviations [m]. attention: 1 [m] is interpreted by LARSIM as 'no routing'!
Returns:
-----------
df_fl: pandas.DataFrame
DataFrame of corresponding model resolution flow length values.
The DataFrame includes the model element ID as index and the following
columns:
- accumulative flow length at lower boundary of cell ('lower')
- flow length value of cell ('length')
- accumulative flow length at upper boundary of cell ('upper')
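        Example:
        -----------
        Worked transition case (numbers assumed for illustration): if the
        recent cell has no flow network share (fl_fnw_down = 0) but the
        upstream flow network share of its downstream cell is 30 m, and the
        raster-based downstream share of the recent cell is 50 m, the
        downstream flow length becomes 50 * 2 - 30 = 70 m (decision key -100).
        If the resulting cell flow length were still <= def_fl_strct_mism, it
        would be set to def_fl_strct_mism.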
"""
# define internal string variables
f_up = 'up'
f_down = 'down'
f_corr_up = 'corr_up'
f_corr_down = 'corr_down'
f_lower = 'lower'
f_upper = 'upper'
f_length = 'length'
        # pre-allocate DataFrame for identification keys
df_fl_keys = pd.DataFrame(np.zeros((df_fl_mr.shape[0], 2))*np.nan,
index=df_fl_mr.index, columns=[f_down, f_up])
# pre-allocate DataFrame for flow length values
df_fl = pd.DataFrame(np.zeros((df_fl_mr.shape[0], 3))*np.nan,
index=df_fl_mr.index,
columns=[f_lower, f_length, f_upper])
# calculate outflow cell index
tgb_out = np.max(df_fl_mr.index)
# pre-set outflow flow length value to 1 [m]
df_fl.at[tgb_out, f_lower] = 1
# iterate all cells in reversed order
for tgb in reversed(df_fl_mr.index):
# if cell is a routing or headwater cell
if not ser_tgb_type_dummy.at[tgb]:
# find real downstream cell
jjnd_down = ser_tgb_down_nd.at[tgb]
# SET LOWER CUMULATIVE FLOW LENGTH OF RECENT AS UPPER OF DOWNSTREAM CELL
if tgb != tgb_out:
df_fl.at[tgb, f_lower] = df_fl.at[jjnd_down, f_upper]
else:
df_fl.at[tgb, f_lower] = 0
# DECIDE ABOUT BEHAVIOR USING DOWNSTREAM PART OF RECENT AND
# UPSTREAM PART OF DOWNSTREAM CELL
# get downstream flow network flow length of RECENT cell
if tgb != tgb_out: fl_fnw_down = df_fl_fnw.loc[tgb, f_corr_down]
else: fl_fnw_down = 0
# if (1) downstream flow network flow length of RECENT cell is > 0
# set downstream flow length to flow network flow length (key 1)
# (no further distinction, as fl_fnw_down > 0 && fl_fnw_down_up <= 0
# cannot exist due to the definition of df_fl_fnw)
if fl_fnw_down > 0:
fl_down = df_fl_fnw.loc[tgb, f_corr_down]
df_fl_keys.at[tgb, f_down] = 1
# if (2) downstream flow network flow length of RECENT cell is < 0
# than a potential structural mismatch between model resolution flow
# length and flow network flow length resulting from cell aggregation
# has to be corrected. The downstream flow length is set to flow network
# flow length (key -1)
elif fl_fnw_down < 0:
fl_down = df_fl_fnw.loc[tgb, f_corr_down]
df_fl_keys.at[tgb, f_down] = -1
# if (3) downstream flow network flow length of RECENT cell does not
# exist (= 0), than model resolution flow length is used and further
# distinction of cases is based on upstream flow network flow length
# of DOWNSTREAM cell
elif fl_fnw_down == 0:
# get upstream flow network flow length of DOWNSTREAM cell
# (except for outflow)
if tgb != tgb_out:
fl_fnw_down_up = df_fl_fnw.loc[jjnd_down, f_corr_up]
else:
fl_fnw_down_up = 0
# if (3.1) upstream flow network flow length of DOWNSTREAM cell
# does not exist (<= 0), than both cells have model resolution
# flow length and downstream flow length part is set to model
# resolution flow length (key 100)
if fl_fnw_down_up <= 0:
fl_down = df_fl_mr.loc[tgb, f_corr_down]
df_fl_keys.at[tgb, f_down] = 100
# if (3.2) upstream flow network flow length of DOWNSTREAM
# cell exists (> 0) than there is a transition from downstream
# flow network to recent cell model resolution flow length
# and the difference of model resolution and flow network
# flow length is calculated (key -100).
else:
fl_down = df_fl_mr.loc[tgb, f_down] * 2 - fl_fnw_down_up
df_fl_keys.at[tgb, f_down] = -100
# CALCULATE UPSTREAM AND SUM OF FLOW LENGTH OF RECENT CELL
# headwater cells: cell flow length = downstream part
if ser_tgb_type_headw.at[tgb]:
df_fl.at[tgb, f_length] = fl_down
# routing cells: cell flow length = downstream + upstream flow length
else:
# get upstream flow network flow length of RECENT cell
fl_fnw_up = df_fl_fnw.loc[tgb, f_corr_up]
# if upstream flow network flow length of RECENT cell is > 0
# set upstream flow length to flow network flow length (key 1)
if fl_fnw_up > 0:
fl_up = fl_fnw_up
df_fl_keys.at[tgb, f_up] = 1
# if upstream flow network flow length is = 0 (< 0 cannot exist)
# set upstream flow length to model resolution flow length (key 100)
else:
fl_up = df_fl_mr.loc[tgb, f_corr_up]
df_fl_keys.at[tgb, f_up] = 100
# sum down- and upstream flow length parts (except for outflow)
if tgb != tgb_out: df_fl.at[tgb, f_length] = fl_down + fl_up
else: df_fl.at[tgb, f_length] = fl_up
# DO CORRECTIONS
# if structural mismatches and transition values cannot be compensated
# by upstream flow length part (flow length < 0), set flow length to
# the threshold def_fl_strct_mism, a symbolic very small value
if np.isin(df_fl_keys.at[tgb, f_down], [-1, -100]) \
and df_fl.at[tgb, f_length] <= def_fl_strct_mism:
df_fl.at[tgb, f_length] = def_fl_strct_mism
# if flow length is unrealistic high (flow length > def_fl_upper_lim),
# set flow length to the threshold def_fl_upper_lim
if df_fl.at[tgb, f_length] > def_fl_upper_lim:
df_fl.at[tgb, f_length] = def_fl_upper_lim
# CALCULATE UPSTREAM CUMULATIVE FLOW LENGTH OF RECENT CELL
# headwater cells: use lower cumulative flow length as upper
# (not used in LARSIM, as there is no routing in head water cells)
if ser_tgb_type_headw.at[tgb]:
df_fl.at[tgb, f_upper] = df_fl.at[tgb, f_lower]
# routing cell, which is not outlet: calculate sum of downstream
# cumulative flow length and flow length of recent cell
elif tgb != tgb_out:
df_fl.at[tgb, f_upper] = df_fl.at[tgb, f_length] \
+ df_fl.at[tgb, f_lower]
# routing cell, which is outlet: use flow length of recent cell
else:
df_fl.at[tgb, f_upper] = df_fl.at[tgb, f_length]
# if cell is a dummy cell
else:
# take value from downstream cell for upper and lower value
df_fl.at[tgb, [f_lower, f_upper]] = df_fl.loc[ser_j_down.at[tgb], f_upper]
return df_fl
# %% Calculate cumulative flow length values respecting LARSIM conventions
def calc_cum_ch_fl(df_fl, ser_tgb_up, ser_tgb_type_headw, ser_tgb_type_dummy):
"""
This function calculates the cumulative flow length values respecting LARSIM
conventions.
        In elements with a difference of 1 [m] between upper and lower cumulative
        flow length (KMO and KMU), the routing will be ignored. Therefore, dummy
        and head water elements shall be set to a difference of 1 between KMO and
        KMU (KMO - KMU = 1). The function returns a pandas.DataFrame for KMO and KMU.
JM 2021
Arguments:
-----------
df_fl: pandas.DataFrame
DataFrame of corresponding model resolution flow length values. The
DataFrame includes the model element ID as index and the following
columns:
- accumulative flow length at lower boundary of cell ('lower')
- flow length value of cell ('length')
- accumulative flow length at upper boundary of cell ('upper')
ser_tgb_up: pandas.Series
Series of corresponding upstream model element indices.
Dummy elements are represented as empty array (e.g., []).
ser_tgb_type_headw: pandas.Series
Boolean Series, which identifies the headwater cells corresponding to the
serie's ascending index with True.
(e.g., pd.Series(data=[1, 1, 0, 0], index=[1, 2, 3, 4], name='headwater',
dtype='bool'))
ser_tgb_type_dummy: pandas.Series
Boolean Series, which identifies the dummy cells corresponding to the
serie's ascending index with True.
(e.g., pd.Series(data=[0, 0, 0, 0], index=[1, 2, 3, 4], name='dummy',
dtype='bool'))
Returns:
-----------
df_cum_ch_fl: pandas.DataFrame
DataFrame of corresponding runoff concentration parameters. The DataFrame
includes the model element ID as index and the following columns:
- corresponding lower cumulative flow length values (KMU) [m]
- corresponding upper cumulative flow length values (KMO) [m]
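        Example:
        -----------
        Worked numbers (assumed for illustration): a routing element with
        cumulative flow lengths lower = 230.4 m and upper = 410.7 m gets
        KMU = round(230.4) + dummy add and KMO = round(410.7) + dummy add,
        where the dummy adds start with 1 at the outlet and increase by 1 for
        every dummy element passed on the flow path. A headwater element
        receives an additional add of 1 on KMO, so that KMO - KMU = 1 marks it
        as an element without routing.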
"""
# define internal string variables
f_kmu = 'kmu'
f_kmo = 'kmo'
f_lower = 'lower'
f_upper = 'upper'
# Calculate Dummy adds for KMO and KMU
# pre-allocate arrays for adds
dummy_adds = pd.DataFrame(np.zeros((ser_tgb_up.shape[0], 2)),
index=ser_tgb_up.index, columns=[f_lower, f_upper])
# add of outlet is 1
tgb_out = np.max(ser_tgb_up.index)
dummy_adds.at[tgb_out, f_lower] = 1
# iterate all cells
for tgb in reversed(ser_tgb_up.index):
# get upstream cell IDs
tgb_up = ser_tgb_up.at[tgb]
# lower add of upstream cell is upper add of recent cell
dummy_adds.at[tgb_up, f_lower] = dummy_adds.at[tgb, f_upper]
# get indices of upstream dummy cells
tgb_up_dummys = ser_tgb_type_dummy.loc[tgb_up].index
# if upstream cell is not a dummy cell, upper add = lower add
dummy_adds.at[tgb_up, f_upper] = dummy_adds.loc[tgb_up, f_lower].values
# if upstream cell is a dummy cell, upper add = upper add + 1
dummy_adds.at[tgb_up_dummys, f_upper] \
= dummy_adds.loc[tgb_up_dummys, f_upper].values + 1
# Calculate head water adds
headw_adds = pd.Series(np.zeros((ser_tgb_up.shape[0])), index=ser_tgb_up.index,
name=f_upper)
headw_adds.at[ser_tgb_type_headw] = 1
# Add Dummy and Head Water Adds
ser_kmu = np.round(df_fl.loc[:, f_lower], 0) + dummy_adds.loc[:, f_lower]
ser_kmo = np.round(df_fl.loc[:, f_upper], 0) + dummy_adds.loc[:, f_upper] \
+ headw_adds
# summarize parameters
df_cum_ch_fl = pd.concat([ser_kmu, ser_kmo], axis=1)
df_cum_ch_fl.columns = [f_kmu, f_kmo]
return df_cum_ch_fl
# %% calculate channel elevation differences
def calc_ch_zdif(ser_zlower, df_fl,
ser_tgb_up_nd, ser_tgb_type_headw, ser_tgb_type_dummy,
def_sl_min=0.0001):
"""
This function calculates the channel elevation differences and corrects them
applying the LARSIM conventions. This means, that (1) a minimum channel slope
is maintained. The slope value might be very small, but is not allowed to be
        zero. As there are LARSIM-internal rounding mechanisms, slope values
        smaller than 0.0001 have to be avoided. Additionally, (2) multiple upstream
        neighbour elements have to be balanced, as only one elevation value can be
        applied to a single element. Elevation conservation is achieved by moving
        the elevation difference to the upstream element neighbours.
JM 2021
Arguments:
-----------
ser_zlower: pandas.Series
Series of model elements' minimum elevation corresponding to the serie's
ascending index.
(e.g., pd.Series([308.4, 341.0, 204.5, 133.8], index=[1, 2, 3, 4],
name='ser_zlower'))
df_fl: pandas.DataFrame
DataFrame of corresponding model resolution flow length values. The
DataFrame includes the model element ID as index and the following
columns:
- accumulative flow length at lower boundary of cell ('lower')
- flow length value of cell ('length')
- accumulative flow length at upper boundary of cell ('upper')
ser_tgb_up_nd: pandas.Series
Series of corresponding upstream model element indices ignoring dummy
elements. These are represented as empty array (e.g., []).
ser_tgb_type_headw: pandas.Series
Boolean Series, which identifies the headwater cells corresponding to the
serie's ascending index with True.
(e.g., pd.Series(data=[1, 1, 0, 0], index=[1, 2, 3, 4], name='headwater',
dtype='bool'))
ser_tgb_type_dummy: pandas.Series
Boolean Series, which identifies the dummy cells corresponding to the
serie's ascending index with True.
(e.g., pd.Series(data=[0, 0, 0, 0], index=[1, 2, 3, 4], name='dummy',
dtype='bool'))
def_sl_min: float (optional, default: 0.0001)
minimum channel slope value to be maintained due to LARSIM-internal
restrictions
Returns:
-----------
df_ch_zdif: pandas.DataFrame
DataFrame of corrected element elevation values. The DataFrame
includes the model element ID as index and the following columns:
- slope correction value [m] ('corr_sl')
- balancing correction value [m] ('corr_bal')
- corrected minimum channel elevation [m] ('lower_corr')
- corrected channel elevation difference [m] ('ch_zdif')
- corrected maximum channel elevation [m] ('upper_corr')
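        Example:
        -----------
        Worked numbers (assumed for illustration): for a routing element with
        flow length 200 m and def_sl_min = 0.0001, the minimum allowed
        elevation difference is 0.0001 * 200 = 0.02 m. If an upstream
        neighbour's minimum channel elevation lies less than 0.02 m above the
        element's own minimum elevation, it is lifted accordingly and the lift
        is recorded in 'corr_sl'. With several upstream neighbours, the
        smallest of their (corrected) elevations is used as the element's
        upper elevation and the differences are recorded in 'corr_bal'.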
"""
# define internal string variables
f_length = 'length'
f_ch_zdif = 'ch_zdif'
f_corr_sl = 'corr_sl'
f_corr_bal = 'corr_bal'
f_lower_corr = 'lower_corr'
f_upper_corr = 'upper_corr'
# pre-allocate arrays
df_ch_zdif = pd.DataFrame(
np.zeros((ser_tgb_up_nd.shape[0], 5)) * np.nan, index=ser_tgb_up_nd.index,
columns=[f_corr_sl, f_corr_bal, f_lower_corr, f_ch_zdif, f_upper_corr])
# fill input columns (min and max elevation within cell)
df_ch_zdif.lower_corr = ser_zlower
# set dummy cell values to nan
df_ch_zdif.at[ser_tgb_type_dummy, :] = np.nan
# iterate all cells
for tgb in reversed(ser_tgb_up_nd.index):
# routing cells
if not ser_tgb_type_dummy[tgb] and not ser_tgb_type_headw[tgb]:
# get min elevation within cell
zlower = df_ch_zdif.at[tgb, f_lower_corr]
# find upstream cell ID number
tgb_up_nd = ser_tgb_up_nd.at[tgb]
# get elevation value for upstream cell
zupper = df_ch_zdif.loc[tgb_up_nd, f_lower_corr]
# calculate range threshold to prevent slope < def_sl_min
zdif_sl_thr = def_sl_min * df_fl.at[tgb, f_length]
# find cell pairs lower threshold slope
sl_corr_bool = (zupper - zlower) <= zdif_sl_thr
# if there is any, correct height differences lower than threshold
if np.any(sl_corr_bool):
# get and set min elevation correction values
hd_corr = zdif_sl_thr - (zupper.loc[sl_corr_bool] - zlower)
df_ch_zdif.at[tgb_up_nd[sl_corr_bool], f_corr_sl] = hd_corr
# get and set max elevation correction values
zupper_sl_corr = zupper.loc[sl_corr_bool] + hd_corr
df_ch_zdif.at[tgb_up_nd[sl_corr_bool], f_lower_corr] = zupper_sl_corr
zupper.at[sl_corr_bool] = zupper_sl_corr.iloc[0]
else:
df_ch_zdif.at[tgb_up_nd, f_corr_sl] = 0
# if more than one upstream cells exist...
if np.any(tgb_up_nd):
# ...calculate minimum value
zupper_min = np.nanmin(zupper)
df_ch_zdif.at[tgb, f_upper_corr] = zupper_min
df_ch_zdif.at[tgb_up_nd, f_lower_corr] = zupper_min
df_ch_zdif.at[tgb_up_nd, f_corr_bal] = zupper_min - zupper
# if only one upstream cell exists take elevation value of it
else:
df_ch_zdif.at[tgb_up_nd, f_corr_bal] = 0
df_ch_zdif.at[tgb, f_upper_corr] = zupper
# calculate elevation range within cell
df_ch_zdif.loc[:, f_ch_zdif] = \
df_ch_zdif.loc[:, f_upper_corr] - df_ch_zdif.loc[:, f_lower_corr]
return df_ch_zdif
# %% calculate runoff concentration parameters
def calc_roconc_params(ser_ch_zmin, ser_zmax, ser_fl_ch_down, ser_fl_headw_len,
ser_tgb_type_headw, ser_tgb_type_dummy,
cellsz, def_zmin_rout_fac=0.5, def_zmax_fac=1):
"""
This function calculates the runoff concentration parameters needed for
the retention time estimation using the Kirpich formula (Kirpich, 1940).
JM 2021
Arguments:
-----------
ser_ch_zmin: pandas.Series [m]
Series of model elements' minimum channel elevation corresponding to
the serie's ascending index.
(e.g., pd.Series([302.4, 330.0, 180.5, 120.8], index=[1, 2, 3, 4],
name='ser_ch_zmin'))
ser_zmax: pandas.Series
Series of model elements' maximum elevation corresponding to the
serie's ascending index. [m]
(e.g., pd.Series([308.4, 341.0, 204.5, 133.8], index=[1, 2, 3, 4],
name='ser_zmax'))
ser_fl_ch_down: pandas.Series [m]
Series of model elements' downstream channel flow length parts
corresponding to the serie's ascending index.
(e.g., pd.Series([202.4, 120.0, 29.5, 13.8], index=[1, 2, 3, 4],
name='ser_fl_ch_down'))
ser_fl_headw_len: pandas.Series
Series of model elements' headwater flow length parts corresponding
to the serie's ascending index. [m]
(e.g., pd.Series([110.4, 231.0, 204.5, 133.8], index=[1, 2, 3, 4],
name='ser_fl_headw_len'))
ser_tgb_type_headw: pandas.Series
Boolean Series, which identifies the headwater cells corresponding
to the serie's ascending index with True.
(e.g., pd.Series(data=[1, 1, 0, 0], index=[1, 2, 3, 4], name='headw',
dtype='bool'))
ser_tgb_type_dummy: pandas.Series
Boolean Series, which identifies the dummy cells corresponding to the
serie's ascending index with True.
(e.g., pd.Series(data=[0, 0, 0, 0], index=[1, 2, 3, 4], name='dummy',
dtype='bool'))
cellsz: int
Integer, which defines the model element edge length in [m] (e.g., 100)
def_zmin_rout_fac: float (optional, default: 0.5)
Factor to vary the lower elevation of runoff concentration between
the minimum (0) and maximum (1) channel elevation of the element. By
default, the factor is set to the average elevation (0.5) [-]
def_zmax_fac: float (optional, default: 1)
Factor to vary the upper elevation of runoff concentration between
the minimum (0) and maximum (1) elevation of the element. By default,
the factor is set to the maximum elevation (1) [-]
Returns:
-----------
df_roconc_params: pandas.DataFrame
DataFrame of runoff concentration parameters. The DataFrame
includes the model element ID as index and the following columns:
- lower runoff concentration elevation [m] ('hut')
- upper runoff concentration elevation [m] ('hot')
- maximum runoff concentration flow length [km] ('tal')
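        Example:
        -----------
        Worked numbers (assumed for illustration): for a routing element with
        ser_ch_zmin = 300 m, ser_zmax = 320 m and the default factors, the
        lower runoff concentration elevation is HUT = 300 + (320 - 300) * 0.5
        = 310 m and the upper one is HOT = 310 + (320 - 310) * 1 = 320 m. Its
        maximum flow length is TAL = (sqrt(2) + 1) * cellsz / 4, i.e. roughly
        60.4 m = 0.06 km for cellsz = 100 m. For a headwater element, HUT
        equals ser_ch_zmin and TAL is the sum of its downstream channel share
        and its headwater flow length, converted to [km].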
"""
# define internal string variables
f_tal = 'tal'
f_hut = 'hut'
f_hot = 'hot'
# calculate lower runoff concentration elevation
# define HUT for head waters as low point of cell
ser_hut = ser_ch_zmin + (ser_zmax - ser_ch_zmin) * def_zmin_rout_fac
ser_hut.at[ser_tgb_type_headw] = ser_ch_zmin.loc[ser_tgb_type_headw]
# calculate upper runoff concentration elevation
ser_hot = ser_hut + (ser_zmax - ser_hut) * def_zmax_fac
# correct negative and zero HOT-HUT
zdif_corr_ii = np.round(ser_hot, 1) - np.round(ser_hut, 1) <= 0
ser_hot.at[zdif_corr_ii] = ser_hut.loc[zdif_corr_ii] + 0.1
# calculate maximum flow length
        # define TAL for cells with stream as mean of straight and diagonal line
ser_tal = pd.Series(np.zeros(ser_hot.shape) + (np.sqrt(2) + 1) * cellsz / 4,
index=ser_hot.index, name=f_tal)
# define TAL for head waters balancing flow length upstream values
ser_tal.at[ser_tgb_type_headw] = \
ser_fl_ch_down.loc[ser_tgb_type_headw] \
+ ser_fl_headw_len.loc[ser_tgb_type_headw]
# convert from [m] to [km]
ser_tal = ser_tal / 1000
# summarize series
df_roconc_params = pd.concat([ser_hut, ser_hot, ser_tal], axis=1)
df_roconc_params.columns = [f_hut, f_hot, f_tal]
df_roconc_params.at[ser_tgb_type_dummy, :] = np.nan
return df_roconc_params
# %% calculation
# define key-words to identify element types
str_headw = 'headwater'
str_routing = 'routing'
str_dummy = 'dummy'
# define internal variables
f_tgb = 'tgb'
f_tgb_down = 'tgb_down'
f_tgb_type = 'tgb_type'
f_tgb_a = 'tgb_a'
f_x = 'x'
f_y = 'y'
f_nrflv = 'nrflv'
f_ft = 'ft'
# define arcpy default field names
f_pt_x = 'POINT_X'
f_pt_y = 'POINT_Y'
# calculate model network parameters
if print_out: print('...import and pre-process data...')
# Import model cell feature class attribute table and convert to pandas.DataFrame
structarr_tgb_in = arcpy.da.FeatureClassToNumPyArray(
name_tgb_par_p,
[f_tgb, f_tgb_type, f_tgb_down, f_tgb_a, f_pt_x, f_pt_y, field_fl_mr,
field_dem_max_mr, field_dem_min_mr, field_fl_fnw_mean_mr])
df_tgb_in = pd.DataFrame(np.sort(structarr_tgb_in, order=f_tgb),
index=structarr_tgb_in[f_tgb])
df_tgb_in = df_tgb_in.rename(columns={f_pt_x: f_x, f_pt_y: f_y})
# convert string identifiers of model cells to logical arrays
tgb_type_lookup, tgb_type_tgb_id = np.unique(df_tgb_in.loc[:, f_tgb_type],
return_inverse=True)
ser_tgb_type_headw = pd.Series(
tgb_type_tgb_id == np.nonzero(tgb_type_lookup == str_headw)[0][0],
dtype=bool, index=df_tgb_in.index, name=str_headw)
ser_tgb_type_routing = pd.Series(tgb_type_tgb_id == np.nonzero(
tgb_type_lookup == str_routing)[0][0],
dtype=bool, index=df_tgb_in.index, name=str_routing)
ser_tgb_type_dummy = pd.Series(tgb_type_tgb_id == np.nonzero(
tgb_type_lookup == str_dummy)[0][0],
dtype=bool, index=df_tgb_in.index, name=str_dummy)
# calculate upstream model element indices
ser_tgb_up = tc.get_upstream_idx(df_tgb_in.loc[:, f_tgb_down])
# get up- and downstream model cell indices while ignoring dummy elements
ser_tgb_down_nd = tc.get_downstream_idx_ign_dumm(
df_tgb_in.loc[:, f_tgb_down], ser_tgb_type_dummy)
ser_tgb_up_nd = tc.get_upstream_idx_ign_dumm(
df_tgb_in.loc[:, f_tgb_down], ser_tgb_type_headw, ser_tgb_type_dummy)
# calculate model network parameters
if print_out: print('...calculate model network parameters...')
# redistribute model resolution flow length values at confluence points
ser_fl = copy.deepcopy(df_tgb_in.loc[:, field_fl_mr])
df_fl_mr = redistr_flowl_at_conflp(ser_fl, ser_tgb_down_nd, ser_tgb_up_nd,
ser_tgb_type_headw, ser_tgb_type_dummy)
# redistribute flow network flow length values at confluence points
# (including redistribution of very small flow length values)
ser_fl_fnw = copy.deepcopy(df_tgb_in.loc[:, field_fl_fnw_mean_mr])
df_fl_fnw = redistr_flowl_polyl_at_conflp(
ser_fl_fnw, ser_tgb_down_nd, ser_tgb_up_nd,
ser_tgb_type_headw, ser_tgb_type_dummy, cellsz)
# merge flow length resulting from model resolution raster and flow network polylines
df_fl = merge_fnw_and_mr_fl(df_fl_mr, df_fl_fnw,
df_tgb_in.loc[:, f_tgb_down], ser_tgb_down_nd, ser_tgb_type_headw,
ser_tgb_type_dummy, def_fl_upper_lim=def_fl_upper_lim,
def_fl_strct_mism=def_fl_strct_mism)
# calculate cumulative flow length values respecting LARSIM conventions
df_cum_ch_fl = calc_cum_ch_fl(df_fl, ser_tgb_up, ser_tgb_type_headw, ser_tgb_type_dummy)
# calculate channel elevation differences
df_ch_zdif = calc_ch_zdif(df_tgb_in.loc[:, field_dem_min_mr], df_fl,
ser_tgb_up_nd, ser_tgb_type_headw, ser_tgb_type_dummy,
def_sl_min=def_sl_min)
# calculate slope for routing
ser_ch_gef = tc.calc_ch_sl(df_ch_zdif.loc[:, 'ch_zdif'], df_fl.loc[:, 'length'],
ser_tgb_type_routing,
def_sl_excl_quant=def_sl_excl_quant)
# calculate runoff concentration parameters
if print_out: print('...calculate runoff concentration parameters...')
df_roconc_params = calc_roconc_params(df_ch_zdif.lower_corr,
df_tgb_in.loc[:, field_dem_max_mr],
df_fl_mr.corr_down, df_fl.length,
ser_tgb_type_headw, ser_tgb_type_dummy,
cellsz, def_zmin_rout_fac=def_zmin_rout_fac,
def_zmax_fac=def_zmax_fac)
# calculate routing parameters
if print_out: print('...calculate routing parameters...')
# calculate channel-forming discharge
ser_ch_form_q = tc.calc_ch_form_q(df_tgb_in.loc[:, f_tgb_a], df_tgb_in.loc[:, f_tgb_down],
q_spec=q_spec_ch, ser_q_in_corr=ser_q_in_corr)
    # calculate triple trapezoid river cross section
df_ttp = tc.calc_ttp(ser_ch_form_q, ser_tgb_type_routing, ch_est_method=ch_est_method,
def_bx=def_bx, def_bbx_fac=def_bbx_fac, def_bnm=def_bnm,
def_bnx=def_bnx, def_bnvrx=def_bnvrx,
def_skm=def_skm, def_skx=def_skx)
# calculate informative parameters
if print_out: print('...calculate informative parameters...')
# calculate inflow catchment size informative value
ser_area_outfl = df_tgb_in.loc[:, f_tgb_a] + (cellsz**2) / (10**6)
ser_area_outfl.at[~ser_tgb_type_routing] = 0
# create names of elements
ser_nrflv = pd.Series(df_tgb_in.shape[0]*'', index=df_tgb_in.index, name=f_nrflv)
for tgb, el_type in df_tgb_in.loc[:, f_tgb_type].iteritems():
ser_nrflv.at[tgb] = '{0:s}{1:05d}'.format(el_type[0].upper(), tgb)
# calculate cell area value (FT)
ser_ft = pd.Series(np.zeros(ser_tgb_type_dummy.shape),
index=ser_tgb_type_dummy.index, name=f_ft)
ser_ft[~ser_tgb_type_dummy] = (cellsz**2) / (10**6)
# summarize information to data frame
if print_out: print('...summarize information...')
# summarize Series to DataFrame
fields = ['TGB','NRFLV','FT','HUT','HOT','TAL','X','Y','KMU','KMO','GEF',
'HM','BM','BL','BR','BBL','BBR','BNM','BNL','BNR','BNVRL','BNVRR',
'SKM','SKL','SKR','Kommentar_EZG-A','Kommentar_GBA']
df_data_tgbdat = pd.concat([df_tgb_in.loc[:, f_tgb], ser_nrflv, ser_ft, df_roconc_params,
                                df_tgb_in.loc[:, f_x].astype(int),
                                df_tgb_in.loc[:, f_y].astype(int),
df_cum_ch_fl, ser_ch_gef, df_ttp, ser_area_outfl,
ser_ch_form_q], axis=1)
df_data_tgbdat.columns=fields
# correct data for headwater and dummy catchments
df_data_tgbdat.at[ser_tgb_type_headw,
['GEF', 'HM','BM','BL','BR','BBL','BBR','BNM',
'BNL','BNR','BNVRL','BNVRR','SKM','SKL','SKR']] = np.nan
df_data_tgbdat.at[ser_tgb_type_dummy,
['HUT','HOT','TAL','GEF','HM','BM','BL','BR','BBL','BBR',
'BNM','BNL','BNR','BNVRL','BNVRR','SKM','SKL','SKR']] = np.nan
return df_data_tgbdat, ser_tgb_down_nd, ser_ft, ser_area_outfl, ser_ch_form_q
# %% create cross section lines
def create_csl(path_fnw, path_ws_s, path_fa_hr,
def_cs_dist, def_cs_wmax_eval,
path_gdb_out, name_csl='csl', name_fnw_o='fnw_o',
def_river_a_in=0, def_cs_search_rad=5, print_out=False):
"""
    This function creates cross section lines with user-defined spacing
    between the lines and a user-defined length of the cross sections
    perpendicular to the flow path. Additionally, the user may define a
    minimum catchment size from
which cross sections are calculated. The function saves a polyline feature
class containing the cross section lines and another one containing a
copy of the input flow network, which is used as reference for other
functions.
JM 2021
Arguments:
-----------
path_fnw: str (e.g., 'c:\fnw.shp')
path of the flow network feature class or shape file
path_ws_s: str (e.g., 'c:\model_creation\ws_s')
path of the model watershed domain polygon feature class
path_fa_hr: str (e.g., 'c:\model_creation\fa_hr')
path of the output extracted high resolution flow accumulation raster
def_cs_dist: int (e.g., 200) [m]
distance between transects along the flow path
def_cs_wmax_eval: int (e.g., 600) [m]
length of transects (incl. both sides of river)
path_gdb_out: str (e.g., 'c:\model_creation.gdb')
path of the output file geodatabase
name_csl: str (optional, default: 'csl')
name of the output cross section line feature class
name_fnw_o: str (optional, default: 'fnw_o')
name of reference flow network polyline feature class (copy of fnw)
def_river_a_in: float (optional, default: 0) [km²]
minimum inflowing catchment area defining a flow path where cross
sections are created
def_cs_search_rad: int (optional, default: 5) [m]
search radius for transect identification
print_out: boolean (optional, default: False)
        true if work progress shall be printed to the command line
Returns:
-----------
Saves the cross section line feature class.
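    Example:
    -----------
    Illustrative call sketch (paths and distances are the documented examples
    above; the minimum inflowing catchment area of 0.1 km2 is an assumption,
    and arcpy with the required extensions is assumed to be licensed):

        create_csl(r'c:\fnw.shp', r'c:\model_creation\ws_s',
                   r'c:\model_creation\fa_hr', 200, 600,
                   r'c:\model_creation.gdb', def_river_a_in=0.1,
                   print_out=True)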
"""
# definitions
# set intermediate feature class and raster names
name_fnw = os.path.split(path_fnw)[1]
name_fa_fnw_seg = 'fa_fnw_seg'
name_fnw_c = 'fnw_c'
name_fnw_dissolve = 'fnw_dissolve'
name_fnw_inters = 'fnw_inters'
name_fnw_gen_p = 'fnw_gen_p'
name_fnw_seg = 'fnw_seg'
name_fnw_seg_id = 'fnw_seg_id'
name_fnw_seg_id_jt = 'fnw_seg_id_jt'
name_fnw_seg_gen_p = 'fnw_seg_gen_p'
name_fnw_seg_buf = 'fnw_seg_buf'
name_fnw_seg_pfa = 'fnw_seg_pfa'
name_fnw_seg_gen_p_maxfat = 'fnw_seg_gen_p_maxfat'
name_fnw_unspl = 'fnw_unspl'
# internal field names
f_fnw_fid = name_fnw + '_fid'
f_fnw_seg_fid = name_fnw_seg + '_fid'
f_fnw_o_fid = name_fnw_o + '_fid'
f_fnw_o_rp_id = name_fnw_o + '_rp_fid'
# arcpy field names
f_shp_l = 'Shape_Length'
f_oid = 'OBJECTID'
f_join_fid = 'JOIN_FID'
f_target_fid = 'TARGET_FID'
f_orig_fid = 'ORIG_FID'
f_val = 'Value'
# arcpy method names
method_first = 'FIRST'
method_max = 'MAX'
# calculations
# set workspace
arcpy.env.workspace = path_gdb_out
# allow overwriting the outputs
arcpy.env.overwriteOutput = True
# Pre-process flow network
if print_out: print('...pre-process flow network...')
# clip flow network at model watershed domain
arcpy.analysis.Clip(path_fnw, path_ws_s, name_fnw_c, None)
# dissolve features
arcpy.management.Dissolve(
name_fnw_c, name_fnw_dissolve, '', None, 'SINGLE_PART', 'DISSOLVE_LINES')
# merge features between confluence points respecting attributes
arcpy.management.FeatureToLine(
name_fnw_dissolve, name_fnw_inters, '', 'ATTRIBUTES')
# Create flow network line segments
if print_out: print('...create flow network line segments...')
# generate points along flow network
arcpy.GeneratePointsAlongLines_management(
name_fnw_inters, name_fnw_gen_p, 'DISTANCE', int(def_cs_dist / 2), '', '')
# split flow network lines at generated points
# def_pt_search_rad = 1 # [m]
arcpy.SplitLineAtPoint_management(
name_fnw_inters, name_fnw_gen_p, name_fnw_seg, None)
# '{0:d} Meters'.format(def_pt_search_rad)
    # Get original object IDs from the intersected flow network, join them and calculate buffer
if print_out: print('...calculate buffer...')
arcpy.SpatialJoin_analysis(
name_fnw_seg, name_fnw_inters, name_fnw_seg_id,
'JOIN_ONE_TO_MANY', 'KEEP_ALL',
'{0} {0} false true true 8 Double 0 0, First, #, {1}, {0}, -1, -1;'.format(
f_shp_l, name_fnw_seg), 'WITHIN', '', '')
stat_expr = '{0} {1}'.format(f_join_fid, method_first)
arcpy.analysis.Statistics(
name_fnw_seg_id, name_fnw_seg_id_jt, stat_expr, f_target_fid)
f_first_jfid = '{0}_{1}'.format(method_first, f_join_fid)
arcpy.management.AlterField(
name_fnw_seg_id_jt, f_first_jfid, f_fnw_fid, '', '', '',
'NULLABLE', 'CLEAR_ALIAS')
arcpy.management.AlterField(
name_fnw_seg_id_jt, f_target_fid, f_fnw_seg_fid, '', '', '',
'NULLABLE', 'CLEAR_ALIAS')
arcpy.management.JoinField(
name_fnw_seg, f_oid, name_fnw_seg_id_jt, f_fnw_seg_fid, f_fnw_fid)
arcpy.analysis.Buffer(
name_fnw_seg, name_fnw_seg_buf, str(def_cs_search_rad),
'FULL', 'FLAT', 'NONE', '', 'PLANAR')
# Calculate zonal statistics for maximum flow accumulation per eval. point,
# create evaluation points, make table out of it and delete not necessary fields
if print_out: print('...calculate zonal statistics...')
fa_fnw_seg = arcpy.ia.ZonalStatistics(
name_fnw_seg_buf, f_orig_fid, path_fa_hr, 'MAXIMUM', 'DATA')
fa_fnw_seg.save(name_fa_fnw_seg)
arcpy.management.GeneratePointsAlongLines(
name_fnw_seg, name_fnw_seg_gen_p, 'DISTANCE',
str(def_cs_search_rad + 2), '', '')
arcpy.ga.ExtractValuesToTable(
name_fnw_seg_gen_p, name_fa_fnw_seg, name_fnw_seg_pfa, '', 'ADD_WARNING_FIELD')
arcpy.management.DeleteField(name_fnw_seg_pfa, 'SrcID_Rast')
# Join original evaluation point object ID, rename field and calculate maximum
# flow accumulation per segment
if print_out: print('...calculate maximum flow accumulation per segment...')
arcpy.JoinField_management(
name_fnw_seg_gen_p, f_oid, name_fnw_seg_pfa, 'SrcID_Feat', f_val)
arcpy.AlterField_management(
name_fnw_seg_gen_p, f_orig_fid, f_fnw_seg_fid, '', 'LONG', '4',
'NULLABLE', 'CLEAR_ALIAS')
stat_expr = '{0} {1}'.format(f_val, method_max)
arcpy.Statistics_analysis(
name_fnw_seg_gen_p, name_fnw_seg_gen_p_maxfat,
stat_expr, f_fnw_seg_fid)
# Join original segment object ID, select elements smaller flow accumulation threshold,
# delete them and merge lines to segments between confluence points
if print_out: print('...create line segments between confluence points...')
f_max_val = '{0}_{1}'.format(method_max, f_val)
arcpy.JoinField_management(
name_fnw_seg, f_oid, name_fnw_seg_gen_p_maxfat, f_fnw_seg_fid, f_max_val)
# get high resolution cell size
cell_sz_x_obj = arcpy.GetRasterProperties_management(path_fa_hr, 'CELLSIZEX')
cell_sz_x = np.int32(cell_sz_x_obj.getOutput(0))
    # convert minimum catchment area [km2] to a flow accumulation cell count threshold
    def_river_cellnb = np.int64(def_river_a_in * 10**6 / cell_sz_x**2)
sel_expr = '"{0:s}" >= {1:d}'.format(f_max_val, def_river_cellnb)
fnw_seg_sel = arcpy.SelectLayerByAttribute_management(
name_fnw_seg, 'NEW_SELECTION', sel_expr)
arcpy.UnsplitLine_management(fnw_seg_sel, name_fnw_unspl, f_fnw_fid, '')
    # if field f_fnw_o_fid does not exist, create and calculate it
if not arcpy.ListFields(name_fnw_inters, f_fnw_o_fid):
arcpy.AddField_management(
name_fnw_unspl, f_fnw_o_fid, 'LONG', '', '', '', '',
'NULLABLE', 'NON_REQUIRED', '')
arcpy.CalculateField_management(
name_fnw_unspl, f_fnw_o_fid, '!{0}!'.format(f_oid), 'PYTHON3', '')
# Generate transects along line features, which are longer than defined
# distance between transects
if print_out: print('...create transects along line features...')
sel_expr = '{0:s} > {1:d}'.format(f_shp_l, def_cs_dist)
arcpy.SelectLayerByAttribute_management(
name_fnw_unspl, 'NEW_SELECTION', sel_expr, '')
arcpy.CopyFeatures_management(name_fnw_unspl, name_fnw_o, '', '', '', '')
arcpy.GenerateTransectsAlongLines_management(
name_fnw_o, name_csl, str(def_cs_dist), str(def_cs_wmax_eval), 'NO_END_POINTS')
arcpy.AlterField_management(
name_csl, f_orig_fid, f_fnw_o_rp_id, '', '', '4', 'NULLABLE', 'CLEAR_ALIAS')
# %% create profile.dat based on user-defined cross sections
def df_profdat_from_cs(
path_fnw, path_fnw_o, path_csl, path_dem_hr, path_fa_hr, path_tgb_s,
ser_tgb_down, ser_tgb_down_nd, ser_tgb_type_routing,
ser_ch_form_q, ser_pef_bm, ser_pef_hm,
path_gdb_out, name_profile_par='profile_par',
name_ch_fit_coords='ch_fit_coords', name_bcwsl='bcwsl',
def_cs_wmax_eval=600, def_cs_intp_buf_dist=1, def_ch_w=0.5, def_ch_h=0.5,
def_ch_wres=0.05, def_cs_hmax_eval=10, def_lam_hres=0.1,
def_ch_vmin=0.5, def_ch_vmax=3.0, def_ch_wmax_eval=40,
def_chbank_slmin=0.1, def_ch_hmin=0.2, def_ch_hmin_eval=0.1,
def_profdat_decmax=2,
ctrl_show_plots=False, ctrl_save_plots=False,
ser_ft=None, ser_area_outfl=None,
def_ch_hmax_eval=None, path_plots_out=None,
print_out=False):
"""
This function extracts and summarizes all parameters for user-defined cross
sections and returns a DataFrame of them. Additionally, a Series is returned,
that represents the cross section line IDs, which are allocated to every
routing model element.
JM 2021
Arguments:
-----------
path_fnw: str (e.g., 'c:\model_creation.gdb\fnw.shp')
path of the flow network feature class or shape file
path_fnw_o: str (e.g., 'c:\model_creation.gdb\fnw_o')
path of reference flow network polyline feature class (copy of fnw)
path_csl: str (e.g., 'c:\model_creation.gdb\csl')
path of the output cross section line feature class
path_dem_hr: str (e.g., 'c:\model_creation.gdb\dem_hr')
path of the output watershed polygon feature class
path_fa_hr: str (e.g., 'c:\model_creation.gdb\fa_hr')
path of the output extracted high resolution flow accumulation raster
path_tgb_s: str (e.g., 'c:\model_creation.gdb\tgb_s')
path of the output model element polygon feature class
ser_rout_tgb: pandas.Series
series of routing type model element ID numbers
ser_tgb_down: pandas.Series
Series of downstream model element indices corresponding to the serie's
ascending index. The last value is outlet, identified with a zero.
The outlet will be neglected in calculations.
(e.g., pd.Series([2, 4, 4, 0], index=[1, 2, 3, 4], name='tgb_down'))
ser_tgb_down_nd: pandas.Series
Series of corresponding downstream model element indices ignoring dummy
elements. Model outlet remains -1 and dummy elements are represented as 0.
ser_tgb_type_routing: pandas.Series
Boolean Series, which identifies the routing cells corresponding to the
serie's ascending index with True.
(e.g., pd.Series(data=[0, 0, 1, 1], index=[1, 2, 3, 4],
name='routing', dtype='bool'))
ser_ch_form_q: pandas.Series
Series of elements' channel-forming discharge at the corresponding
model element ID in the serie's index.
ser_pef_bm, ser_pef_hm: pandas.Series
Series of estimated channel width ('bm') and depth ('wm')
path_gdb_out: str (e.g., 'c:\model_creation.gdb')
path of the output file geodatabase
name_profile_par: str (optional, default: 'profile_par')
name of parameter table for bankful discharge
name_ch_fit_coords: str (optional, default: 'ch_fit_coords')
name of coordinate table for bankful discharge line
name_bcwsl: str (optional, default: 'bcwsl')
name of bankful channel water surface line
def_cs_wmax_eval: int (optional, default: 600) [m]
Length of the automatically generated cross sections perpendicular
to the flow accumulation flow network. It should cover the valley
at least until the estimated maximum water depth.
def_cs_hmax_eval: float (optional, default: 10) [m]
maximum height of cross section evaluation
def_ch_wmax_eval: float (optional, default: 40) [m]
estimated maximum channel width
def_ch_hmin_eval: float (optional, default: 0.1) [m]
minimum height of channel evaluation
def_ch_hmax_eval: float (optional, default: None)
maximum height of channel evaluation (used to limit y-axis of plot)
def_ch_hmin: float (optional, default: 0.2, must be >= 0.2) [m]
minimum channel depth threshold for channel identification
def_ch_vmin: float (optional, default: 0.5) [m/s]
minimum reasonable flow velocity
def_ch_vmax: float (optional, default: 3.0) [m/s]
maximum reasonable flow velocity
def_chbank_slmin: float (optional, default: 0.1) [dH/dL]
minimum riverbank slope threshold for channel identification
def_ch_wres: float (optional, default: 0.05)
horizontal resolution of interpolated points within channel
def_lam_hres: float (optional, default: 0.1) [m]
spacing between evaluation lamellae
def_profdat_decmax: int (optional, default: 2) [-]
decimal numbers allowed in the file profile.dat
def_cs_intp_buf_dist: int (e.g., 1) [m]
cross section intersection points' buffer distance
def_ch_w, def_ch_h: float (optional, default: 0.5, 0.5) [m]
        artificial channel width (w) and depth (h), added to continuously
descending cross sections
ctrl_show_plots: boolean (optional, default: False) [-]
(de-)activate pop-up of figures
ctrl_save_plots: boolean (optional, default: False) [-]
(de-)activate export of figures as files
ser_ft: pandas.Series (optional, default: None) [km²]
model element subcatchment area
ser_area_outfl: pandas.Series (optional, default: None) [km²]
sum of upstream model elements' area
path_plots_out: str (optional, default: None)
path where plots are stored (e.g., 'c:\model_creation\fig')
print_out: boolean (optional, default: False)
        True if work progress shall be printed to the command line
Returns:
-----------
df_profdat_par: pandas.DataFrame
DataFrame containing all parameters calculated for a cross section
profile ID. These are:
- csl_fid: cross section feature ID
- h, a, p, wsl: water level depth (h) and width (wsl) as well as cross
section area (a) and wetted perimeter (p) of bankful discharge
- ll_ii: ID of the lamella determining the bankful discharge
- a_ll, p_ll, wslm_ll: mean water surface level width (wslm), cross
section area (a_ll) and wetted perimeter (p_ll) per lamellae
The index is the allocated model element ID (tgb).
ser_tgb_csl: pandas.Series
Series of cross section ID numbers, which are allocated to all
routing model elements in the model structure (index: tgb)
"""
# %% get cross section GIS data information
def get_cs_gisdata(path_fnw, path_fnw_o, path_csl, path_dem_hr, path_fa_hr,
path_tgb_s, df_tgb_in, path_gdb_out,
def_cs_wmax_eval=600, def_cs_intp_buf_dist=1,
print_out=False):
"""
This function intersects cross section lines with the model element
polygons and the flow network (mid-points of cross sections and
non-intended intersections with other channels of the flow network).
Furthermore, it collects necessary elevation information for the cross
section lines itself and the created intersection points.
The function returns pandas.DataFrames with the collected information.
JM 2021
Arguments:
-----------
path_fnw: str (e.g., 'c:\fnw.shp')
path of the flow network feature class or shape file
path_fnw_o: str (e.g., 'c:\model_creation\fnw_o')
path of reference flow network polyline feature class (copy of fnw)
path_csl: str (e.g., 'c:\model_creation\csl')
path of the output cross section line feature class
path_dem_hr: str (e.g., 'c:\model_creation\dem_hr')
path of the output watershed polygon feature class
path_fa_hr: str (e.g., 'c:\model_creation\fa_hr')
path of the output extracted high resolution flow accumulation raster
path_tgb_s: str (e.g., 'c:\model_creation\tgb_s')
path of the output model element polygon feature class
ser_rout_tgb: pandas.Series
series of routing type model element ID numbers
path_gdb_out: str (e.g., 'c:\model_creation.gdb')
path of the output file geodatabase
def_cs_wmax_eval: int (optional, default: 600) [m]
length of transects (incl. both sides of river)
def_cs_intp_buf_dist: int (optional, default: 1) [m]
cross section intersection points' buffer distance
print_out: boolean (optional, default: False)
            True if work progress shall be printed to the command line
Returns:
-----------
df_csp: pandas.DataFrame
DataFrame containing all information for the cross section line:
- csl_fid: cross section line feature ID
- x, y, z: X-, Y-, and Z-coordinates for points along the cross section
df_intp: pandas.DataFrame
DataFrame containing all information for the cross sections'
intersection points with potential other flow channel around:
- csl_fid: cross section line feature ID
- x, y, z: X-, Y-, and Z-coordinates for mid-point
- fa: flow accumulation value at intersection point
df_intp_tgb: pandas.DataFrame
DataFrame containing all information for the cross sections' mid-point
(is equal to the intersection point with the original flow channel):
- csl_fid: cross section line feature ID
- x, y, z: X-, Y-, and Z-coordinates for mid-point
- tgb: model element ID number
- fa: flow accumulation value at intersection point
"""
# workaround for bug in arcpy.ExtractMultiValuesToPoints if only one feature exists
def workar_extrmultvaltopt(path_dem_hr, path_gdb_out, name_cs_intp,
f_rasval='RASTERVALU', f_z='z'):
path_cs_intp = path_gdb_out + name_cs_intp
path_cs_intp_z = path_cs_intp + '_z'
arcpy.sa.ExtractValuesToPoints(name_cs_intp, path_dem_hr, path_cs_intp_z,
'NONE', 'VALUE_ONLY')
arcpy.management.AlterField(path_cs_intp_z, f_rasval, f_z, '', 'FLOAT', 4,
'NULLABLE', 'CLEAR_ALIAS')
arcpy.management.Delete(name_cs_intp)
arcpy.CopyFeatures_management(path_cs_intp_z, name_cs_intp, '', '', '', '')
arcpy.AddXY_management(name_cs_intp)
# definition section
# define paths of intermediates in working geodatabase
# cs: cross section
# ip: intersection point
# fnw: flow network
# csl: cross section line
# midp: mid-point
name_fnw = os.path.split(path_fnw)[1]
name_fnw_o = os.path.split(path_fnw_o)[1]
name_csl = os.path.split(path_csl)[1]
name_csl_bu = name_csl + '_bu' # backup copy of csl
name_cs_intp_mult = 'cs_intp_mult' # multi-part ips of cs with fnw
name_cs_intp = 'cs_intp' # single-part ips of cs with fnw
name_cs_midp_near_tab = 'cs_midp_near_tab' # table with closest ip to midp
name_cs_intp_buf = 'cs_inp_buf' # cs ips' buffer
name_fa_max_zonal = 'fa_max_zonal' # max fa zonal statistics raster at cs ips
name_cs_midp_minrank_tab = 'cs_midp_minrank_tab' # min distance rank per csl table
name_cs_midp = 'cs_midp' # cs midps
name_cs_intp_other = 'cs_inp_other' # cs ips with other stream section
name_cs_intp_same = 'cs_inp_same' # cs ips with same stream section
name_cs_intp_tgb = 'cs_intp_tgb' # cs ips with model cell ID number
name_csp = 'csp' # cs vertices including elevation values
name_csp_z_tab = 'csp_z_tab' # cs vertice table including z-values
# internal field names
f_fnw_fid = name_fnw + '_fid'
f_fnw_o_fid = name_fnw_o + '_fid'
f_fnw_o_rp_id = name_fnw_o + '_rp_fid'
f_fnw_o_ip_fid = name_fnw_o + '_ip_fid'
f_csl_fid = name_csl + '_fid'
f_csl_p_fid = 'csl_p_fid'
f_cs_midp_fid = name_cs_midp + '_fid'
f_cs_midp_o_fid = name_cs_midp + '_o_fid'
f_cs_midp_csp_dl = name_cs_midp + '_csp_dl'
f_cs_midp_csp_dlr = name_cs_midp + '_csp_dlr'
f_cs_midp_csp_dlrmin = name_cs_midp + '_csp_dlrmin'
f_csp_fid = name_csp + '_fid'
f_csp_c_fid = 'csp_c_fid'
f_fa = 'fa'
f_x = 'x'
f_y = 'y'
f_z = 'z'
# arcpy field names
f_oid = 'OBJECTID'
f_orig_fid = 'ORIG_FID'
f_val = 'Value'
f_in_fid = 'IN_FID'
f_near_fid = 'NEAR_FID'
f_near_dist = 'NEAR_DIST'
f_near_rank = 'NEAR_RANK'
f_rasval = 'RASTERVALU'
f_pt_x = 'POINT_X'
f_pt_y = 'POINT_Y'
f_pt_z = 'POINT_Z'
f_src_fid = 'SrcID_Feat'
# arcpy method names
method_min = 'MIN'
# calculation section
# set workspace
arcpy.env.workspace = path_gdb_out
# Calculate cross section line intersection points
if print_out: print('Calculate cross section line intersection points...')
arcpy.CopyFeatures_management(path_csl, name_csl_bu, '', '', '', '')
# Create midpoints of cross section lines and rename field
if print_out: print('...create midpoints of cross section lines...')
arcpy.FeatureVerticesToPoints_management(name_csl, name_cs_midp, 'MID')
arcpy.AlterField_management(name_cs_midp, f_orig_fid, f_cs_midp_fid,
'', '', '4', 'NULLABLE', 'CLEAR_ALIAS')
# Create intersection points of flow network and cross sections and rename fields
if print_out: print('...create intersection points of flow network and cross sections...')
arcpy.Intersect_analysis(r'{0} #;{1} #'.format(path_fnw_o, name_csl),
name_cs_intp_mult, 'ALL', '', 'POINT')
arcpy.AlterField_management(name_cs_intp_mult, f_fnw_o_fid, f_fnw_o_ip_fid,
'', '', '4', 'NULLABLE', 'CLEAR_ALIAS')
arcpy.MultipartToSinglepart_management(name_cs_intp_mult, name_cs_intp)
arcpy.AlterField_management(name_cs_intp, f_orig_fid, f_csp_c_fid,
'', 'LONG', '4', 'NULLABLE', 'CLEAR_ALIAS')
arcpy.AlterField_management(name_cs_intp, 'FID_{0}'.format(name_csl), f_csl_fid,
'', 'LONG', '4', 'NULLABLE', 'CLEAR_ALIAS')
# Find closest intersection point, rename fields and join table to midpoint feature class
if print_out: print('...find closest intersection point...')
arcpy.GenerateNearTable_analysis(
name_cs_midp, name_cs_intp, name_cs_midp_near_tab, str(def_cs_wmax_eval / 2),
'NO_LOCATION', 'NO_ANGLE', 'ALL', '0', 'PLANAR')
arcpy.AlterField_management(
name_cs_midp_near_tab, f_in_fid, f_cs_midp_o_fid,
'', '', '4', 'NULLABLE', 'CLEAR_ALIAS')
arcpy.AlterField_management(
name_cs_midp_near_tab, f_near_fid, f_csp_fid,
'', '', '4', 'NULLABLE', 'CLEAR_ALIAS')
arcpy.AlterField_management(
name_cs_midp_near_tab, f_near_dist, f_cs_midp_csp_dl,
'', '', '8', 'NULLABLE', 'CLEAR_ALIAS')
arcpy.AlterField_management(
name_cs_midp_near_tab, f_near_rank, f_cs_midp_csp_dlr,
'', '', '4', 'NULLABLE', 'CLEAR_ALIAS')
arcpy.JoinField_management(
name_cs_midp_near_tab, f_cs_midp_o_fid, name_cs_midp, f_oid, f_cs_midp_fid)
# Join closest intersection points' table to intersection points, rename field,
# and delete intersection points with other CSL
if print_out: print('...delete intersection points with other CSL...')
arcpy.JoinField_management(
name_cs_midp_near_tab, f_csp_fid, name_cs_intp, f_oid, f_csl_fid)
arcpy.AlterField_management(
name_cs_midp_near_tab, f_csl_fid, f_csl_p_fid,
'', '', '4', 'NULLABLE', 'CLEAR_ALIAS')
sel_expr = '{0} <> {1}'.format(f_cs_midp_fid, f_csl_p_fid)
cs_midp_near_tab_sel = arcpy.SelectLayerByAttribute_management(
name_cs_midp_near_tab, 'NEW_SELECTION', sel_expr, '')
arcpy.DeleteRows_management(cs_midp_near_tab_sel)
# Calculate buffer and zonal statistics of flow accumulation and extract values
# at intersection points
if print_out: print('...calculate zonal statistics of flow accumulation...')
arcpy.Buffer_analysis(
name_cs_intp, name_cs_intp_buf, str(def_cs_intp_buf_dist),
'FULL', 'ROUND', 'NONE', '', 'PLANAR')
fa_max_zonal = arcpy.ia.ZonalStatistics(
name_cs_intp_buf, f_oid, path_fa_hr, 'MAXIMUM', 'DATA')
fa_max_zonal.save(name_fa_max_zonal)
arcpy.gp.ExtractMultiValuesToPoints_sa(
name_cs_intp, r'{0} {1}'.format(name_fa_max_zonal, f_fa), 'NONE')
# Join distance rank of intersection points to rank table and copy intersection
# points with another flow network polyline to feature class
if print_out: print('...define intersection points with another CSL...')
arcpy.JoinField_management(
name_cs_intp, f_oid, name_cs_midp_near_tab, f_csp_fid, f_cs_midp_csp_dlr)
sel_expr = '{0} <> {1}'.format(f_fnw_o_ip_fid, f_fnw_o_rp_id)
cs_intp_ir_sel = arcpy.SelectLayerByAttribute_management(
name_cs_intp, 'NEW_SELECTION', sel_expr, '')
arcpy.CopyFeatures_management(cs_intp_ir_sel, name_cs_intp_other, '', '', '', '')
arcpy.DeleteRows_management(cs_intp_ir_sel)
# calculate minimum distance rank per cross section line, alter field,
# and join minimum rank to intersection points
# copy intersection points with identical flow network polyline to feature class
if print_out: print('...define intersection points with identical CSL...')
stat_expr = '{0} {1}'.format(f_cs_midp_csp_dlr, method_min)
arcpy.Statistics_analysis(
name_cs_intp, name_cs_midp_minrank_tab, stat_expr, f_csl_fid)
f_min_cs_midp_csp_dlr = '{0}_{1}'.format(method_min, f_cs_midp_csp_dlr)
arcpy.AlterField_management(
name_cs_midp_minrank_tab, f_min_cs_midp_csp_dlr, f_cs_midp_csp_dlrmin,
'', '', '8', 'NULLABLE', 'CLEAR_ALIAS')
arcpy.JoinField_management(
name_cs_intp, f_csl_fid, name_cs_midp_minrank_tab, f_csl_fid, f_cs_midp_csp_dlrmin)
sel_expr = '{0} <> {1}'.format(f_cs_midp_csp_dlr, f_cs_midp_csp_dlrmin)
cs_intp_sel_rmin = arcpy.SelectLayerByAttribute_management(
name_cs_intp, 'NEW_SELECTION', sel_expr, '')
arcpy.CopyFeatures_management(cs_intp_sel_rmin, name_cs_intp_same, '', '', '', '')
arcpy.DeleteRows_management(cs_intp_sel_rmin)
# intersect TGB shapes with cross section points and calculate TGB-ID field
if print_out: print('...add model cell ID to intersection points...')
arcpy.Intersect_analysis(
r'{0} #;{1} #'.format(name_cs_intp, path_tgb_s), name_cs_intp_tgb,
'NO_FID', '', 'POINT')
        # Get X-, Y-, and Z-coordinates for cross section points
        if print_out: print('...get x-, y-, and z-coordinates for cross section points...')
# If Z-Field Exists, Drop Field
if arcpy.ListFields(name_cs_intp_other, f_z):
arcpy.DeleteField_management(name_cs_intp_other, f_z)
if arcpy.ListFields(name_cs_intp_same, f_z):
arcpy.DeleteField_management(name_cs_intp_same, f_z)
if arcpy.ListFields(name_cs_intp_tgb, f_z):
arcpy.DeleteField_management(name_cs_intp_tgb, f_z)
# Extract Multi Values to Z-field in Intersection Points
# workaround for bug in ExtractMultiValuesToPoints if only one feature exists
# arcpy.sa.ExtractMultiValuesToPoints(name_cs_intp_other, path_dem_hr + ' Z', 'NONE')
# arcpy.sa.ExtractMultiValuesToPoints(name_cs_intp_same, path_dem_hr + ' Z', 'NONE')
# arcpy.sa.ExtractMultiValuesToPoints(name_cs_intp_tgb, path_dem_hr + ' Z', 'NONE')
workar_extrmultvaltopt(path_dem_hr, path_gdb_out, name_cs_intp_other,
f_rasval=f_rasval, f_z=f_z)
workar_extrmultvaltopt(path_dem_hr, path_gdb_out, name_cs_intp_same,
f_rasval=f_rasval, f_z=f_z)
workar_extrmultvaltopt(path_dem_hr, path_gdb_out, name_cs_intp_tgb,
f_rasval=f_rasval, f_z=f_z)
if print_out: print('Create Points along Cross-section...')
# Generate Points Along Lines
if print_out: print('...generate points along lines...')
cell_sz_x_obj = arcpy.GetRasterProperties_management(path_dem_hr, 'CELLSIZEX')
cell_sz_x = np.int32(cell_sz_x_obj.getOutput(0))
arcpy.GeneratePointsAlongLines_management(
path_csl, name_csp, 'DISTANCE', str(cell_sz_x), '', 'END_POINTS')
arcpy.AlterField_management(
name_csp, f_orig_fid, f_csl_fid, '', '', '', 'NULLABLE', 'CLEAR_ALIAS')
# Extract Elevation Values to Z-field in Cross Section Points
if print_out: print('...add Z-coordinate...')
arcpy.ExtractValuesToTable_ga(
name_csp, path_dem_hr, name_csp_z_tab, '', 'ADD_WARNING_FIELD')
arcpy.JoinField_management(
name_csp, f_oid, name_csp_z_tab, f_src_fid, f_val)
arcpy.AlterField_management(
name_csp, f_val, f_z, '', '', '', 'NULLABLE', 'CLEAR_ALIAS')
# Add XY Coordinates
if print_out: print('...add X- and Y-coordinates...')
arcpy.AddXY_management(name_csp)
arcpy.DeleteField_management(name_csp, f_fnw_o_rp_id)
drop_f_expr = ';'.join([f_cs_midp_csp_dlrmin, f_fnw_o_ip_fid,
f_fnw_o_rp_id, f_fnw_fid, f_csp_c_fid, f_pt_z])
arcpy.DeleteField_management(name_cs_intp_other, drop_f_expr)
drop_f_expr += ';{0}'.format(f_cs_midp_csp_dlr)
arcpy.DeleteField_management(name_cs_intp_same, drop_f_expr)
arcpy.DeleteField_management(name_cs_intp_tgb, drop_f_expr)
# import cross section information
if print_out: print('...import cross section data...')
# import cross section tables
csp_fields = [f_csl_fid, f_oid, f_pt_x, f_pt_y, f_z]
arr_csp = arcpy.da.FeatureClassToNumPyArray(name_csp, csp_fields)
cs_intp_fields = csp_fields + [f_fa]
arr_intp_other = arcpy.da.FeatureClassToNumPyArray(name_cs_intp_other, cs_intp_fields)
arr_intp_same = arcpy.da.FeatureClassToNumPyArray(name_cs_intp_same, cs_intp_fields)
cs_intp_tgb_fields = cs_intp_fields + [f_tgb]
arr_intp_tgb = arcpy.da.FeatureClassToNumPyArray(name_cs_intp_tgb, cs_intp_tgb_fields)
# convert structured arrays to pandas DataFrames
csp_fields.remove(f_oid)
df_csp = pd.DataFrame(arr_csp[csp_fields],
index=arr_csp[f_oid]).rename(
columns={f_pt_x: f_x, f_pt_y: f_y})
cs_intp_tgb_fields.remove(f_oid)
df_intp_tgb = pd.DataFrame(arr_intp_tgb[cs_intp_tgb_fields],
index=arr_intp_tgb[f_oid]).rename(
columns={f_pt_x: f_x, f_pt_y: f_y})
cs_intp_fields.remove(f_oid)
df_intp_other = pd.DataFrame(arr_intp_other[cs_intp_fields],
index=arr_intp_other[f_oid]).rename(
columns={f_pt_x: f_x, f_pt_y: f_y})
df_intp_same = pd.DataFrame(arr_intp_same[cs_intp_fields],
index=arr_intp_same[f_oid]).rename(
columns={f_pt_x: f_x, f_pt_y: f_y})
df_intp = pd.concat([df_intp_other, df_intp_same], axis=0)
# sort lists by cross section line ID
df_csp = df_csp.sort_values(f_csl_fid, axis=0).astype(
            {f_csl_fid: np.int64})
df_intp = df_intp.sort_values(f_csl_fid, axis=0).astype(
            {f_csl_fid: np.int64, f_fa: np.int64})
df_intp_tgb = df_intp_tgb.sort_values(f_csl_fid, axis=0).astype(
            {f_csl_fid: np.int64, f_fa: np.int64, f_tgb: np.int64})
# control input data
error_str = ''
# CSL centered in a wrong type of model cell (e.g. head water cell)
tgb_cs_un, tgb_cs_un_count = np.unique(df_intp_tgb.loc[:, f_tgb], return_counts=True)
tgb_cs_val = np.isin(tgb_cs_un, ser_rout_tgb)
error_wrong_tgb_typ = np.any(~tgb_cs_val)
if error_wrong_tgb_typ:
tgb_cs_not_val = tgb_cs_un[~tgb_cs_val]
error_str = error_str + ('ERROR: There is/are {0:d} profile/s which '
'is/are centered in a wrong model cell type. '
'It could be a head water cell.\n'
'TGB-ID(s):\n').format(tgb_cs_not_val.shape[0])
for id_wr in tgb_cs_not_val:
error_str = error_str + ' {0:d}\n'.format(int(id_wr))
# CSL with wrong number of intersection points
csl_un, csl_un_count = np.unique(df_intp_tgb.loc[:, f_csl_fid], return_counts=True)
error_csl_wrong_intp = np.any(csl_un_count != 1)
if error_csl_wrong_intp:
csl_id_wrong = np.int64(csl_un[csl_un_count != 1])
error_str = error_str + ('ERROR: There is/are {0:d} profile/s which '
'has/have wrong number of intersection points. '
'The distance of CSL-FNW intersection point '
                                 'to a model cell border could be less than 0.01m.\n'
'CSL-ID(s):\n').format(csl_id_wrong.shape[0])
for id_wr, nb_wr in zip(csl_id_wrong, csl_un_count[csl_un_count != 1]):
error_str = error_str + ' {0:d} ({1:d} intersection points)\n'.format(id_wr, nb_wr)
# profiles with no or wrong intersecting FNW element
error_csl_nowrong_fnw = df_intp_tgb.shape[0] != np.unique(df_csp.loc[:, f_csl_fid]).shape[0]
if error_csl_nowrong_fnw:
csl_id_missing = np.int64(np.setdiff1d(
|
np.unique(df_csp.loc[:,f_csl_fid])
|
numpy.unique
|
"""
Test Similarity
"""
import numpy as np
import pytest
from wav2rec.core.similarity import cosine_similarity, similarity_calculator
@pytest.mark.parametrize(
"x1,x2,expected",
[
(np.array([0.0, 0.0]), np.array([0.0, 0.0]), 0.0),
(np.array([0.0, 0.5]), np.array([0.0, 1.0]), 0.5),
(np.array([1.0, 0.0]), np.array([1.0, 0.0]), 1.0),
],
)
def test_cosine_similarity(x1: np.ndarray, x2: np.ndarray, expected: float) -> None:
sim = cosine_similarity(x1, x2)
assert np.isclose(sim, expected)
@pytest.mark.parametrize(
"X_query,X_neighbours,expected",
[
(np.array([[0.0, 0.0]]), np.array([[[0.0, 0.0]]]), np.array([[0.0]])),
(np.array([[0.0, 0.5]]), np.array([[[0.0, 1.0]]]), np.array([[0.5]])),
(
|
np.array([[1.0, 0.0]])
|
numpy.array
|
# ----------------------------------------
# Compute the full 5000 member ensemble of
# RSL and rad solutions for all products,
# and also compute basin-mean rsl trends
# ===========
# For each ensemble member:
# 1. Compute annual GRACE grd solution for
# - GrIS, AIS, TWS, Glaciers
# 2. Combine with ensemble members to get
# full fingerprint
# ----------------------------------------
from netCDF4 import Dataset
import numpy as np
import os
import pySLE
import multiprocessing as mp
import datetime as dt
import mod_gentools as gentools
import ctypes as ct
from scipy.interpolate import interp1d
def main():
global settings
set_settings()
prepare_grace() # Read GRACE/GRACE FO data
prepare_glacier_grace()
prepare_glacier_insitu()
prepare_icesheets()
prepare_tws()
set_random_numbers()
# Run ensembles
pool = mp.Pool(settings['nproc'])
out = pool.map(compute_ensemble_member, range(settings['num_ens']))
#out = pool.map(compute_ensemble_member, range(nstart,nstop))
return
def set_settings():
global settings
settings = {}
if os.uname().nodename == 'MT-110180': settings['nproc'] = 4
else: settings['nproc'] = 40
settings['test_run_ICE6G_D'] = True
settings['dir_data'] = os.getenv('HOME')+'/Data/'
# Directories
settings['dir_gwd_prep'] = settings['dir_data'] + 'Budget_20c/grd_prep/'
settings['dir_glacier_zemp'] = settings['dir_data'] + 'Glaciers/Zemp_2019/'
if settings['test_run_ICE6G_D']:
settings['dir_grd_save'] = settings['dir_data'] + 'Budget_20c/grd_ICE6G/'
else:
settings['dir_grd_save'] = settings['dir_data'] + 'Budget_20c/grd/'
# Files
settings['fn_mask'] = settings['dir_data'] +'Budget_20c/grd_prep/mask.npy'
settings['fn_love'] = settings['dir_data'] + 'Budget_20c/grd_prep/love.npy'
if settings['test_run_ICE6G_D']:
settings['fn_grace'] = settings['dir_data'] + 'Budget_20c/grd_prep/ewh_GRACE_annual.nc'
else:
settings['fn_grace'] = settings['dir_data'] + 'Budget_20c/grd_prep/ewh_GRACE_annual_noGIA.nc'
settings['fn_mascon_coords'] = settings['dir_data'] + 'GRACE/JPL_mascon/mascon_coords.npy'
settings['fn_gia_ewh'] = settings['dir_data'] + 'GIA/Caron/Ensemble/ewh_ens_05.nc'
settings['fn_marzeion_t'] = settings['dir_data']+'Glaciers/Marzeion_2015/data_for_thomas.txt'
settings['fn_marzeion_p'] = settings['dir_data']+'Glaciers/Marzeion_2015/data_marzeion_etal_update_2015_regional.txt'
settings['fn_parkes'] = settings['dir_data']+'Glaciers/Parkes2018/annual.csv'
settings['fn_grd_glacier_regions'] = settings['dir_data'] + 'Budget_20c/grd_prep/grd_glacier_regions.nc'
settings['fn_kjeldsen'] = settings['dir_data']+'IceSheets/Kjeldsen/Kjeldsen.csv'
settings['fn_bamber'] = settings['dir_data']+'IceSheets/Bamber_2018/Bamber-etal_2018.tab'
settings['fn_imbie'] = settings['dir_data']+'IceSheets/IMBIE/IMBIE2.txt'
settings['fn_mouginot'] = settings['dir_data']+'IceSheets/Mouginot_2019/GrIS_total.csv'
settings['fn_grd_dam'] = settings['dir_data']+'Budget_20c/grd_prep/grd_tws_dam.nc'
settings['fn_grd_gwd_wada'] = settings['dir_data']+'Budget_20c/grd_prep/grd_tws_gwd_wada.nc'
settings['fn_grd_gwd_doll'] = settings['dir_data']+'Budget_20c/grd_prep/grd_tws_gwd_doll.nc'
settings['tstart'] = dt.datetime.now().replace(microsecond=0)
settings['time'] = np.arange(1900,2019)
settings['time_grace'] = np.arange(2003,2019)
settings['time_insitu'] = np.arange(1900,2004)
settings['num_ens'] = 100
return
# -- Preparation routines --
def prepare_grace():
print(' Preparing GRACE data...')
global grace, settings
# Mask and mascon coordinates
mask = np.load(settings['fn_mask'],allow_pickle=True).all()
glb_mscn_coords = np.load(settings['fn_mascon_coords'],allow_pickle=True)
grace = {} # Global dict with GRACE data
file_handle = Dataset(settings['fn_grace'])
file_handle.set_auto_mask(False)
grace_time = file_handle.variables['years'][:]
grace_ewh = file_handle.variables['ewh'][np.in1d(grace_time,settings['time_grace']),:,:]
grace_ewh_ste = file_handle.variables['ewh_ste'][np.in1d(grace_time,settings['time_grace']),:,:]
file_handle.close()
# Save uncertainties in mascon coordinates
grace_mscn_map = np.zeros(mask['land'].shape, dtype=int)
grace_ewh_ste_mscn = np.zeros([len(settings['time_grace']), len(glb_mscn_coords)],dtype=np.float32)
for k in range(len(glb_mscn_coords)):
lat_acc =
|
np.where((mask['lat'] >= glb_mscn_coords[k, 0]) & (mask['lat'] < glb_mscn_coords[k, 1]))
|
numpy.where
|
# Filename: HCm_UV_v5.0.py
#####################
###### IMPORTS ######
#####################
import string
import numpy as np
import sys
#sys.stderr = open('errorlog.txt', 'w')
import warnings
warnings.filterwarnings("ignore")
#######################
###### FUNCTIONS ######
#######################
#Function for interpolation of grids
def interpolate(grid,z,zmin,zmax,n):
#Columns of the library
n_comments = 0
with open('Libraries_uv/C17_POPSTAR_1myr_uv.dat', 'r') as file1:
for line in file1:
if line[0] == '#':
n_comments += 1
auxiliar_labels = np.genfromtxt('Libraries_uv/C17_POPSTAR_1myr_uv.dat', dtype=None, names=True, encoding = 'ascii', skip_header=n_comments).dtype.names
ncol = len(auxiliar_labels)
vec = []
if z == 2:
label_z = 'logU'
if z == 1:
label_z = 'logCO'
if z == 0:
label_z = '12logOH'
type_list_names = []
for col in auxiliar_labels:
inter = 0
no_inter = 0
type_list_names.append((col, float))
for row in range(0,len(grid)):
if grid[label_z][row] < zmin or grid[label_z][row] > zmax: continue
if z == 2: x = '12logOH'; y = 'logCO'
if z == 1: x = '12logOH'; y = 'logU'
if z == 0: x = 'logCO'; y = 'logU'
if row == (len(grid)-1):
vec.append(grid[col][row])
no_inter = no_inter + 1
elif grid[x][row] < grid[x][row+1] or grid[y][row] < grid[y][row+1] :
vec.append(grid[col][row])
no_inter = no_inter + 1
else:
inter = inter + 1
for index in range(0,n):
i = grid[col][row]+(index)*(grid[col][row+1]-grid[col][row])/n
vec.append(i)
out_aux = np.transpose(np.reshape(vec,(-1,n*inter+no_inter)))
out = np.zeros(out_aux.shape[0], dtype=type_list_names)
for col_n in range(0, len(auxiliar_labels)):
out[auxiliar_labels[col_n]] = out_aux[:, col_n]
return out
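# A minimal usage sketch for interpolate() (hypothetical values; not part of the
# original script): refine a structured grid, as read below with np.genfromtxt,
# keeping only rows with 12+log(O/H) between 7.1 and 9.1 (z=0 selects that
# column) and inserting n=10 interpolation steps between consecutive models.
# grid_refined = interpolate(grid_aux, 0, 7.1, 9.1, 10)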
################################
###### INITIAL ITERATIONS ######
################################
#Description of the code
print ('-------------------------------------------------')
print ('This is HII-CHI-mistry for UV version 5.0')
print ('See Perez-Montero, & Amorin (2017) for details')
print ('Insert the name of your input text file with some or all of the following columns:')
print (' Lya 1216')
print (' NV] 1239')
print (' CIV 1549')
print (' HeII 1640')
print (' OIII 1665')
print (' CIII 1909')
print (' Hb 4861')
print (' OIII 5007')
print ('in arbitrary units and reddening corrected. Each column must be given with labels for the lines and their corresponding flux errors.')
print ('-------------------------------------------------')
# Input file reading
if len(sys.argv) == 1:
if int(sys.version[0]) < 3:
input00 = raw_input('Insert input file name:')
else:
input00 = input('Insert input file name:')
else:
input00 = str(sys.argv[1])
try:
#Counting comments:
n_comments = 0
with open(input00, 'r') as file2:
for line in file2:
if line[0] == '#':
n_comments += 1
input0 = np.genfromtxt(input00,dtype=None,names=True, encoding = 'ascii', skip_header = n_comments)
print ('The input file is:'+input00)
except:
print ('Input file error: It does not exist or has wrong format')
    sys.exit()
print ('')
if input0.size == 1:
input1 = np.stack((input0,input0))
else:
input1 = input0
# Iterations for Montecarlo error derivation
if len(sys.argv) < 3:
n = 25
else:
n = int(sys.argv[2])
print ('The number of iterations for MonteCarlo simulation is: ',n)
print ('')
#############################################
###### SELECTION OF THE GRID OF MODELS ######
#############################################
#Interface with the user
print ('')
question = True
while question:
print ('-------------------------------------------------')
print ('Default SEDs')
print ('------------')
print ('(1) POPSTAR with Chabrier IMF, age = 1 Myr')
print ('(2) BPASS v.2.1 a_IMF = 1.35, Mup = 300, age = 1Myr with binaries')
print ('(3) AGN, double component, a(UV) = -1.0')
print ('')
print ('Other SED')
print ('---------')
print ('(4) Different library')
print ('-------------------------------------------------')
if int(sys.version[0]) < 3:
sed = raw_input('Choose SED of the models: ')
else:
sed = input('Choose SED of the models: ')
if sed == '1' or sed == '2' or sed == '3' or sed == '4': question = False
print ('')
#Further questions on the AGN models
if sed == '3':
#SLOPE ALPHA
question = True
while question:
if int(sys.version[0]) < 3:
alpha = raw_input('Choose value for alpha(OX) in the AGN models: [1] -0.8 [2] -1.2: ')
else:
alpha = input('Choose value for alpha(OX) in the AGN models: [1] -0.8 [2] -1.2: ')
if alpha == '1' or alpha == '2': question = False
print ('')
#FRACTION OF FREE ELECTRONS
question = True
while question:
if int(sys.version[0]) < 3:
efrac = raw_input('Choose stop criterion in the AGN models: [1] 2% free electrons [2] 98% free electrons: ')
else:
efrac = input('Choose stop criterion in the AGN models: [1] 2% free electrons [2] 98% free electrons: ')
if efrac == '1' or efrac == '2': question = False
#Presence or absence of dust in the models
question = True
while question:
if int(sys.version[0]) < 3:
grains = raw_input('Choose AGN models with [1] or without [2] dust grains: ')
else:
grains = input('Choose AGN models with [1] or without [2] dust grains: ')
if grains == '1' or grains == '2': question = False
print ('')
#Particular file introduced by the user
if sed == '4':
question = True
while question:
print ('Introduce name of the file containing the models. It must be located in the folder "Libraries_uv".')
print (' ')
if int(sys.version[0]) < 3:
new_library = raw_input('Name of file: ')
else:
new_library = input('Name of file: ')
#Searching for the file
try:
#Counting comments:
n_comments = 0
with open('Libraries_uv/'+new_library, 'r') as file3:
for line in file3:
if line[0] == '#':
n_comments += 1
library_user = np.genfromtxt('Libraries_uv/'+new_library, dtype=None,names=True, encoding = 'ascii', skip_header=n_comments)
print (' ')
print ('Loading library '+new_library+'. Checking correct format of the file.')
question = False
except:
print (' ')
print ('Library was not found in folder "Libraries_uv" or file does not exist.')
question = True
while question:
try:
#Counting comments:
n_comments = 0
with open('Libraries_uv/'+new_library, 'r') as file4:
for line in file4:
if line[0] == '#':
n_comments += 1
library_user = np.genfromtxt('Libraries_uv/'+new_library, dtype=None,names=True, encoding = 'ascii', skip_header=n_comments)
#Checking correct format:
#Counting comments:
n_comments = 0
with open('Libraries_uv/C17_POPSTAR_1myr_uv.dat', 'r') as file5:
for line in file5:
if line[0] == '#':
n_comments += 1
auxiliar_labels = np.genfromtxt('Libraries_uv/C17_POPSTAR_1myr_uv.dat', dtype=None, names=True, encoding = 'ascii', skip_header=n_comments).dtype.names
missing_labels = []
for label in auxiliar_labels:
if label in library_user.dtype.names:
continue
else:
missing_labels.append(label)
#Displaying message for the user:
                    print('Successfully read the file')
if len(missing_labels) == 0:
print ('File presents the correct format')
question = False
else:
print ('File does not present the correct format. The following columns are missing:')
for need_label in missing_labels:
print('- '+need_label)
print ('More details on the correct format for the library are found in readme file.')
print (' ')
print ('Reintroduce name of the file with fixed format:')
print (' ')
if int(sys.version[0]) < 3:
new_library = raw_input('Name of file: ')
else:
new_library = input('Name of file: ')
except:
print ('Something went wrong while reading file. Please, reintroduce name of the file:')
print ('')
if int(sys.version[0]) < 3:
new_library = raw_input('Name of file: ')
else:
new_library = input('Name of file: ')
#Interpolation in the grid of models
question = True
print ('')
while question:
if int(sys.version[0]) < 3:
inter = raw_input('Choose models [0] No interpolated [1] Interpolated: ')
else:
inter = input('Choose models [0] No interpolated [1] Interpolated: ')
if inter == '0' or inter == '1': question = False
print ('')
sed = int(sed)
inter = int(inter)
alpha = int(alpha)
efrac = int(efrac)
grains = int(grains)
#POPSTAR MODEL
if sed==1:
file_lib = 'C17_POPSTAR_1myr_uv.dat'
#Counting comments:
n_comments = 0
with open('Libraries_uv/'+file_lib, 'r') as file6:
for line in file6:
if line[0] == '#':
n_comments += 1
grid_aux = np.genfromtxt('Libraries_uv/'+file_lib,dtype=None,names=True, encoding = 'ascii', skip_header=n_comments)
if inter == 0:
sed_type = 'POPSTAR, age = 1 Myr, Chabrier IMF. No interpolation.'
print ('No interpolation for the POPSTAR models is going to be used.')
print ('The grid has a resolution of 0.1dex for O/H and 0.125dex for C/O.')
print ('')
res_CO = 0.125
elif inter == 1:
sed_type = 'POPSTAR, age = 1 Myr, Chabrier IMF. Interpolation.'
print ('Interpolation for the POPSTAR models is going to be used.')
print ('The grid has a resolution of 0.01dex for O/H and 0.0125dex for C/O.')
print ('')
res_CO = 0.125
#BPASS MODEL
elif sed==2:
file_lib = 'C17_BPASS_IMF135_mup300_1myr_uv.dat'
#Counting comments:
n_comments = 0
with open('Libraries_uv/'+file_lib, 'r') as file7:
for line in file7:
if line[0] == '#':
n_comments += 1
grid_aux = np.genfromtxt('Libraries_uv/'+file_lib,dtype=None,names=True, encoding = 'ascii', skip_header=n_comments)
if inter == 0:
sed_type = 'BPASS a_IMF = 1.35, M_up = 300, age = 1Myr, with binaries. No interpolation.'
print ('No interpolation for the BPASS models is going to be used.')
print ('The grid has a resolution of 0.1dex for O/H and 0.125dex for C/O.')
print ('')
res_CO = 0.125
elif inter == 1:
sed_type = 'BPASS v.2.1, a_IMF = 1.35, M_up = 300, age = 1Myr. Interpolation.'
print ('Interpolation for the BPASS models is going to be used.')
print ('The grid has a resolution of 0.01dex for O/H and 0.0125dex for C/O.')
print ('')
res_CO = 0.125
#AGN MODEL FOR alpha_OX = -0.8, efrac = 2%, with dust grains
elif sed==3 and alpha ==1 and efrac == 1 and grains == 1:
file_lib = 'C17_AGN_alpha08_efrac02_CNfix_grains_uv.dat'
#Counting comments:
n_comments = 0
with open('Libraries_uv/'+file_lib, 'r') as file8:
for line in file8:
if line[0] == '#':
n_comments += 1
grid_aux = np.genfromtxt('Libraries_uv/'+file_lib,dtype=None,names=True, encoding = 'ascii', skip_header=n_comments)
if inter == 0:
sed_type = 'Double composite AGN, a(OX) = -0.8 and free electron fraction = 2% with dust grains. No interpolation.'
print ('No interpolation for the AGN a(ox) = -0.8 with 2% free electrons and dust grains models is going to be used.')
print ('The grid has a resolution of 0.1dex for O/H and 0.125dex for C/O.')
res_CO = 0.125
elif inter == 1:
sed_type = 'Double composite AGN, a(OX) = -0.8, free electron fraction = 2% and with dust grains. Interpolation.'
print ('Interpolation for the AGN a(ox) = -0.8, 2% free electrons and with dust models is going to be used.')
print ('The grid has a resolution of 0.01 dex for O/H and 0.0125 dex for C/O.')
res_CO = 0.125
#AGN MODEL FOR alpha_OX = -0.8, efrac = 2%, without dust grains
elif sed==3 and alpha ==1 and efrac == 1 and grains == 2:
file_lib = 'C17_AGN_alpha08_efrac02_CNfix_nograins_uv.dat'
#Counting comments:
n_comments = 0
with open('Libraries_uv/'+file_lib, 'r') as file8:
for line in file8:
if line[0] == '#':
n_comments += 1
grid_aux = np.genfromtxt('Libraries_uv/'+file_lib,dtype=None,names=True, encoding = 'ascii', skip_header=n_comments)
if inter == 0:
sed_type = 'Double composite AGN, a(OX) = -0.8 and free electron fraction = 2% without dust grains. No interpolation.'
print ('No interpolation for the AGN a(ox) = -0.8 with 2% free electrons models without grains is going to be used.')
print ('The grid has a resolution of 0.1dex for O/H and 0.125dex for C/O.')
res_CO = 0.125
elif inter == 1:
sed_type = 'Double composite AGN, a(OX) = -0.8, free electron fraction = 2% and without dust grains. Interpolation.'
print ('Interpolation for the AGN a(ox) = -0.8, 2% free electrons and without dust models is going to be used.')
print ('The grid has a resolution of 0.01 dex for O/H and 0.0125 dex for C/O.')
res_CO = 0.125
#AGN MODEL FOR alpha_OX = -0.8, efrac = 98%, with dust grains
elif sed==3 and alpha ==1 and efrac == 2 and grains == 1:
file_lib = 'C17_AGN_alpha08_efrac98_CNfix_grains_uv.dat'
#Counting comments:
n_comments = 0
with open('Libraries_uv/'+file_lib, 'r') as file8:
for line in file8:
if line[0] == '#':
n_comments += 1
grid_aux = np.genfromtxt('Libraries_uv/'+file_lib,dtype=None,names=True, encoding = 'ascii', skip_header=n_comments)
if inter == 0:
sed_type = 'Double composite AGN, a(OX) = -0.8 and free electron fraction = 98% with dust grains. No interpolation.'
print ('No interpolation for the AGN a(ox) = -0.8 with 98% free electrons and dust grains models is going to be used.')
print ('The grid has a resolution of 0.1dex for O/H and 0.125dex for C/O.')
res_CO = 0.125
elif inter == 1:
sed_type = 'Double composite AGN, a(OX) = -0.8, free electron fraction = 98% and with dust grains. Interpolation.'
print ('Interpolation for the AGN a(ox) = -0.8, 98% free electrons and with dust models is going to be used.')
print ('The grid has a resolution of 0.01 dex for O/H and 0.0125 dex for C/O.')
res_CO = 0.125
#AGN MODEL FOR alpha_OX = -0.8, efrac = 98%, without dust grains
elif sed==3 and alpha ==1 and efrac == 2 and grains == 2:
file_lib = 'C17_AGN_alpha08_efrac98_CNfix_nograins_uv.dat'
#Counting comments:
n_comments = 0
with open('Libraries_uv/'+file_lib, 'r') as file8:
for line in file8:
if line[0] == '#':
n_comments += 1
grid_aux = np.genfromtxt('Libraries_uv/'+file_lib,dtype=None,names=True, encoding = 'ascii', skip_header=n_comments)
if inter == 0:
sed_type = 'Double composite AGN, a(OX) = -0.8 and free electron fraction = 98% without dust grains. No interpolation.'
print ('No interpolation for the AGN a(ox) = -0.8 with 98% free electrons models without grains is going to be used.')
print ('The grid has a resolution of 0.1dex for O/H and 0.125dex for C/O.')
res_CO = 0.125
elif inter == 1:
sed_type = 'Double composite AGN, a(OX) = -0.8, free electron fraction = 98% and without dust grains. Interpolation.'
print ('Interpolation for the AGN a(ox) = -0.8, 98% free electrons and without dust models is going to be used.')
print ('The grid has a resolution of 0.01 dex for O/H and 0.0125 dex for C/O.')
res_CO = 0.125
#AGN MODEL FOR alpha_OX = -1.2, efrac = 2%, with dust grains
elif sed==3 and alpha ==2 and efrac == 1 and grains == 1:
file_lib = 'C17_AGN_alpha12_efrac02_CNfix_grains_uv.dat'
#Counting comments:
n_comments = 0
with open('Libraries_uv/'+file_lib, 'r') as file8:
for line in file8:
if line[0] == '#':
n_comments += 1
grid_aux = np.genfromtxt('Libraries_uv/'+file_lib,dtype=None,names=True, encoding = 'ascii', skip_header=n_comments)
if inter == 0:
sed_type = 'Double composite AGN, a(OX) = -1.2 and free electron fraction = 2% with dust grains. No interpolation.'
print ('No interpolation for the AGN a(ox) = -1.2 with 2% free electrons and dust grains models is going to be used.')
print ('The grid has a resolution of 0.1dex for O/H and 0.125dex for C/O.')
res_CO = 0.125
elif inter == 1:
sed_type = 'Double composite AGN, a(OX) = -1.2, free electron fraction = 2% and with dust grains. Interpolation.'
print ('Interpolation for the AGN a(ox) = -1.2, 2% free electrons and with dust models is going to be used.')
print ('The grid has a resolution of 0.01 dex for O/H and 0.0125 dex for C/O.')
res_CO = 0.125
#AGN MODEL FOR alpha_OX = -1.2, efrac = 2%, without dust grains
elif sed==3 and alpha ==2 and efrac == 1 and grains == 2:
file_lib = 'C17_AGN_alpha12_efrac02_CNfix_nograins_uv.dat'
#Counting comments:
n_comments = 0
with open('Libraries_uv/'+file_lib, 'r') as file8:
for line in file8:
if line[0] == '#':
n_comments += 1
grid_aux = np.genfromtxt('Libraries_uv/'+file_lib,dtype=None,names=True, encoding = 'ascii', skip_header=n_comments)
if inter == 0:
sed_type = 'Double composite AGN, a(OX) = -1.2 and free electron fraction = 2% without dust grains. No interpolation.'
print ('No interpolation for the AGN a(ox) = -1.2 with 2% free electrons models without grains is going to be used.')
print ('The grid has a resolution of 0.1dex for O/H and 0.125dex for C/O.')
res_CO = 0.125
elif inter == 1:
sed_type = 'Double composite AGN, a(OX) = -1.2, free electron fraction = 2% and without dust grains. Interpolation.'
print ('Interpolation for the AGN a(ox) = -1.2, 2% free electrons and without dust models is going to be used.')
print ('The grid has a resolution of 0.01 dex for O/H and 0.0125 dex for C/O.')
res_CO = 0.125
#AGN MODEL FOR alpha_OX = -1.2, efrac = 98%, with dust grains
elif sed==3 and alpha ==2 and efrac == 2 and grains == 1:
file_lib = 'C17_AGN_alpha12_efrac98_CNfix_grains_uv.dat'
#Counting comments:
n_comments = 0
with open('Libraries_uv/'+file_lib, 'r') as file8:
for line in file8:
if line[0] == '#':
n_comments += 1
grid_aux = np.genfromtxt('Libraries_uv/'+file_lib,dtype=None,names=True, encoding = 'ascii', skip_header=n_comments)
if inter == 0:
sed_type = 'Double composite AGN, a(OX) = -1.2 and free electron fraction = 98% with dust grains. No interpolation.'
print ('No interpolation for the AGN a(ox) = -1.2 with 98% free electrons and dust grains models is going to be used.')
print ('The grid has a resolution of 0.1dex for O/H and 0.125dex for C/O.')
res_CO = 0.125
elif inter == 1:
sed_type = 'Double composite AGN, a(OX) = -1.2, free electron fraction = 98% and with dust grains. Interpolation.'
print ('Interpolation for the AGN a(ox) = -1.2, 98% free electrons and with dust models is going to be used.')
print ('The grid has a resolution of 0.01 dex for O/H and 0.0125 dex for C/O.')
res_CO = 0.125
#AGN MODEL FOR alpha_OX = -1.2, efrac = 98%, without dust grains
elif sed==3 and alpha ==2 and efrac == 2 and grains == 2:
file_lib = 'C17_AGN_alpha12_efrac98_CNfix_nograins_uv.dat'
#Counting comments:
n_comments = 0
with open('Libraries_uv/'+file_lib, 'r') as file8:
for line in file8:
if line[0] == '#':
n_comments += 1
grid_aux = np.genfromtxt('Libraries_uv/'+file_lib,dtype=None,names=True, encoding = 'ascii', skip_header=n_comments)
if inter == 0:
sed_type = 'Double composite AGN, a(OX) = -1.2 and free electron fraction = 98% without dust grains. No interpolation.'
print ('No interpolation for the AGN a(ox) = -1.2 with 98% free electrons models without grains is going to be used.')
print ('The grid has a resolution of 0.1dex for O/H and 0.125dex for C/O.')
        res_CO = 0.125
elif inter == 1:
sed_type = 'Double composite AGN, a(OX) = -1.2, free electron fraction = 98% and without dust grains. Interpolation.'
print ('Interpolation for the AGN a(ox) = -1.2, 98% free electrons and without dust models is going to be used.')
print ('The grid has a resolution of 0.01 dex for O/H and 0.0125 dex for C/O.')
        res_CO = 0.125
#Different library
elif sed==4:
file_lib = new_library
#Counting comments:
n_comments = 0
with open('Libraries_uv/'+new_library, 'r') as file8:
for line in file8:
if line[0] == '#':
n_comments += 1
grid_aux = np.genfromtxt('Libraries_uv/'+new_library,dtype=None,names=True, encoding = 'ascii', skip_header=n_comments)
if inter == 0:
sed_type = 'User file ' + new_library + ' used as library for the models no interpolated'
print ('No interpolation for the library '+new_library)
res_CO = 0.125
elif inter == 1:
sed_type = 'User file ' + new_library + ' used as library for the models interpolated'
print ('Interpolation for the library '+new_library)
res_CO = 0.125
#Valuable columns of the files
uv_lin = ['12logOH', 'logCO', 'logU', 'Lya_1216', 'CIV_1549', 'HeII_1640', 'OIII_1665', 'CIII_1909', 'OIII_5007']
lin_uv_label = ['12+log(O/H)', 'log(C/O)', 'log(U)', 'Lya_1216', 'CIV_1549', 'HeII_1640', 'OIII_1665', 'CIII_1909', 'OIII_5007']
########################################
###### SORTING THE GRID OF MODELS ######
########################################
print (' ')
print ('Sorting the grid of models')
print (' ')
index_OH_CO_U_sorted = [] #storing the correct order of the indexes
#Sorting abundances 12+log(O/H)
OH_values = grid_aux['12logOH'] #Oxygen abundances
if len(OH_values) != 1:
sorted_list_OH = sorted(range(len(OH_values)),key=OH_values.__getitem__)
if len(OH_values) == 1:
sorted_list_OH = [0]
#Sorting abundance ratios log(C/O)
OH_values_diff = list(set(OH_values[sorted_list_OH]))
OH_values_diff.sort() #It is necessary to sort again the list of different elements
for OH_num in OH_values_diff:
index_OH_fix = np.where(OH_values == OH_num)[0] #Index(es) for a particular abundance 12+log(O/H)
CO_values = grid_aux['logCO'][index_OH_fix]
if len(CO_values) != 1:
sorted_list_CO = sorted(range(len(CO_values)), key=CO_values.__getitem__)
if len(CO_values) == 1:
sorted_list_CO = [0]
CO_values_diff = list(set(CO_values[sorted_list_CO]))
    CO_values_diff.sort() #It is necessary to sort again the list of different elements
for CO_num in CO_values_diff:
index_OH_CO_fix = np.where(CO_values == CO_num)[0] #Index(es) for particular abundances 12+log(O/H) and log(C/O)
#Sorting ionization parameters
U_values = grid_aux['logU'][index_OH_fix[index_OH_CO_fix]]
if len(U_values) != 1:
sorted_list_U = sorted(range(len(U_values)), key=U_values.__getitem__)
if len(U_values) == 1:
sorted_list_U = [0]
index_OH_CO_U = index_OH_fix[index_OH_CO_fix[sorted_list_U]] #Sorted index(es) for U at fixed O/H and C/O
for index_sort in index_OH_CO_U:
index_OH_CO_U_sorted.append(index_sort) #Adding index in the correct order
#Generating new library file
list_comments = [] #Storing comments in the file:
with open('Libraries_uv/'+file_lib, 'r') as file_aux:
for line in file_aux:
if line[0] == '#':
list_comments.append(line)
#Storing columns:
lin_uv_col = []
#Retrieving each column of the grid
for label in uv_lin:
aux_col = grid_aux[label].tolist()
lin_uv_col.append(aux_col)
#Comments
grid_to_write = open('Libraries_uv/'+file_lib, 'w')
for line_com in list_comments:
grid_to_write.write(line_com)
#Header line
label_line = '{:15} '.format(lin_uv_label[0].replace(' ',''))
for ind in range(1, len(lin_uv_label)-1):
label_line += '\t {:15} '.format(lin_uv_label[ind].replace(' ',''))
label_line += '\t {:15}\n'.format(lin_uv_label[-1].replace(' ',''))
grid_to_write.write(label_line)
#Values:
for ind_val in index_OH_CO_U_sorted:
val_line = '{:7.7f} '.format(lin_uv_col[0][ind_val])
for ind2 in range(1, len(lin_uv_label)-1):
val_line += '\t {:7.7f} '.format(lin_uv_col[ind2][ind_val])
val_line += '\t {:7.7f}\n'.format(lin_uv_col[-1][ind_val])
grid_to_write.write(val_line)
grid_to_write.close()
#Opening sorted grid of models
n_comments = 0
with open('Libraries_uv/'+file_lib, 'r') as file12:
for line in file12:
if line[0] == '#':
n_comments += 1
grid_aux = np.genfromtxt('Libraries_uv/'+file_lib, dtype=None,names=True, encoding = 'ascii', skip_header=n_comments)
################################################
###### CONSTRAINTS FOR THE GRID OF MODELS ######
################################################
#Reading constraints and creating library with constraints
print (' ')
print ('Select a file with the constraint laws to be used to limit the grid of models when the measurement of a quantity is impossible without any relation.')
print (' ')
print ('')
question = True
while question:
print ('-------------------------------------------------')
print ('Default constraints')
print ('-------------------')
print ('(1) Constraints for Star-Forming Galaxies')
print ('(2) Constraints for Extreme Emission Line Galaxies')
print ('(3) Constraints for AGNs (no restriction in the ionization parameter)')
print ('')
print ('Other constraints')
print ('-----------------')
print ('(4) Different constraint file')
print ('-------------------------------------------------')
if int(sys.version[0]) < 3:
const = raw_input('Choose constraint for the grids: ')
else:
const = input('Choose constraint for the grids: ')
if const == '1' or const == '2' or const == '3' or const == '4': question = False
print ('')
#Particular file introduced by the user
if const == '4':
question = True
while question:
print ('Introduce name of the file containing the constraints for the grids. It must be located in the folder "Constraints".')
print (' ')
if int(sys.version[0]) < 3:
new_const = raw_input('Name of file: ')
else:
new_const = input('Name of file: ')
#Searching for the file
try:
#Counting comments:
n_comments = 0
with open('Constraints/'+new_const, 'r') as file9:
for line in file9:
if line[0] == '#':
n_comments += 1
const_user = np.genfromtxt('Constraints/'+new_const, dtype=None,names=True, encoding = 'ascii', skip_header=n_comments)
print (' ')
print ('Loading constraint file '+new_const+'. Checking correct format of the file.')
question = False
except:
print (' ')
print ('File was not found in folder "Constraints" or file does not exist.')
question = True
while question:
try:
#Counting comments:
n_comments = 0
with open('Constraints/'+new_const, 'r') as file10:
for line in file10:
if line[0] == '#':
n_comments += 1
const_user = np.genfromtxt('Constraints/'+new_const, dtype=None,names=True, encoding = 'ascii', skip_header=n_comments)
#Checking correct format:
#Counting comments:
n_comments = 0
with open('Constraints/template_OH.dat', 'r') as file11:
for line in file11:
if line[0] == '#':
n_comments += 1
auxiliar_labels = np.genfromtxt('Constraints/template_OH.dat', dtype=None, names=True, encoding = 'ascii', skip_header=n_comments).dtype.names
missing_labels = []
for label in auxiliar_labels:
if label in const_user.dtype.names:
continue
else:
missing_labels.append(label)
#Displaying message for the user:
                    print ('Successfully read the file')
if len(missing_labels) == 0:
print ('File presents the correct format')
question = False
else:
print ('File does not present the correct format. The following columns are missing:')
for need_label in missing_labels:
print('- '+need_label)
print ('More details on the correct format for the library are found in readme file.')
print (' ')
print ('Reintroduce name of the file with fixed format:')
print (' ')
if int(sys.version[0]) < 3:
new_const = raw_input('Name of file: ')
else:
new_const = input('Name of file: ')
except:
print ('Something went wrong while reading file. Please, reintroduce name of the file:')
print (' ')
if int(sys.version[0]) < 3:
new_const = raw_input('Name of file: ')
else:
new_const = input('Name of file: ')
#Generation of grids with constraints laws:
if const == '1' or const == '2' or const == '3' or const == '4':
#First grid does not change
grid1 = grid_aux
file_lib_2 = file_lib
#Generating libraries for the constraints in the files
if const == '1': #Star-Forming Galaxies
const_file = 'template_OH.dat'
name_const = 'Constraints/template_OH.dat'
n_comments = 0
with open(name_const, 'r') as file12:
for line in file12:
if line[0] == '#':
n_comments += 1
const_data = np.genfromtxt(name_const,dtype=None,names=True, encoding = 'ascii', skip_header=n_comments)
if const == '2':
const_file = 'template_OH_eelg.dat'
name_const = 'Constraints/template_OH_eelg.dat'
n_comments = 0
with open(name_const, 'r') as file13:
for line in file13:
if line[0] == '#':
n_comments += 1
const_data = np.genfromtxt(name_const,dtype=None,names=True, encoding = 'ascii', skip_header=n_comments)
if const == '3':
name_const = 'Constraints/template_OH_agn.dat'
const_file = 'template_OH_agn.dat'
n_comments = 0
with open(name_const, 'r') as file18:
for line in file18:
if line[0] == '#':
n_comments += 1
const_data = np.genfromtxt(name_const,dtype=None,names=True, encoding = 'ascii', skip_header=n_comments)
if const == '4':
const_file = new_const
name_const = 'Constraints/'+new_const
n_comments = 0
with open(name_const, 'r') as file14:
for line in file14:
if line[0] == '#':
n_comments += 1
const_data = np.genfromtxt(name_const,dtype=None,names=True, encoding = 'ascii', skip_header=n_comments)
#Limiting the grids:
lin_uv_val = []
    #The initial grid needs to be constrained in the ionization parameter
#Retrieving each column of the grid
for label in uv_lin:
aux_col = grid1[label].tolist()
lin_uv_val.append(aux_col)
#Creation of the grids
name_OH_U = '.'.join(file_lib_2.split('.')[0:-1])+'_OH_U_constrained.'+file_lib.split('.')[-1]
name_OH_U_CO = '.'.join(file_lib_2.split('.')[0:-1])+'_OH_U_CO_constrained.'+file_lib.split('.')[-1]
file_open = open('Libraries_uv/'+ name_OH_U, 'w') #OH and U relation
file_open_2 = open('Libraries_uv/'+name_OH_U_CO, 'w') #OH, CO and U relation
file_open.write('#Constrained by relation between 12+log(O/H) and log(U)\n')
file_open_2.write('#Constrained by relation between 12+log(O/H), log(U) and log(C/O)\n')
#Header line
label_line = '{:15} '.format(lin_uv_label[0].replace(' ',''))
for ind in range(1, len(lin_uv_label)-1):
label_line += '\t {:15} '.format(lin_uv_label[ind].replace(' ',''))
label_line += '\t {:15}\n'.format(lin_uv_label[-1].replace(' ',''))
file_open.write(label_line)
file_open_2.write(label_line)
#Values:
for ind_val in range(0, len(lin_uv_val[0])):
index_desired = np.where(const_data['12logOH'] == lin_uv_val[0][ind_val])[0][0] #Searching for constrain in given value of O/H
if lin_uv_val[2][ind_val] <= const_data['logU_max'][index_desired] and lin_uv_val[2][ind_val] >= const_data['logU_min'][index_desired]:
val_line = '{:7.7f} '.format(lin_uv_val[0][ind_val])
for ind2 in range(1, len(lin_uv_label)-1):
val_line += '\t {:7.7f} '.format(lin_uv_val[ind2][ind_val])
val_line += '\t {:7.7f}\n'.format(lin_uv_val[-1][ind_val])
file_open.write(val_line)
if lin_uv_val[2][ind_val] <= const_data['logU_max'][index_desired] and lin_uv_val[2][ind_val] >= const_data['logU_min'][index_desired] and lin_uv_val[1][ind_val] <= const_data['logCO_max'][index_desired] and lin_uv_val[1][ind_val] >= const_data['logCO_min'][index_desired]:
val_line = '{:7.7f} '.format(lin_uv_val[0][ind_val])
for ind2 in range(1, len(lin_uv_label)-1):
val_line += '\t {:7.7f} '.format(lin_uv_val[ind2][ind_val])
val_line += '\t {:7.7f}\n'.format(lin_uv_val[-1][ind_val])
file_open_2.write(val_line)
file_open.close()
file_open_2.close()
#Counting comments:
n_comments = 0
with open('Libraries_uv/'+name_OH_U, 'r') as file15:
for line in file15:
if line[0] == '#':
n_comments += 1
grid2 = np.genfromtxt('Libraries_uv/'+name_OH_U,dtype=None,names=True, encoding = 'ascii', skip_header=n_comments)
n_comments = 0
with open('Libraries_uv/'+name_OH_U_CO, 'r') as file:
for line in file:
if line[0] == '#':
n_comments += 1
grid3 = np.genfromtxt('Libraries_uv/'+name_OH_U_CO,dtype=None,names=True, encoding = 'ascii', skip_header=n_comments)
#Residual in CO
if inter==0:
res_CO = np.max([sorted(set(grid1['logCO']))[ind+1]-sorted(set(grid1['logCO']))[ind] for ind in range(0, len(set(grid1['logCO']))-1)])
if inter==1:
res_CO = np.max([sorted(set(grid1['logCO']))[ind+1]-sorted(set(grid1['logCO']))[ind] for ind in range(0, len(set(grid1['logCO']))-1)])/10
###########################################
###### SUMMARY OF THE GRID OF MODELS ######
###########################################
print ('-------------------------------------------------')
print ('Summary of the models')
print ('---------------------')
print ('Libraries generated with the constraints. The following grids are going to be used:')
print ('- Full library (Grid#1): '+file_lib_2)
print (' Total number of models: ' + str(len(grid1)))
print ('- Library constrained by 12+log(O/H) - log(U) relation (Grid#2): '+name_OH_U)
print (' Total number of models: ' + str(len(grid2)))
print ('- Library constrained by 12+log(O/H) - log(U) - log(C/O) relation (Grid#3): '+name_OH_U_CO)
print (' Total number of models: ' + str(len(grid3)))
print ('-------------------------------------------------')
print (' ')
#################################################
###### CREATING ARRAY TO STORE ESTIMATIONS ######
#################################################
grids = []
OHffs = []
eOHffs = []
COffs = []
eCOffs = []
logUffs = []
elogUffs = []
Label_ID = False
Label_Lya = False
Label_eLya = False
Label_NV = False
Label_eNV = False
Label_CIV = False
Label_eCIV = False
Label_HeII = False
Label_eHeII = False
Label_OIII_1665 = False
Label_eOIII_1665 = False
Label_CIII = False
Label_eCIII = False
Label_OIII_5007 = False
Label_eOIII_5007 = False
Label_Hbeta = False
Label_eHbeta = False
#Checking input information
for col in range(0,len(input1.dtype.names),1):
if input1.dtype.names[col] == 'ID':
Label_ID = True
if input1.dtype.names[col] == 'Lya_1216':
Label_Lya = True
if input1.dtype.names[col] == 'eLya_1216':
Label_eLya = True
if input1.dtype.names[col] == 'NV_1239':
Label_NV = True
if input1.dtype.names[col] == 'eNV_1239':
Label_eNV = True
if input1.dtype.names[col] == 'CIV_1549':
Label_CIV = True
if input1.dtype.names[col] == 'eCIV_1549':
Label_eCIV = True
if input1.dtype.names[col] == 'HeII_1640':
Label_HeII = True
if input1.dtype.names[col] == 'eHeII_1640':
Label_eHeII = True
if input1.dtype.names[col] == 'OIII_1665':
Label_OIII_1665 = True
if input1.dtype.names[col] == 'eOIII_1665':
Label_eOIII_1665 = True
if input1.dtype.names[col] == 'CIII_1909':
Label_CIII = True
if input1.dtype.names[col] == 'eCIII_1909':
Label_eCIII = True
if input1.dtype.names[col] == 'Hb_4861':
Label_Hbeta = True
if input1.dtype.names[col] == 'eHb_4861':
Label_eHbeta = True
if input1.dtype.names[col] == 'OIII_5007':
Label_OIII_5007 = True
if input1.dtype.names[col] == 'eOIII_5007':
Label_eOIII_5007 = True
#Adapting final output with information from given input
if Label_ID == False:
Names = np.arange(1,input1.size+1,1)
else:
Names = input1['ID']
if Label_Lya == False:
Lya_1216 = np.zeros(input1.size)
else:
Lya_1216 = input1['Lya_1216']
if Label_eLya == False:
eLya_1216 =
|
np.zeros(input1.size)
|
numpy.zeros
|
import matplotlib.pyplot as plt
import numpy as np
import nanonet.tb as tb
# noinspection PyUnresolvedReferences
from nanonet.negf.greens_functions import simple_iterative_greens_function, sancho_rubio_iterative_greens_function, \
surface_greens_function
# noinspection PyPep8Naming
def main(surf_greens_fun):
""" An example for the Green's function usage"""
a = tb.Orbitals('A')
a.add_orbital('s', -0.7)
tb.Orbitals.orbital_sets = {'A': a}
tb.set_tb_params(PARAMS_A_A={'ss_sigma': -0.5})
xyz_file = """1
A cell
A1 0.0000000000 0.0000000000 0.0000000000
"""
h = tb.Hamiltonian(xyz=xyz_file, nn_distance=1.1)
h.initialize()
h.set_periodic_bc([[0, 0, 1.0]])
h_l, h_0, h_r = h.get_hamiltonians()
energy = np.linspace(-3.0, 1.5, 700)
sgf_l = []
sgf_r = []
for E in energy:
sf = surf_greens_fun(E, h_l, h_0, h_r, damp=0.001j)
if isinstance(sf, tuple):
L = sf[0]
R = sf[1]
else:
L = sf
R = surf_greens_fun(E, h_r, h_0, h_l, damp=0.001j)
sgf_l.append(L)
sgf_r.append(R)
sgf_l = np.array(sgf_l)
sgf_r = np.array(sgf_r)
num_sites = h_0.shape[0]
gf = np.linalg.pinv(np.multiply.outer(energy, np.identity(num_sites)) - h_0 - sgf_l - sgf_r)
dos = -np.trace(
|
np.imag(gf)
|
numpy.imag
|
import warnings
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from core.lattice import *
class Diffract(object):
def __init__(self, lattice, energy, polarization, surface, aziref, absorb):
self.lat = lattice
self.e = energy
self.lam = 1.23984193e4 / energy
self.pol = polarization
self.n = surface
self.azir = aziref
self.absorb = absorb
def orientate(self, Q):
"""
Computes tensor that projects crystal frame onto diffraction frame
:param Q:
:return:
"""
Q = np.asarray(Q) / np.asarray([self.lat.a,self.lat.b,self.lat.c])
Qnorm = Q / np.linalg.norm(Q)
if (Q[0] == 0 and Q[2] == 0):
zeta = 0
else:
zeta = np.arctan2(np.dot(Qnorm, np.array([0, 0, 1])), np.dot(Qnorm, np.array([1, 0, 0])))
eta = np.arccos(np.dot(Qnorm, np.array([0, 1, 0])))
T = np.array([[-np.cos(zeta) * np.cos(eta), np.sin(eta), -np.sin(zeta) * np.cos(eta)],
[ np.sin(zeta), 0, -np.cos(zeta)],
[-np.cos(zeta) * np.sin(eta),-np.cos(eta), -np.sin(zeta) *
|
np.sin(eta)
|
numpy.sin
|
from __future__ import division
import traits.api as traits
from traitsui.api import View, Item, Group
import cgtypes # import cgkit 1.x
import numpy as np
D2R = np.pi / 180.0
def cgmat2np(cgkit_mat):
"""convert cgkit matrix to numpy matrix"""
arr = np.array(cgkit_mat.toList())
if len(arr) == 9:
arr.shape = 3, 3
elif len(arr) == 16:
arr.shape = 4, 4
else:
raise ValueError("unknown shape")
return arr.T
def test_cgmat2np():
point1 = (1, 0, 0)
point1_out = (0, 1, 0)
cg_quat = cgtypes.quat().fromAngleAxis(90.0 * D2R, (0, 0, 1))
cg_in = cgtypes.vec3(point1)
m_cg = cg_quat.toMat3()
cg_out = m_cg * cg_in
cg_out_tup = (cg_out[0], cg_out[1], cg_out[2])
assert
|
np.allclose(cg_out_tup, point1_out)
|
numpy.allclose
|
from parcels import (FieldSet, Field, ScipyParticle, JITParticle, ErrorCode, StateCode,
AdvectionEE, AdvectionRK4, AdvectionRK45, AdvectionRK4_3D,
AdvectionAnalytical, AdvectionDiffusionM1, AdvectionDiffusionEM)
from parcels import ParticleSetSOA, ParticleFileSOA, KernelSOA # noqa
from parcels import ParticleSetAOS, ParticleFileAOS, KernelAOS # noqa
import numpy as np
import pytest
import math
from netCDF4 import Dataset
from datetime import timedelta as delta
from parcels import logger
pset_modes = ['soa', 'aos']
ptype = {'scipy': ScipyParticle, 'jit': JITParticle}
pset_type = {'soa': {'pset': ParticleSetSOA, 'pfile': ParticleFileSOA, 'kernel': KernelSOA},
'aos': {'pset': ParticleSetAOS, 'pfile': ParticleFileAOS, 'kernel': KernelAOS}}
kernel = {'EE': AdvectionEE, 'RK4': AdvectionRK4, 'RK45': AdvectionRK45,
'AdvDiffEM': AdvectionDiffusionEM, 'AdvDiffM1': AdvectionDiffusionM1}
# Some constants
f = 1.e-4
u_0 = 0.3
u_g = 0.04
gamma = 1/(86400. * 2.89)
gamma_g = 1/(86400. * 28.9)
def lon(xdim=200):
return np.linspace(-170, 170, xdim, dtype=np.float32)
@pytest.fixture(name="lon")
def lon_fixture(xdim=200):
return lon(xdim=xdim)
def lat(ydim=100):
return np.linspace(-80, 80, ydim, dtype=np.float32)
@pytest.fixture(name="lat")
def lat_fixture(ydim=100):
return lat(ydim=ydim)
def depth(zdim=2):
return np.linspace(0, 30, zdim, dtype=np.float32)
@pytest.fixture(name="depth")
def depth_fixture(zdim=2):
return depth(zdim=zdim)
@pytest.mark.parametrize('pset_mode', pset_modes)
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_advection_zonal(lon, lat, depth, pset_mode, mode, npart=10):
""" Particles at high latitude move geographically faster due to
the pole correction in `GeographicPolar`.
"""
data2D = {'U': np.ones((lon.size, lat.size), dtype=np.float32),
'V': np.zeros((lon.size, lat.size), dtype=np.float32)}
data3D = {'U': np.ones((lon.size, lat.size, depth.size), dtype=np.float32),
'V': np.zeros((lon.size, lat.size, depth.size), dtype=np.float32)}
dimensions = {'lon': lon, 'lat': lat}
fieldset2D = FieldSet.from_data(data2D, dimensions, mesh='spherical', transpose=True)
assert fieldset2D.U.creation_log == 'from_data'
pset2D = pset_type[pset_mode]['pset'](fieldset2D, pclass=ptype[mode],
lon=np.zeros(npart) + 20.,
lat=np.linspace(0, 80, npart))
pset2D.execute(AdvectionRK4, runtime=delta(hours=2), dt=delta(seconds=30))
assert (np.diff(pset2D.lon) > 1.e-4).all()
dimensions['depth'] = depth
fieldset3D = FieldSet.from_data(data3D, dimensions, mesh='spherical', transpose=True)
pset3D = pset_type[pset_mode]['pset'](fieldset3D, pclass=ptype[mode],
lon=np.zeros(npart) + 20.,
lat=np.linspace(0, 80, npart),
depth=np.zeros(npart) + 10.)
pset3D.execute(AdvectionRK4, runtime=delta(hours=2), dt=delta(seconds=30))
assert (np.diff(pset3D.lon) > 1.e-4).all()
@pytest.mark.parametrize('pset_mode', pset_modes)
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_advection_meridional(lon, lat, pset_mode, mode, npart=10):
""" Particles at high latitude move geographically faster due to
the pole correction in `GeographicPolar`.
"""
data = {'U': np.zeros((lon.size, lat.size), dtype=np.float32),
'V': np.ones((lon.size, lat.size), dtype=np.float32)}
dimensions = {'lon': lon, 'lat': lat}
fieldset = FieldSet.from_data(data, dimensions, mesh='spherical', transpose=True)
pset = pset_type[pset_mode]['pset'](fieldset, pclass=ptype[mode],
lon=np.linspace(-60, 60, npart),
lat=np.linspace(0, 30, npart))
delta_lat = np.diff(pset.lat)
pset.execute(AdvectionRK4, runtime=delta(hours=2), dt=delta(seconds=30))
assert np.allclose(np.diff(pset.lat), delta_lat, rtol=1.e-4)
@pytest.mark.parametrize('pset_mode', pset_modes)
@pytest.mark.parametrize('mode', ['jit', 'scipy'])
def test_advection_3D(pset_mode, mode, npart=11):
""" 'Flat' 2D zonal flow that increases linearly with depth from 0 m/s to 1 m/s
"""
xdim = ydim = zdim = 2
dimensions = {'lon': np.linspace(0., 1e4, xdim, dtype=np.float32),
'lat': np.linspace(0., 1e4, ydim, dtype=np.float32),
'depth': np.linspace(0., 1., zdim, dtype=np.float32)}
data = {'U': np.ones((xdim, ydim, zdim), dtype=np.float32),
'V': np.zeros((xdim, ydim, zdim), dtype=np.float32)}
data['U'][:, :, 0] = 0.
fieldset = FieldSet.from_data(data, dimensions, mesh='flat', transpose=True)
pset = pset_type[pset_mode]['pset'](fieldset, pclass=ptype[mode],
lon=np.zeros(npart),
lat=np.zeros(npart) + 1e2,
depth=np.linspace(0, 1, npart))
time = delta(hours=2).total_seconds()
pset.execute(AdvectionRK4, runtime=time, dt=delta(seconds=30))
assert np.allclose(pset.depth*time, pset.lon, atol=1.e-1)
@pytest.mark.parametrize('pset_mode', pset_modes)
@pytest.mark.parametrize('mode', ['jit', 'scipy'])
@pytest.mark.parametrize('direction', ['up', 'down'])
@pytest.mark.parametrize('wErrorThroughSurface', [True, False])
def test_advection_3D_outofbounds(pset_mode, mode, direction, wErrorThroughSurface):
xdim = ydim = zdim = 2
dimensions = {'lon': np.linspace(0., 1, xdim, dtype=np.float32),
'lat': np.linspace(0., 1, ydim, dtype=np.float32),
'depth': np.linspace(0., 1, zdim, dtype=np.float32)}
wfac = -1. if direction == 'up' else 1.
data = {'U': 0.01*np.ones((xdim, ydim, zdim), dtype=np.float32),
'V': np.zeros((xdim, ydim, zdim), dtype=np.float32),
'W': wfac * np.ones((xdim, ydim, zdim), dtype=np.float32)}
fieldset = FieldSet.from_data(data, dimensions, mesh='flat')
def DeleteParticle(particle, fieldset, time):
particle.delete()
def SubmergeParticle(particle, fieldset, time):
particle.depth = 0
AdvectionRK4(particle, fieldset, time) # perform a 2D advection because vertical flow will always push up in this case
particle.time = time + particle.dt # to not trigger kernels again, otherwise infinite loop
particle.set_state(StateCode.Success)
recovery_dict = {ErrorCode.ErrorOutOfBounds: DeleteParticle}
if wErrorThroughSurface:
recovery_dict[ErrorCode.ErrorThroughSurface] = SubmergeParticle
pset = pset_type[pset_mode]['pset'](fieldset=fieldset, pclass=ptype[mode], lon=0.5, lat=0.5, depth=0.9)
pset.execute(AdvectionRK4_3D, runtime=10., dt=1, recovery=recovery_dict)
if direction == 'up' and wErrorThroughSurface:
assert np.allclose(pset.lon[0], 0.6)
assert np.allclose(pset.depth[0], 0)
else:
assert len(pset) == 0
def periodicfields(xdim, ydim, uvel, vvel):
dimensions = {'lon': np.linspace(0., 1., xdim+1, dtype=np.float32)[1:], # don't include both 0 and 1, for periodic b.c.
'lat': np.linspace(0., 1., ydim+1, dtype=np.float32)[1:]}
data = {'U': uvel * np.ones((xdim, ydim), dtype=np.float32),
'V': vvel * np.ones((xdim, ydim), dtype=np.float32)}
return FieldSet.from_data(data, dimensions, mesh='spherical', transpose=True)
def periodicBC(particle, fieldset, time):
particle.lon = math.fmod(particle.lon, 1)
particle.lat = math.fmod(particle.lat, 1)
@pytest.mark.parametrize('pset_mode', pset_modes)
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_advection_periodic_zonal(pset_mode, mode, xdim=100, ydim=100, halosize=3):
fieldset = periodicfields(xdim, ydim, uvel=1., vvel=0.)
fieldset.add_periodic_halo(zonal=True, halosize=halosize)
assert(len(fieldset.U.lon) == xdim + 2 * halosize)
pset = pset_type[pset_mode]['pset'](fieldset, pclass=ptype[mode], lon=[0.5], lat=[0.5])
pset.execute(AdvectionRK4 + pset.Kernel(periodicBC), runtime=delta(hours=20), dt=delta(seconds=30))
assert abs(pset.lon[0] - 0.15) < 0.1
@pytest.mark.parametrize('pset_mode', pset_modes)
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_advection_periodic_meridional(pset_mode, mode, xdim=100, ydim=100):
fieldset = periodicfields(xdim, ydim, uvel=0., vvel=1.)
fieldset.add_periodic_halo(meridional=True)
assert(len(fieldset.U.lat) == ydim + 10) # default halo size is 5 grid points
pset = pset_type[pset_mode]['pset'](fieldset, pclass=ptype[mode], lon=[0.5], lat=[0.5])
pset.execute(AdvectionRK4 + pset.Kernel(periodicBC), runtime=delta(hours=20), dt=delta(seconds=30))
assert abs(pset.lat[0] - 0.15) < 0.1
@pytest.mark.parametrize('pset_mode', pset_modes)
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_advection_periodic_zonal_meridional(pset_mode, mode, xdim=100, ydim=100):
fieldset = periodicfields(xdim, ydim, uvel=1., vvel=1.)
fieldset.add_periodic_halo(zonal=True, meridional=True)
assert(len(fieldset.U.lat) == ydim + 10) # default halo size is 5 grid points
assert(len(fieldset.U.lon) == xdim + 10) # default halo size is 5 grid points
assert np.allclose(np.diff(fieldset.U.lat), fieldset.U.lat[1]-fieldset.U.lat[0], rtol=0.001)
assert np.allclose(np.diff(fieldset.U.lon), fieldset.U.lon[1]-fieldset.U.lon[0], rtol=0.001)
pset = pset_type[pset_mode]['pset'](fieldset, pclass=ptype[mode], lon=[0.4], lat=[0.5])
pset.execute(AdvectionRK4 + pset.Kernel(periodicBC), runtime=delta(hours=20), dt=delta(seconds=30))
assert abs(pset.lon[0] - 0.05) < 0.1
assert abs(pset.lat[0] - 0.15) < 0.1
@pytest.mark.parametrize('pset_mode', pset_modes)
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('u', [-0.3, np.array(0.2)])
@pytest.mark.parametrize('v', [0.2, np.array(1)])
@pytest.mark.parametrize('w', [None, -0.2, np.array(0.7)])
def test_length1dimensions(pset_mode, mode, u, v, w):
logger.info("mode: {} pset_mode {}".format(mode, pset_mode))
(lon, xdim) = (np.linspace(-10, 10, 21), 21) if isinstance(u, np.ndarray) else (0, 1)
(lat, ydim) = (np.linspace(-15, 15, 31), 31) if isinstance(v, np.ndarray) else (-4, 1)
(depth, zdim) = (np.linspace(-5, 5, 11), 11) if (isinstance(w, np.ndarray) and w is not None) else (3, 1)
dimensions = {'lon': lon, 'lat': lat, 'depth': depth}
dims = []
if zdim > 1:
dims.append(zdim)
if ydim > 1:
dims.append(ydim)
if xdim > 1:
dims.append(xdim)
if len(dims) > 0:
U = u * np.ones(dims, dtype=np.float32)
V = v * np.ones(dims, dtype=np.float32)
if w is not None:
W = w * np.ones(dims, dtype=np.float32)
else:
U, V, W = u, v, w
data = {'U': U, 'V': V}
if w is not None:
data['W'] = W
fieldset = FieldSet.from_data(data, dimensions, mesh='flat')
x0, y0, z0 = 2, 8, -4
pset = pset_type[pset_mode]['pset'](fieldset, pclass=ptype[mode], lon=x0, lat=y0, depth=z0)
pfunc = AdvectionRK4 if w is None else AdvectionRK4_3D
kernel = pset.Kernel(pfunc)
pset.execute(kernel, runtime=4)
assert (len(pset.lon) == len([p.lon for p in pset]))
assert ((np.array([p.lon - x0 for p in pset]) - 4 * u) < 1e-6).all()
assert ((np.array([p.lat - y0 for p in pset]) - 4 * v) < 1e-6).all()
if w:
assert ((np.array([p.depth - y0 for p in pset]) - 4 * w) < 1e-6).all()
def truth_stationary(x_0, y_0, t):
lat = y_0 - u_0 / f * (1 - math.cos(f * t))
lon = x_0 + u_0 / f * math.sin(f * t)
return lon, lat
@pytest.fixture
def fieldset_stationary(xdim=100, ydim=100, maxtime=delta(hours=6)):
"""Generate a FieldSet encapsulating the flow field of a stationary eddy.
Reference: <NAME>, 2009, "Numerical simulations of passive
tracers dispersion in the sea"
"""
time = np.arange(0., maxtime.total_seconds()+1e-5, 60., dtype=np.float64)
dimensions = {'lon': np.linspace(0, 25000, xdim, dtype=np.float32),
'lat': np.linspace(0, 25000, ydim, dtype=np.float32),
'time': time}
data = {'U': np.ones((xdim, ydim, 1), dtype=np.float32) * u_0 * np.cos(f * time),
'V': np.ones((xdim, ydim, 1), dtype=np.float32) * -u_0 * np.sin(f * time)}
return FieldSet.from_data(data, dimensions, mesh='flat', transpose=True)
@pytest.mark.parametrize('pset_mode', pset_modes)
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('method, rtol, diffField', [
('EE', 1e-2, False),
('AdvDiffEM', 1e-2, True),
('AdvDiffM1', 1e-2, True),
('RK4', 1e-5, False),
('RK45', 1e-5, False)])
def test_stationary_eddy(pset_mode, fieldset_stationary, mode, method, rtol, diffField, npart=1):
fieldset = fieldset_stationary
if diffField:
fieldset.add_field(Field('Kh_zonal', np.zeros(fieldset.U.data.shape), grid=fieldset.U.grid))
fieldset.add_field(Field('Kh_meridional', np.zeros(fieldset.V.data.shape), grid=fieldset.V.grid))
fieldset.add_constant('dres', 0.1)
lon = np.linspace(12000, 21000, npart)
lat = np.linspace(12500, 12500, npart)
pset = pset_type[pset_mode]['pset'](fieldset, pclass=ptype[mode], lon=lon, lat=lat)
endtime = delta(hours=6).total_seconds()
pset.execute(kernel[method], dt=delta(minutes=3), endtime=endtime)
exp_lon = [truth_stationary(x, y, endtime)[0] for x, y, in zip(lon, lat)]
exp_lat = [truth_stationary(x, y, endtime)[1] for x, y, in zip(lon, lat)]
assert np.allclose(pset.lon, exp_lon, rtol=rtol)
assert np.allclose(pset.lat, exp_lat, rtol=rtol)
@pytest.mark.parametrize('pset_mode', pset_modes)
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_stationary_eddy_vertical(pset_mode, mode, npart=1):
lon = np.linspace(12000, 21000, npart)
lat = np.linspace(10000, 20000, npart)
depth = np.linspace(12500, 12500, npart)
endtime = delta(hours=6).total_seconds()
xdim = ydim = 100
lon_data = np.linspace(0, 25000, xdim, dtype=np.float32)
lat_data = np.linspace(0, 25000, ydim, dtype=np.float32)
time_data = np.arange(0., 6*3600+1e-5, 60., dtype=np.float64)
fld1 = np.ones((xdim, ydim, 1), dtype=np.float32) * u_0 * np.cos(f * time_data)
fld2 = np.ones((xdim, ydim, 1), dtype=np.float32) * -u_0 * np.sin(f * time_data)
fldzero = np.zeros((xdim, ydim, 1), dtype=np.float32) * time_data
dimensions = {'lon': lon_data, 'lat': lat_data, 'time': time_data}
data = {'U': fld1, 'V': fldzero, 'W': fld2}
fieldset = FieldSet.from_data(data, dimensions, mesh='flat', transpose=True)
pset = pset_type[pset_mode]['pset'](fieldset, pclass=ptype[mode], lon=lon, lat=lat, depth=depth)
pset.execute(AdvectionRK4_3D, dt=delta(minutes=3), endtime=endtime)
exp_lon = [truth_stationary(x, z, endtime)[0] for x, z, in zip(lon, depth)]
exp_depth = [truth_stationary(x, z, endtime)[1] for x, z, in zip(lon, depth)]
assert np.allclose(pset.lon, exp_lon, rtol=1e-5)
assert np.allclose(pset.lat, lat, rtol=1e-5)
assert np.allclose(pset.depth, exp_depth, rtol=1e-5)
data = {'U': fldzero, 'V': fld2, 'W': fld1}
fieldset = FieldSet.from_data(data, dimensions, mesh='flat', transpose=True)
pset = pset_type[pset_mode]['pset'](fieldset, pclass=ptype[mode], lon=lon, lat=lat, depth=depth)
pset.execute(AdvectionRK4_3D, dt=delta(minutes=3), endtime=endtime)
exp_depth = [truth_stationary(z, y, endtime)[0] for z, y, in zip(depth, lat)]
exp_lat = [truth_stationary(z, y, endtime)[1] for z, y, in zip(depth, lat)]
assert np.allclose(pset.lon, lon, rtol=1e-5)
assert np.allclose(pset.lat, exp_lat, rtol=1e-5)
assert np.allclose(pset.depth, exp_depth, rtol=1e-5)
def truth_moving(x_0, y_0, t):
lat = y_0 - (u_0 - u_g) / f * (1 - math.cos(f * t))
lon = x_0 + u_g * t + (u_0 - u_g) / f * math.sin(f * t)
return lon, lat
@pytest.fixture
def fieldset_moving(xdim=100, ydim=100, maxtime=delta(hours=6)):
"""Generate a FieldSet encapsulating the flow field of a moving eddy.
Reference: <NAME>, 2009, "Numerical simulations of passive
tracers dispersion in the sea"
"""
time = np.arange(0., maxtime.total_seconds()+1e-5, 60., dtype=np.float64)
dimensions = {'lon': np.linspace(0, 25000, xdim, dtype=np.float32),
'lat': np.linspace(0, 25000, ydim, dtype=np.float32),
'time': time}
data = {'U': np.ones((xdim, ydim, 1), dtype=np.float32) * u_g + (u_0 - u_g) * np.cos(f * time),
'V': np.ones((xdim, ydim, 1), dtype=np.float32) * -(u_0 - u_g) * np.sin(f * time)}
return FieldSet.from_data(data, dimensions, mesh='flat', transpose=True)
@pytest.mark.parametrize('pset_mode', pset_modes)
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('method, rtol, diffField', [
('EE', 1e-2, False),
('AdvDiffEM', 1e-2, True),
('AdvDiffM1', 1e-2, True),
('RK4', 1e-5, False),
('RK45', 1e-5, False)])
def test_moving_eddy(pset_mode, fieldset_moving, mode, method, rtol, diffField, npart=1):
fieldset = fieldset_moving
if diffField:
fieldset.add_field(Field('Kh_zonal', np.zeros(fieldset.U.data.shape), grid=fieldset.U.grid))
fieldset.add_field(Field('Kh_meridional', np.zeros(fieldset.V.data.shape), grid=fieldset.V.grid))
fieldset.add_constant('dres', 0.1)
lon = np.linspace(12000, 21000, npart)
lat = np.linspace(12500, 12500, npart)
pset = pset_type[pset_mode]['pset'](fieldset, pclass=ptype[mode], lon=lon, lat=lat)
endtime = delta(hours=6).total_seconds()
pset.execute(kernel[method], dt=delta(minutes=3), endtime=endtime)
exp_lon = [truth_moving(x, y, endtime)[0] for x, y, in zip(lon, lat)]
exp_lat = [truth_moving(x, y, endtime)[1] for x, y, in zip(lon, lat)]
assert np.allclose(pset.lon, exp_lon, rtol=rtol)
assert np.allclose(pset.lat, exp_lat, rtol=rtol)
def truth_decaying(x_0, y_0, t):
lat = y_0 - ((u_0 - u_g) * f / (f ** 2 + gamma ** 2)
* (1 - np.exp(-gamma * t) * (np.cos(f * t) + gamma / f * np.sin(f * t))))
lon = x_0 + (u_g / gamma_g * (1 - np.exp(-gamma_g * t))
+ (u_0 - u_g) * f / (f ** 2 + gamma ** 2)
* (gamma / f + np.exp(-gamma * t)
* (math.sin(f * t) - gamma / f * math.cos(f * t))))
return lon, lat
@pytest.fixture
def fieldset_decaying(xdim=100, ydim=100, maxtime=delta(hours=6)):
"""Generate a FieldSet encapsulating the flow field of a decaying eddy.
Reference: <NAME>, 2009, "Numerical simulations of passive
tracers dispersion in the sea"
"""
time = np.arange(0., maxtime.total_seconds()+1e-5, 60., dtype=np.float64)
dimensions = {'lon': np.linspace(0, 25000, xdim, dtype=np.float32),
'lat': np.linspace(0, 25000, ydim, dtype=np.float32),
'time': time}
data = {'U': np.ones((xdim, ydim, 1), dtype=np.float32) * u_g * np.exp(-gamma_g * time) + (u_0 - u_g) * np.exp(-gamma * time) * np.cos(f * time),
'V': np.ones((xdim, ydim, 1), dtype=np.float32) * -(u_0 - u_g) * np.exp(-gamma * time) * np.sin(f * time)}
return FieldSet.from_data(data, dimensions, mesh='flat', transpose=True)
@pytest.mark.parametrize('pset_mode', pset_modes)
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('method, rtol, diffField', [
('EE', 1e-2, False),
('AdvDiffEM', 1e-2, True),
('AdvDiffM1', 1e-2, True),
('RK4', 1e-5, False),
('RK45', 1e-5, False)])
def test_decaying_eddy(pset_mode, fieldset_decaying, mode, method, rtol, diffField, npart=1):
fieldset = fieldset_decaying
if diffField:
fieldset.add_field(Field('Kh_zonal', np.zeros(fieldset.U.data.shape), grid=fieldset.U.grid))
fieldset.add_field(Field('Kh_meridional', np.zeros(fieldset.V.data.shape), grid=fieldset.V.grid))
fieldset.add_constant('dres', 0.1)
lon = np.linspace(12000, 21000, npart)
lat = np.linspace(12500, 12500, npart)
pset = pset_type[pset_mode]['pset'](fieldset, pclass=ptype[mode], lon=lon, lat=lat)
endtime = delta(hours=6).total_seconds()
pset.execute(kernel[method], dt=delta(minutes=3), endtime=endtime)
exp_lon = [truth_decaying(x, y, endtime)[0] for x, y, in zip(lon, lat)]
exp_lat = [truth_decaying(x, y, endtime)[1] for x, y, in zip(lon, lat)]
assert np.allclose(pset.lon, exp_lon, rtol=rtol)
assert np.allclose(pset.lat, exp_lat, rtol=rtol)
@pytest.mark.parametrize('pset_mode', pset_modes)
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_analyticalAgrid(pset_mode, mode):
lon =
|
np.arange(0, 15, dtype=np.float32)
|
numpy.arange
|
# -*- coding: utf-8 -*-
"""
Neural network - nsp
"""
import numpy as np
def get_random_data(Ni, t_Nh, No, ptrain, ptest, noise):
"""Create input and output data from a 'teacher' network. The outputs are
contaminated with additive white noise.
Args:
Ni: Number of external inputs to net.\n
t_Nh: Number of hidden units for the 'teacher' net.\n
No: Number of output units.\n
ptrain: Number of training examples.\n
ptest: Number of test examples.\n
noise: Relative amplitude of additive noise.
Yields:
tr_i, te_i: Input for training and test set.\n
tr_t, te_t: Target values.
Examples:
Load data
>>> tr_i, tr_t, te_i, te_t = get_random_data(...)
"""
# Initialize 'teacher' weights
TWi =
|
np.random.randn(t_Nh, Ni+1)
|
numpy.random.randn
|
# Copyright (C) 2018-2019 <NAME>
# SPDX-License-Identifier: Apache-2.0
import dolfin
import numpy
from collections import OrderedDict
from ocellaris.utils import init_mesh_geometry, timeit, ocellaris_error
from ocellaris.simulation.io_impl.xdmf import get_xdmf_file_name
from . import Probe, register_probe
WRITE_INTERVAL = 1
@register_probe('PlaneProbe')
class PlaneProbe(Probe):
description = 'Produce a 2D slice function from a 3D mesh'
def __init__(self, simulation, probe_input):
self.simulation = simulation
self.family = None
self.degree = None
assert self.simulation.ndim == 3, 'PlaneProbe only implemented in 3D'
assert not self.simulation.mesh_morpher.active, 'PlaneProbe does not support ALE yet'
# Read input
inp = probe_input
self.name = inp.get_value('name', required_type='string')
self.plane_point = inp.get_value(
'plane_point', required_type='list(float)', required_length=3
)
self.plane_normal = inp.get_value(
'plane_normal', required_type='list(float)', required_length=3
)
xlim = inp.get_value('xlim', None, 'list(float)', required_length=2)
ylim = inp.get_value('ylim', None, 'list(float)', required_length=2)
zlim = inp.get_value('zlim', None, 'list(float)', required_length=2)
# Get the names of the function(s) to be sliced
fn = inp.get_value('field', required_type='any')
if isinstance(fn, str):
self.field_names = [fn]
else:
self.field_names = inp.validate_and_convert('field', fn, 'list(string)')
# Get the functions and verify the function spaces
for fn in self.field_names:
func_3d = simulation.get_data(fn)
V = func_3d.function_space()
fam = V.ufl_element().family()
deg = V.ufl_element().degree()
if self.family is None:
self.family = fam
self.degree = deg
elif fam != self.family or deg != self.degree:
ocellaris_error(
'Mismatching function spaces in PlaneProbe %s' % self.name,
'All functions must have the same function space. '
+ '%s is %r but %r was expected' % (fn, (fam, deg), (self.family, self.degree)),
)
# Create the slice
self.slice = FunctionSlice(self.plane_point, self.plane_normal, V, xlim, ylim, zlim)
prefix = simulation.input.get_value('output/prefix', '', 'string')
# Get the XDMF file name (also ensures it does not exist)
fn = '%s_slice_%s.xdmf' % (prefix, self.name)
self.file_name = get_xdmf_file_name(simulation, fn)
if simulation.rank == 0:
V_2d = self.slice.slice_function_space
mesh_2d = V_2d.mesh()
simulation.log.info(' Created 2D mesh with %r cells' % mesh_2d.num_cells())
simulation.log.info(' Creating XDMF file %s' % self.file_name)
self.xdmf_file = dolfin.XDMFFile(dolfin.MPI.comm_self, self.file_name)
self.xdmf_file.parameters['flush_output'] = True
self.xdmf_file.parameters['rewrite_function_mesh'] = False
self.xdmf_file.parameters['functions_share_mesh'] = True
# Create storage for 2D functions
self.funcs_2d = []
for fn in self.field_names:
func_2d = dolfin.Function(V_2d)
func_2d.rename(fn, fn)
self.funcs_2d.append(func_2d)
else:
self.funcs_2d = [None] * len(self.field_names)
# Add field to list of IO plotters
inp_key = probe_input.basepath + 'write_interval'
simulation.io.add_plotter(self.write_field, inp_key, WRITE_INTERVAL)
def write_field(self):
"""
Find and output the plane probe
"""
for fn, func_2d in zip(self.field_names, self.funcs_2d):
func_3d = self.simulation.get_data(fn)
self.slice.get_slice(func_3d, func_2d)
if self.simulation.rank == 0:
self.xdmf_file.write(func_2d, self.simulation.time)
class FunctionSlice:
def __init__(self, pt, n, V3d, xlim=None, ylim=None, zlim=None):
"""
Take the definition of a plane and a 3D function space
Construct a 2D mesh on rank 0 (only) and the necessary
data structures to extract function values at the 2D
mesh in an efficient way
* pt: a point in the plane
* n: a normal vector to the plane. Does not need to be a unit normal
* V3d: the 3D function space to be intersected by the plane
"""
gdim = V3d.mesh().geometry().dim()
assert gdim == 3, 'Function slice only supported in 3D'
# 3D function space data
comm = V3d.mesh().mpi_comm()
elem_3d = V3d.ufl_element()
family = elem_3d.family()
degree = elem_3d.degree()
# Create the 2D mesh
# The 2D mesh uses MPI_COMM_SELF and lives only on the root process
mesh_2d, cell_origins = make_cut_plane_mesh(pt, n, V3d.mesh(), xlim, ylim, zlim)
# Precompute data on root process
if comm.rank == 0:
# Make the 2D function space
V2d = dolfin.FunctionSpace(mesh_2d, family, degree)
self.slice_function_space = V2d
# Get 2D dof coordinates and dofmap
dof_pos_2d = V2d.tabulate_dof_coordinates().reshape((-1, gdim))
dofmap_2d = V2d.dofmap()
# Link 2D dofs and 3D ranks
links_for_rank = [[] for _ in range(comm.size)]
for cell in dolfin.cells(mesh_2d):
cid = cell.index()
# Assume no cell renumbering
orig_rank, orig_cell_index = cell_origins[cid]
for dof in dofmap_2d.cell_dofs(cid):
links_for_rank[orig_rank].append((dof, orig_cell_index))
# Distribute data to all ranks
distribute_this = []
for rank in range(comm.size):
positions = [dof_pos_2d[i] for i, _ in links_for_rank[rank]]
orig_cells = [ocid for _, ocid in links_for_rank[rank]]
positions = numpy.array(positions, float)
orig_cells = numpy.array(orig_cells, numpy.intc)
distribute_this.append((positions, orig_cells))
# Store which 2D dof belongs on which rank
self._dofs_for_rank = []
for rank in range(comm.size):
dfr = [dof for dof, _ in links_for_rank[rank]]
self._dofs_for_rank.append(numpy.array(dfr, int))
else:
distribute_this = None
# Get positions along with the index of the 3D cell for all points that
# need to be evaluated in order to build the 2D function
# Each rank gets positions corresponding to cells located on that rank
positions, cell_index_3d = comm.scatter(distribute_this)
# Establish efficient ways to get the 2D data from the 3D function
cell_dofs = [V3d.dofmap().cell_dofs(i) for i in cell_index_3d]
self._cell_dofs = numpy.array(cell_dofs, int)
self._factors = numpy.zeros(self._cell_dofs.shape, float)
self._local_data = numpy.zeros(len(cell_dofs), float)
evaluate_basis_functions(V3d, positions, cell_index_3d, self._factors)
@timeit.named('FunctionSlice.get_slice')
def get_slice(self, func_3d, func_2d=None):
"""
Return the function on the 2D slice of the 3D mesh
"""
comm = func_3d.function_space().mesh().mpi_comm()
# Get local values from all processes
arr_3d = func_3d.vector().get_local()
local_data = self._local_data
N = local_data.size
facs = self._factors
cd = self._cell_dofs
for i in range(N):
local_data[i] = arr_3d[cd[i]].dot(facs[i])
all_data = comm.gather(local_data)
if comm.rank == 0:
if func_2d is None:
func_2d = dolfin.Function(self.slice_function_space)
arr_2d = func_2d.vector().get_local()
for data, dofs in zip(all_data, self._dofs_for_rank):
arr_2d[dofs] = data
func_2d.vector().set_local(arr_2d)
func_2d.vector().apply('insert')
return func_2d
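# Hedged usage sketch (added for illustration; not part of the original module):
# extracting a 2D slice of a 3D function at the plane z = 0.5. The function name
# and plane definition are assumptions; get_slice returns a 2D dolfin.Function on
# MPI rank 0 and None on all other ranks.
def _example_function_slice(func_3d):
    V3d = func_3d.function_space()
    fslice = FunctionSlice(pt=(0.5, 0.5, 0.5), n=(0.0, 0.0, 1.0), V3d=V3d)
    return fslice.get_slice(func_3d)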
def make_cut_plane_mesh(pt, n, mesh3d, xlim=None, ylim=None, zlim=None):
"""
Returns a 2D mesh of the intersection of a 3D mesh and a plane. The result
can optionally be restricted to the intersection of the plane and a cube
with sides parallel to the axes by specifying one or more of the coordinate
limits as tuples, they are by default None which means _lim = (-inf, inf).
* pt: a point in the plane
* n: a normal vector to the plane. Does not need to be a unit normal
* mesh3d: the 3D mesh to be intersected by the plane
* xlim, ylim, zlim: each of these must be a tuple of two numbers or None
This function assumes that the 3D mesh consists solely of tetrahedra which
gives a 2D mesh of triangles
"""
# Get results on this rank and send to root process
rank_results = get_points_in_plane(pt, n, mesh3d)
rank_results = split_cells(rank_results)
rank_results = limit_plane(rank_results, xlim, ylim, zlim)
comm = mesh3d.mpi_comm()
all_results = comm.gather((comm.rank, rank_results))
# No collective operations below this point!
if comm.rank != 0:
return None, None
point_ids = {}
points = []
connectivity = []
cell_origins = []
for rank, res in all_results:
for cell_id, subcells in res.items():
for cell_coords in subcells:
cell_points = []
for coords in cell_coords:
if coords not in point_ids:
point_ids[coords] = len(point_ids)
points.append(coords)
cell_points.append(point_ids[coords])
connectivity.append(cell_points)
cell_origins.append((rank, cell_id))
# Create the mesh
points = numpy.array(points, float)
connectivity = numpy.array(connectivity, int)
tdim, gdim = 2, 3
mesh2d = dolfin.Mesh(dolfin.MPI.comm_self)
init_mesh_geometry(mesh2d, points, connectivity, tdim, gdim)
return mesh2d, cell_origins
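# Hedged sketch (illustrative only, not from the original source): cutting a unit
# cube mesh with the plane z = 0.5 and restricting the slice to 0.25 <= x <= 0.75.
# dolfin.UnitCubeMesh is assumed to be available; its constructor signature varies
# between dolfin versions.
def _example_cut_plane_mesh(nx=8):
    mesh3d = dolfin.UnitCubeMesh(nx, nx, nx)
    return make_cut_plane_mesh(
        pt=(0.5, 0.5, 0.5), n=(0.0, 0.0, 1.0), mesh3d=mesh3d, xlim=(0.25, 0.75)
    )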
def limit_plane(unlimited, xlim=None, ylim=None, zlim=None, eps=1e-8):
"""
Given a set of triangles in a 2D plane, limit the extents of the plane to a
given rectangular region whose sides are parallel to the axes
"""
if xlim is None and ylim is None and zlim is None:
return unlimited
# Remove / alter cells that are outside the coordinate limits
results = OrderedDict()
for cell_id, subcells in unlimited.items():
subcells = limit_triangles(subcells, xlim, ylim, zlim, eps)
if subcells:
results[cell_id] = subcells
return results
def limit_triangles(triangles, xlim, ylim, zlim, eps=1e-8):
"""
Given a list of triangles, return a new list of triangles which covers the
same area, limited to the given axis limits (i.e. the area of the triangles
returned is always equal to or lower than the area of the input triangles).
An empty list can be returned if no area exists inside the axis limits
"""
for d, lim in enumerate((xlim, ylim, zlim)):
if lim is None:
continue
# Check lower limit
new_triangles = []
for cell_coords in triangles:
below = [1 if c[d] < lim[0] - eps else 0 for c in cell_coords]
num_below = sum(below)
if num_below == 0:
# Cell completely inside the bounds
new_triangles.append(cell_coords)
continue
elif num_below in (1, 2):
# Cell partially inside the bounds
if num_below == 1:
# Get the index of the one vertex that is below lim
i = 0 if below[0] == 1 else (1 if below[1] == 1 else 2)
else:
# Get the index of the one vertex that is not below lim
i = 0 if below[0] == 0 else (1 if below[1] == 0 else 2)
# Coordinates of the vertices
c0 = cell_coords[(i + 0) % 3]
c1 = cell_coords[(i + 1) % 3]
c2 = cell_coords[(i + 2) % 3]
# Coordinates of the crossing points
f01 = (lim[0] - c0[d]) / (c1[d] - c0[d])
f02 = (lim[0] - c0[d]) / (c2[d] - c0[d])
c01 = (
c0[0] * (1 - f01) + c1[0] * f01,
c0[1] * (1 - f01) + c1[1] * f01,
c0[2] * (1 - f01) + c1[2] * f01,
)
c02 = (
c0[0] * (1 - f02) + c2[0] * f02,
c0[1] * (1 - f02) + c2[1] * f02,
c0[2] * (1 - f02) + c2[2] * f02,
)
# Create new triangles that are inside the bounds
if num_below == 1:
# Split into two new triangles
new_triangles.append((c1, c01, c2))
new_triangles.append((c2, c01, c02))
else:
new_triangles.append((c0, c01, c02))
elif num_below == 3:
# Cell completely outside the bounds
continue
triangles = new_triangles
# Check upper limit
new_triangles = []
for cell_coords in triangles:
above = [1 if c[d] > lim[1] + eps else 0 for c in cell_coords]
num_above = sum(above)
if num_above == 0:
# Cell completely inside the bounds
new_triangles.append(cell_coords)
continue
elif num_above in (1, 2):
# Cell partially inside the bounds
if num_above == 1:
# Get the index of the one vertex that is above lim
i = 0 if above[0] == 1 else (1 if above[1] == 1 else 2)
else:
# Get the index of the one vertex that is not above lim
i = 0 if above[0] == 0 else (1 if above[1] == 0 else 2)
# Coordinates of the vertices
c0 = cell_coords[(i + 0) % 3]
c1 = cell_coords[(i + 1) % 3]
c2 = cell_coords[(i + 2) % 3]
# Coordinates of the crossing points
f01 = (lim[1] - c0[d]) / (c1[d] - c0[d])
f02 = (lim[1] - c0[d]) / (c2[d] - c0[d])
c01 = (
c0[0] * (1 - f01) + c1[0] * f01,
c0[1] * (1 - f01) + c1[1] * f01,
c0[2] * (1 - f01) + c1[2] * f01,
)
c02 = (
c0[0] * (1 - f02) + c2[0] * f02,
c0[1] * (1 - f02) + c2[1] * f02,
c0[2] * (1 - f02) + c2[2] * f02,
)
# Create new triangles that are inside the bounds
if num_above == 1:
# Split into two new triangles
new_triangles.append((c1, c01, c2))
new_triangles.append((c2, c01, c02))
else:
new_triangles.append((c0, c01, c02))
elif num_above == 3:
# Cell completely outside the bounds
continue
triangles = new_triangles
def has_area(cell_coords):
c0, c1, c2 = cell_coords
u = (c1[0] - c0[0], c1[1] - c0[1], c1[2] - c0[2])
v = (c2[0] - c0[0], c2[1] - c0[1], c2[2] - c0[2])
# Squared magnitude of the cross product, proportional to the squared triangle area
areaish = (
(u[1] * v[2] - u[2] * v[1]) ** 2
+ (u[2] * v[0] - u[0] * v[2]) ** 2
+ (u[0] * v[1] - u[1] * v[0]) ** 2
)
return areaish > eps ** 2
return [cell_coords for cell_coords in triangles if has_area(cell_coords)]
def split_cells(unsplit):
"""
Split non-triangles into triangles
"""
results = OrderedDict()
for cell_id, cell_coords in unsplit.items():
N = len(cell_coords)
if N == 4:
results[cell_id] = [cell_coords[1:], cell_coords[:-1]]
elif N == 3:
results[cell_id] = [cell_coords]
else:
raise NotImplementedError('Expected elements with 3 or 4 ' 'vertices, not %d' % N)
return results
def get_points_in_plane(pt, n, mesh, eps=1e-8):
"""
Returns a dictionary of cell_id -> list of three/four coordinate (x, y, z)
tuples for any cell that has three points on a mesh edge that crosses the
given plane. For edges coincident with the plane only the two end points
are returned
* pt, n: needed input to get_plane_coefficients()
* mesh: only the local part of the mesh is explored, no MPI communication
* eps: distance to be considered in the plane or not
"""
assert mesh.geometry().dim() == 3, 'Can only find planes in 3D meshes'
conn_C_V = mesh.topology()(3, 0)
coords = mesh.coordinates()
plane_coefficients = get_plane_coefficients(pt, n)
all_sides = get_point_sides(plane_coefficients, coords)
def sign(x):
return -1 if x < 0 else 1
n = plane_coefficients[:3] # normal to the plane
results = OrderedDict()
for cell in dolfin.cells(mesh, 'regular'):
cid = cell.index()
verts = conn_C_V(cid)
sides = [all_sides[v] for v in verts]
cell_points = []
# Check for points on plane
on_plane = [abs(s) < eps for s in sides]
for i, v in enumerate(verts):
if on_plane[i]:
pos = coords[v]
cell_points.append((pos[0], pos[1], pos[2]))
for v0, v1 in [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]:
if on_plane[v0] or on_plane[v1]:
continue
elif sign(sides[v0]) == sign(sides[v1]):
continue
# Points v0 and v1 are on different sides of the plane
# Get the mesh-local vertex numbers in ascending order
vid0, vid1 = verts[v0], verts[v1]
if vid1 < vid0:
vid1, vid0 = vid0, vid1
# Get the crossing point
c0 = coords[vid0]
c1 = coords[vid1]
u = c1 - c0
f = numpy.dot(pt - c0, n) /
|
numpy.dot(u, n)
|
numpy.dot
|
import numpy as np
from identpy.objects import Model
class Pendulum(Model):
name = 'Simple Pendulum Model'
parameters = {
'b': 'Air friction',
'l': 'Wire length',
'g': 'Gravity acceleration',
'm': 'Mass',
}
inputs = {
'F': 'External force',
}
outputs = {
'θ': 'Angle from vertical',
'ω': 'Angular velocity',
}
states = {
'θ': 'Angle from vertical',
'ω': 'Angular velocity',
}
def __init__(self, x_0=0, u_0=0, u=0, method=None):
super().__init__(x_0, u_0, u, method)
def f(self, x=None, u=None, factor=0):
x, u = super().f(x, u)
f1 = x[1]
f2 = -self.p[2]*
|
np.cos(self.x_0[0])
|
numpy.cos
|
from classes.dataframes import *
import numpy as np
from classes.utility import *
from classes.order import *
from classes.warehouses import *
import pandas as pd
class Drone(): # inherit #product #warehouse #order (#utility)
def __init__(self, num, weight_prod_types):
self.num = num
# self.prod_types = np.array([], dtype = np.int32)
# self.prod_amounts = np.array([], dtype = np.int32)
self.pld_weight = 0
self.cur_pos = [0, 0]
self.turns = 0 # ?
self.actions = [] # ?
self.state = 'W'
self.weights = np.array(weight_prod_types)
# self.df = pd.DataFrame({'Amounts': self.prod_amounts},index = self.prod_types)
self.orders = []
self.amounts = np.zeros(400)
self.types = np.arange(400)
# Df for use throughout
# self.Data = Dataframes()
# self.Util = Utility()
self.remainder = 0
def __repr__(self):
return '(num: ' + str(self.num) + ', ' + 'types: ' + str(self.types[self.amounts>0]) + ', ' + 'amounts: ' + str(self.amounts[self.amounts>0]) + ')'
def load(self, prod_types, prod_qnty, wrhs):
self.state = 'L'
# new_df = pd.DataFrame({'Amounts':prod_qnty}, index = prod_types)
# already_existing_types = self.df[self.df.index.isin(new_df.index)].index.values
# self.df.loc[already_existing_types, 'Amounts'] += new_df.loc[already_existing_types, 'Amounts']
# self.df = pd.concat([self.df, new_df[~new_df.index.isin(self.df.index)]])
# self.prod_types = self.df.index.values
# self.prod_amounts = self.df['Amounts'].values
self.amounts[prod_types] += 1
self.compute_weight()
# wrhs.remove_product(prod_types, prod_qnty)
# pactions.append([0 L 1 2 3 ]) ?order_number
message = []
for i in range(prod_types.shape[0]):
# print(f'{self.num} {self.state} {wrhs.num} {prod_qnty[i]} {prod_types[i]}')
message.append(f'{self.num} {self.state} {wrhs.num} {prod_qnty[i]} {prod_types[i]}')
self.turns += prod_types.shape[0]
return message
# TODO update payload mass
def unload(self, prod_types, qnty): # TODO
# self.prod_types.remove(prod_types)
# self.prod_amounts.remove(qnty)
# actions.append([0 U 1 2 3 ])?order_number
self.turns += 1
def compute_weight(self):
return self.weights[self.types]*self.amounts[self.types]
# return self.weights[self.prod_types]*self.prod_amounts
# tot_weight = 0
# for i in range(len(self.prod_types)):
# unit_weight = self.weights[self.prod_types[i]]
# tot_weight += unit_weight * self.prod_amounts[i]
# self.pld_weight = tot_weight
def deliver(self, prod_types, prod_qnty, order, orders): # to the order
self.state = 'D'
# pactions.append([0 D 1 2 3 ]) ?order_number
# print(prod_types.shape[0])
message = []
for i in range(prod_types.shape[0]):
# print(f'{self.num} {self.state} {order.num} {prod_qnty[i]} {prod_types[i]}')
message.append(f'{self.num} {self.state} {order.num} {prod_qnty[i]} {prod_types[i]}')
self.turns += prod_types.shape[0]
self.amounts[prod_types] -= prod_qnty
self.compute_weight()
return message
# self.df.loc[prod_types, 'Amounts'] -= prod_qnty
# self.df = self.df[self.df['Amounts']>0]
# self.prod_types = self.df.index.values
# self.prod_amounts = self.df['Amounts'].values
# order.remove_prod(prod_types, prod_qnty)
# order.assigned -=1
# self.compute_weight()
# order.check_completed(self.turns, orders)
# self.prod_types.remove(prod_type)
# self.prod_amounts.remove(qnty)
# # actions.append([0 D 1 2 3 ])?order_number
# self.turns += 1
def wait(self, n_turns):
self.state = 'W'
self.turns += n_turns
# print(f'{self.num} {self.state} {n_turns}')
def get_cur_pos(self):
return self.cur_pos
def update_cur_pos(self, new_pos):
self.turns += np.int(np.ceil(dist(self.cur_pos, new_pos)))
self.cur_pos = new_pos # define in utility class
# account for distance in the count of turns for delivery
def find_nearest_wh(self, warehouses):
wh = np.array(warehouses.positions, dtype=np.float64)
# wh[warehouses.check_empty()|(warehouses.all == 0)] = np.inf
# if np.min(wh) == np.inf:
# wh = np.array(warehouses.positions, dtype=np.float64)
wh[(warehouses.check_empty())|warehouses.not_avail] = np.inf
d = dist(self.cur_pos, wh)
return warehouses.dict[np.argmin(d)]
def check_pld_weight(self):
return self.pld_weight <= 200
# filter the product types which are available at the warehouse
def select_avail_types(self, wrhs, order):
avail_types = order.prod_types[order.check_avail_types(wrhs)]
# avail_types = order.types[]
return avail_types
# selects the minimum quantity between that available in the warehouse and the one required by the order
def select_avail_quantities(self, avail_types, order, wrhs):
# wrhs_qnty = wrhs.prod_amounts.loc[avail_types, 'Amounts'].values
wrhs_qnty = wrhs.amounts[avail_types]
# order_qnty = order.df.loc[avail_types, 'Amounts'].values
order_qnty = order.amounts[avail_types]
# assert np.all(order_qnty == order_qnty_)
selected_qnty = np.column_stack((wrhs_qnty, order_qnty)).min(1)
assert np.all(selected_qnty == np.min((wrhs_qnty, order_qnty), 0))
# print(f'wrhs q: {wrhs_qnty}, order q: {order_qnty}, selected: {selected_qnty}, avail_types: {avail_types}')
return selected_qnty
def find_nearest_order(self, orders, warehouses, wrhs): # dictionary of orders (class Orders)
if self.orders != []:
last_order = self.orders[-1]
if last_order.amount>0 and np.any(last_order.check_avail_types(wrhs)):
return self.orders[-1], 'last' #54229
order_pos = orders.positions.astype(np.float64)
c = orders.completed
check_avail = warehouses.all_avail_orders[wrhs.num]
# if check_avail.sum() == 0:
# check_avail = warehouses.any_avail_orders[wrhs.num]
if c.sum() == 1250:
return 'All orders are completed', 'completed'
order_pos[(c)|(~check_avail)] = np.inf
# print(f'avail_orders: {np.sum(order_pos==np.inf)}')
d = dist(self.cur_pos, order_pos)
if np.min(d) == np.inf:
order_pos = orders.positions.astype(np.float64)
check_avail = warehouses.any_avail_orders[wrhs.num]
order_pos[(c)|(~check_avail)] = np.inf
d = dist(self.cur_pos, order_pos)
if np.min(d) == np.inf:
assert order_pos.min() == np.inf
wrhs.update_not_avail(warehouses)
nearest_order = orders.dict[np.argmin(d)]
return nearest_order, 0
nearest_order = orders.dict[np.argmin(d)]
# print((order_pos<np.inf).sum())
return nearest_order, 1 #53990
def assign_order(self, order, wrhs, warehouses, orders):
self.orders.append(order)
# order.assigned += 1
avail_types = self.select_avail_types(wrhs, order)
# avail_types = wrhs.select_avail_types(order.prod_types)
avail_qnty = self.select_avail_quantities(avail_types, order, wrhs)
# avail_qnty = wrhs.select_avail_quantities(avail_types, order.df.loc[avail_types, 'Amounts'])
# If the whole order fits in the drone
if np.sum(self.weights[avail_types]*avail_qnty)<=200: # the drone can leave the warehouse with spare space
new_types = avail_types
new_qnty = avail_qnty
payload = self.weights[new_types].sum()
# wrhs.remove_product(new_types, new_qnty, warehouses)
# self.load(new_types, new_qnty, wrhs)
#if order is heavier than drone max payload mass
# why would it look for another nearest warehouse?
else:
# types = np.repeat(avail_types, avail_qnty)
types = np.repeat(avail_types, avail_qnty)
weights = self.weights[types]
# print(weights)
# repeated_matrix = np.column_stack((types, weights))
# rep_mat_sorted = repeated_matrix[repeated_matrix[:,1].argsort()]
types_sorted = types[weights.argsort()][::-1]
# x = np.median(weights)
# heaviest = rep_mat_sorted[rep_mat_sorted[:,1]>x]
# lightest_reverted = rep_mat_sorted[rep_mat_sorted[:,1]<=x][::-1]
# if len(heaviest)==0:
# new_sorted_matrix = lightest_reverted
# else:
# new_sorted_matrix = np.vstack([heaviest[0], lightest_reverted, heaviest[1:]])
# mask_le200 = new_sorted_matrix[:,1].cumsum() <= 200
# new_sorted_matrix = rep_mat_sorted
# new_sorted_matrix = rep_mat_sorted[::-1]
# mask_le_200 = new_sorted_matrix[:,1].cumsum() <= 200
weights.sort()
mask_le_200 = weights.cumsum() <= 200
# mask_le_200 = self.weights[types_sorted].cumsum() <= 200
new_types_repeated = types_sorted[mask_le_200]
# types_nono = np.repeat(avail_types_nono, avail_qnty_nono)
# weights_nono = self.weights[types_nono]
# sorted_types_nono = types_nono[weights_nono.argsort()]
# weights_nono.sort()
# mask_le_rem = weights_nono.cumsum() <= remainder
# new_types_repeated = rep_mat_sorted[mask_le200][:,0]
# new_types_repeated = repeated_matrix[repeated_matrix[repeated_matrix[:,1].argsort()][:,1].cumsum()<=200][:,0]
new_types, new_qnty = np.unique(new_types_repeated, return_counts=True)
# new_weigths_repeated = self.weights[new_types_repeated]
payload = self.weights[new_types_repeated].sum()
wrhs.remove_product(new_types, new_qnty, warehouses)
loading_message = self.load(new_types, new_qnty, wrhs)
# wrhs.update_availability(warehouses, orders)
remainder = 200 - payload
self.remainder = remainder
loading_message_nono = []
result_nono = []
if remainder>0:
order_neighbors = orders.neighbors[order.num]
if
|
np.any(order_neighbors)
|
numpy.any
|
from PIL import Image
import numpy as np
import builders.transform_builder as transform_builder
from augmentations import normalize
from augmentations import Normalize
import imgaug as ia
from datasets.utils import cv2_loader
from utils import visualize, to_cv
from datasets.pose_dataset import PoseDataset
from datasets.mpii import make_dataset as make_mpii
import cv2
from torchvision import transforms
from augmentations import ToTensor
from datasets.utils import pil_loader, pil_2_cv_loader
np.set_printoptions(precision=3)
def make_random_2d_keypoints(num, img_shape):
img_shape = np.asarray(img_shape)
print(img_shape)
keypoints = np.random.rand(num, 2)
for keypoint in keypoints:
factor = np.random.rand(2) * img_shape
keypoint *= factor
return keypoints
def make_edge_case_keypoints(shape):
return np.asarray([[0, 0], [shape[0], shape[1]]])
source_file = '/work/pfeiffer/datasets/mpii/mpii_human_pose_v1_u12_1.mat'
data_dir = '/work/pfeiffer/datasets/mpii/'
def test_random_crop_transform():
transform_cfg = {
"RandomHorizontalFlipWithPairs": {'p': 0.5},
"RandomCrop": {
"width": 256,
"height": 384,
"scale": 1.125
},
"debug": True
}
data, header, dataset_info = make_mpii(source_file, data_dir)
joint_info = dataset_info['joint_info']
transform = transform_builder.build(transform_cfg)
pose_dataset = PoseDataset(data, header, {}, cv2_loader, 'mpii', transform=transform)
for _ in range(4):
idx = np.random.randint(0, len(pose_dataset))
data = pose_dataset[idx]
keypoints = data['coords']
image = data['img']
visualize(image, keypoints, joint_info.stick_figure_edges)
# (batch, height, width, channels)
# This is just imgaug augmentation
# pytorch will convert again
assert image.shape == (384, 256, 3)
assert not np.any(keypoints > 384*1.125)
def test_keypoints_normalize():
shape = (640, 360)
keypoints = make_random_2d_keypoints(10, shape)
edge_cases = make_edge_case_keypoints(shape)
keypoints = np.concatenate([keypoints, edge_cases])
keypoints = ia.KeypointsOnImage.from_coords_array(keypoints, shape)
norm_transform = Normalize(width=640, height=360)
keypoints_aug = norm_transform.augment_keypoints([keypoints])
keypoints_aug = keypoints_aug[0].get_coords_array()
assert not np.any(keypoints_aug > 1.0)
assert not np.any(keypoints_aug < -1.0)
def test_keypoints_normalize_with_scale():
transform_cfg = {
"resize": {
"width": 256,
"height": 384
}
}
shape = (640, 360)
normalize = Normalize(0, 0, height=384, width=256)
transform = transform_builder.build(transform_cfg)
transform.append(normalize)
# points are x,y
keypoints = make_random_2d_keypoints(10, shape)
edge_cases = make_edge_case_keypoints(shape)
keypoints = np.concatenate([keypoints, edge_cases])
# shape is expected height, width
keypoints = ia.KeypointsOnImage.from_coords_array(keypoints, (360, 640))
keypoints_aug = transform.augment_keypoints([keypoints])
keypoints_aug = keypoints_aug[0].get_coords_array()
assert not np.any(keypoints_aug > 1.0)
assert not
|
np.any(keypoints_aug < -1.0)
|
numpy.any
|
"""
output.py
Output metadata and I/O handling.
"""
import uuid
import datetime
from .constants import FREQUENCY_INTERVALS, SEA_STATE_INTERVALS
from .operators import get_proc_version
import numpy as np
import netCDF4
# times are measured in milliseconds since this date
TIME_ORIGIN = '1980-01-01'
# fill values to use
FILL_VALUE_NUMBER = -9999
FILL_VALUE_STR = 'MISSING'
# chunk sizes to use for each dimension
CHUNKSIZES = {
'meta_station_name': 1,
'wave_id_local': 10_000,
'meta_frequency_band': len(FREQUENCY_INTERVALS),
}
DATASET_VARIABLES = dict(
# metadata
meta_source_file_name=dict(
dims=('wave_id_local',),
dtype=str,
attrs=dict(
long_name='File name of raw input data file',
)
),
meta_source_file_uuid=dict(
dims=('wave_id_local',),
dtype=str,
attrs=dict(
long_name='UUID of raw input data file',
)
),
# wave data
wave_start_time=dict(
dims=('wave_id_local',),
dtype='int64',
attrs=dict(
long_name='Wave start time',
units=f'milliseconds since {TIME_ORIGIN}',
),
),
wave_end_time=dict(
dims=('wave_id_local',),
dtype='int64',
attrs=dict(
long_name='Wave end time',
units=f'milliseconds since {TIME_ORIGIN}',
),
),
wave_zero_crossing_period=dict(
dims=('wave_id_local',),
dtype='float32',
attrs=dict(
long_name='Wave zero-crossing period relative to 30m sea surface elevation',
units='seconds',
comment='Zero-crossings determined through linear interpolation',
)
),
wave_zero_crossing_wavelength=dict(
dims=('wave_id_local',),
dtype='float32',
attrs=dict(
long_name='Wave zero-crossing wavelength relative to 30m sea surface elevation',
units='meters',
)
),
wave_raw_elevation=dict(
dims=('wave_id_local',),
dtype='vlen',
attrs=dict(
long_name='Raw surface elevation relative to 30m sea surface elevation',
units='meters',
comment='Spacing in time as given by meta_sampling_rate',
)
),
wave_crest_height=dict(
dims=('wave_id_local',),
dtype='float32',
attrs=dict(
long_name='Wave crest height relative to 30m sea surface elevation',
units='meters',
)
),
wave_trough_depth=dict(
dims=('wave_id_local',),
dtype='float32',
attrs=dict(
long_name='Wave trough depth relative to 30m sea surface elevation',
units='meters',
)
),
wave_height=dict(
dims=('wave_id_local',),
dtype='float32',
attrs=dict(
long_name='Absolute wave height relative to 30m sea surface elevation',
units='meters',
)
),
wave_ursell_number=dict(
dims=('wave_id_local',),
dtype='float32',
attrs=dict(
long_name='Ursell number',
units='1',
valid_min=0,
)
),
wave_maximum_elevation_slope=dict(
dims=('wave_id_local',),
dtype='float32',
attrs=dict(
long_name='Maximum slope of surface elevation in time',
units='m s-1',
)
),
# station metadata
meta_deploy_latitude=dict(
dims=('wave_id_local',),
dtype='float32',
attrs=dict(
long_name='Deploy latitude of instrument',
units='degrees_north',
)
),
meta_deploy_longitude=dict(
dims=('wave_id_local',),
dtype='float32',
attrs=dict(
long_name='Deploy longitude of instrument',
units='degrees_east',
)
),
meta_water_depth=dict(
dims=('wave_id_local',),
dtype='float32',
attrs=dict(
long_name='Water depth at deployment location',
units='meters',
positive='down',
)
),
meta_sampling_rate=dict(
dims=('wave_id_local',),
dtype='float32',
attrs=dict(
long_name='Measurement sampling frequency in time',
units='hertz',
)
),
)
# sea state parameter metadata
for interval in SEA_STATE_INTERVALS:
if not isinstance(interval, str):
interval = f'{interval}m'
if interval == 'dynamic':
DATASET_VARIABLES.update({
'sea_state_dynamic_window_length': dict(
dims=('wave_id_local',),
dtype='int64',
attrs=dict(
long_name='Length of dynamically computed sea state window',
units='minutes',
)
)
})
DATASET_VARIABLES.update({
f'sea_state_{interval}_start_time': dict(
dims=('wave_id_local',),
dtype='int64',
attrs=dict(
long_name='Sea state aggregation start time',
units=f'milliseconds since {TIME_ORIGIN}',
)
),
f'sea_state_{interval}_end_time': dict(
dims=('wave_id_local',),
dtype='int64',
attrs=dict(
long_name='Sea state aggregation end time',
units=f'milliseconds since {TIME_ORIGIN}',
)
),
f'sea_state_{interval}_significant_wave_height_spectral': dict(
dims=('wave_id_local',),
dtype='float32',
attrs=dict(
long_name='Significant wave height estimated from wave spectrum (Hm0)',
units='meters',
)
),
f'sea_state_{interval}_significant_wave_height_direct': dict(
dims=('wave_id_local',),
dtype='float32',
attrs=dict(
long_name='Significant wave height estimated from wave history (H1/3)',
units='meters',
)
),
f'sea_state_{interval}_maximum_wave_height': dict(
dims=('wave_id_local',),
dtype='float32',
attrs=dict(
long_name='Maximum wave height estimated from wave history',
units='meters',
)
),
f'sea_state_{interval}_rel_maximum_wave_height': dict(
dims=('wave_id_local',),
dtype='float32',
attrs=dict(
long_name=(
'Maximum wave height estimated from wave history '
'relative to spectral significant wave height'
),
units='1',
)
),
f'sea_state_{interval}_mean_period_direct': dict(
dims=('wave_id_local',),
dtype='float32',
attrs=dict(
long_name='Mean zero-crossing period estimated from wave history',
units='seconds',
)
),
f'sea_state_{interval}_mean_period_spectral': dict(
dims=('wave_id_local',),
dtype='float32',
attrs=dict(
long_name='Mean zero-crossing period estimated from wave spectrum',
units='seconds',
)
),
f'sea_state_{interval}_skewness': dict(
dims=('wave_id_local',),
dtype='float32',
attrs=dict(
long_name='Skewness of sea surface elevation',
units='1',
)
),
f'sea_state_{interval}_kurtosis': dict(
dims=('wave_id_local',),
dtype='float32',
attrs=dict(
long_name='Excess kurtosis of sea surface elevation',
units='1',
)
),
f'sea_state_{interval}_valid_data_ratio': dict(
dims=('wave_id_local',),
dtype='float32',
attrs=dict(
long_name='Ratio of valid measurements to all measurements',
valid_min=0,
valid_max=1,
units='1',
)
),
f'sea_state_{interval}_peak_wave_period': dict(
dims=('wave_id_local',),
dtype='float32',
attrs=dict(
long_name='Dominant wave period',
units='seconds',
)
),
f'sea_state_{interval}_peak_wavelength': dict(
dims=('wave_id_local',),
dtype='float32',
attrs=dict(
long_name='Dominant wavelength',
units='meters',
)
),
f'sea_state_{interval}_steepness': dict(
dims=('wave_id_local',),
dtype='float32',
attrs=dict(
long_name='Dominant wave steepness',
units='1',
)
),
f'sea_state_{interval}_bandwidth_peakedness': dict(
dims=('wave_id_local',),
dtype='float32',
attrs=dict(
long_name=(
'Spectral bandwidth estimated through spectral peakedness '
'(quality factor)'
),
units='1',
)
),
f'sea_state_{interval}_bandwidth_narrowness': dict(
dims=('wave_id_local',),
dtype='float32',
attrs=dict(
long_name='Spectral bandwidth estimated through spectral narrowness',
units='1',
)
),
f'sea_state_{interval}_benjamin_feir_index_peakedness': dict(
dims=('wave_id_local',),
dtype='float32',
attrs=dict(
long_name='Benjamin-Feir index estimated through steepness and peakedness',
units='1',
)
),
f'sea_state_{interval}_benjamin_feir_index_narrowness': dict(
dims=('wave_id_local',),
dtype='float32',
attrs=dict(
long_name='Benjamin-Feir index estimated through steepness and narrowness',
units='1',
)
),
f'sea_state_{interval}_crest_trough_correlation': dict(
dims=('wave_id_local',),
dtype='float32',
attrs=dict(
long_name='Crest-trough correlation parameter (r) estimated from spectral density',
units='1',
valid_min=0,
valid_max=1,
)
),
f'sea_state_{interval}_energy_in_frequency_interval': dict(
dims=('wave_id_local', 'meta_frequency_band'),
dtype='float32',
attrs=dict(
long_name='Total energy density contained in frequency band',
units='J m-2',
)
),
f'sea_state_{interval}_rel_energy_in_frequency_interval': dict(
dims=('wave_id_local', 'meta_frequency_band'),
dtype='float32',
attrs=dict(
long_name='Relative energy contained in frequency band',
units='1',
valid_min=0,
valid_max=1,
)
),
})
# directional parameter metadata
DIRECTIONAL_VARIABLES = dict(
direction_sampling_time=dict(
dims=('wave_id_local',),
dtype='int64',
attrs=dict(
long_name='Time at which directional quantities are sampled',
units=f'milliseconds since {TIME_ORIGIN}',
),
),
direction_dominant_spread_in_frequency_interval=dict(
dims=('wave_id_local', 'meta_frequency_band'),
dtype='float32',
attrs=dict(
long_name='Dominant directional spread in frequency band',
units='degrees',
valid_min=0,
valid_max=90,
)
),
direction_dominant_direction_in_frequency_interval=dict(
dims=('wave_id_local', 'meta_frequency_band'),
dtype='float32',
attrs=dict(
long_name='Dominant wave direction in frequency band',
units='degrees',
valid_min=0,
valid_max=360,
)
),
direction_peak_wave_direction=dict(
dims=('wave_id_local',),
dtype='float32',
attrs=dict(
long_name='Peak wave direction relative to normal-north',
units='degrees',
valid_min=0,
valid_max=360
)
),
direction_directionality_index=dict(
dims=('wave_id_local',),
dtype='float32',
attrs=dict(
long_name=(
'Directionality index R (squared ratio of directional spread and '
'spectral bandwidth)'
),
units='1',
valid_min=0,
)
)
)
freq_lower, freq_upper = list(zip(*FREQUENCY_INTERVALS))
# additional output variables that are constant across stations
EXTRA_VARIABLES = dict(
meta_frequency_band_lower=dict(
data=np.array(freq_lower, dtype='float32'),
dims=('meta_frequency_band',),
attrs=dict(
long_name='Lower limit of frequency band',
units='hertz',
),
),
meta_frequency_band_upper=dict(
data=np.array(freq_upper, dtype='float32'),
dims=('meta_frequency_band',),
attrs=dict(
long_name='Upper limit of frequency band',
units='hertz',
),
),
)
# attributes added to coordinate variables
COORD_ATTRS = dict(
meta_station_name=dict(
long_name='Name of original measurement station',
),
wave_id_local=dict(
long_name='Incrementing wave ID for given station',
comment=(
'This ID is not guaranteed to denote the same wave between data versions.'
),
),
meta_frequency_band=dict(
long_name='Index of frequency band',
comment=(
'Frequency ranges are given by '
'(meta_frequency_band_lower, meta_frequency_band_upper)'
),
),
)
def get_dataset_metadata(station_name, start_time, end_time, extra_metadata=None):
"""Get all metadata attributes related to the whole dataset."""
dataset_metadata = dict(
id=f'FOWD_{station_name}',
title=f'Free Ocean Wave Dataset (FOWD), station {station_name}',
summary=(
'A catalogue of ocean waves and associated sea states, derived from in-situ '
'measurement data.'
),
project='Free Ocean Wave Dataset (FOWD)',
keywords=(
'EARTH SCIENCE, OCEANS, OCEAN WAVES, GRAVITY WAVES, WIND WAVES, '
'SIGNIFICANT WAVE HEIGHT, WAVE FREQUENCY, WAVE PERIOD, WAVE SPECTRA'
),
processing_version=get_proc_version(),
processing_url='https://github.com/dionhaefner/FOWD',
date_created=f'{datetime.datetime.utcnow():%Y-%m-%dT%H:%M:%S.%f}',
uuid=str(uuid.uuid4()),
creator_name='<NAME>',
creator_url='https://www.nbi.ku.dk/english/research/pice/oceanography/',
creator_email='<EMAIL>',
institution='Niels Bohr Institute, University of Copenhagen',
geospatial_lat_units='degrees_north',
geospatial_lat_resolution=1e-5,
geospatial_lon_units='degrees_east',
geospatial_lon_resolution=1e-5,
geospatial_vertical_units='meters',
geospatial_vertical_origin='sea surface height',
geospatial_vertical_positive='up',
time_coverage_start=str(start_time),
time_coverage_end=str(end_time),
source='insitu observations',
license='These data may be redistributed and used without restriction.',
)
if extra_metadata is not None:
for key, val in extra_metadata.items():
dataset_metadata[key] = ''.join([dataset_metadata.get(key, ''), str(val)])
return dataset_metadata
def write_records(wave_record_iterator, filename, station_name, extra_metadata=None,
include_direction=False):
"""Write given wave records in FOWD's netCDF4 output format.
First argument is an iterable of chunks of wave records.
"""
dimension_data = (
# (name, dtype, data)
('meta_station_name', str, np.array([np.string_(station_name)])),
('wave_id_local', 'int64', None),
('meta_frequency_band', 'uint8', np.arange(len(FREQUENCY_INTERVALS))),
)
variables = DATASET_VARIABLES
if include_direction:
variables.update(DIRECTIONAL_VARIABLES)
with netCDF4.Dataset(filename, 'w') as f:
# create variable length dtype
vlen_type = f.createVLType('float32', 'float_array')
# create dimensions
for dim, dtype, val in dimension_data:
if val is None:
f.createDimension(dim, None)
else:
f.createDimension(dim, len(val))
extra_args = dict(
zlib=True,
fletcher32=True,
chunksizes=[CHUNKSIZES[dim]]
)
v = f.createVariable(dim, dtype, (dim,), **extra_args)
if val is not None:
v[:] = val
for name, meta in variables.items():
# add meta_station_name as additional scalar dimension
dims = ('meta_station_name',) + meta['dims']
extra_args = dict(
zlib=True,
fletcher32=True,
chunksizes=[CHUNKSIZES[dim] for dim in dims]
)
# determine dtype
if meta['dtype'] == 'vlen':
dtype = vlen_type
else:
dtype = meta['dtype']
# add correct fill value
is_number = np.issubdtype(dtype, np.floating) or
|
np.issubdtype(dtype, np.integer)
|
numpy.issubdtype
|
# general
import logging
import json
import os
import random
import math
from collections import defaultdict, Counter
import glob
import shutil, io, base64
from collections import OrderedDict
# general package
from natsort import natsorted
from numpy.lib.recfunctions import _repack_fields_dispatcher
import pandas as pd
import numpy
import numpy as np
import regex as re
import h5py
# image
import skimage
from skimage import measure as sk_measure
from adjustText import adjust_text
# processing
import ctypes
import subprocess
import dill as pickle
#vis
import dabest
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
#methods
import umap
import hdbscan
import diffxpy.api as de
import anndata
from scipy import ndimage, stats
from scipy.spatial.distance import squareform, pdist
import scipy.cluster as spc
from scipy.cluster.vq import kmeans2
from sklearn import cluster, decomposition
#from fcmeans import FCM
from .imzml import IMZMLExtract
from .regions import SpectraRegion, RegionClusterer
#web/html
import jinja2
# applications
import progressbar
def makeProgressBar():
return progressbar.ProgressBar(widgets=[
progressbar.Bar(), ' ', progressbar.Percentage(), ' ', progressbar.AdaptiveETA()
])
import abc
import networkx as nx
class RegionModel:
def __init__(self, no_relation_weight=0, bi_directional=True) -> None:
self.no_relation_weight = no_relation_weight
self.bi_directional = bi_directional
self.relations = nx.DiGraph()
def from_image(self, filepath, mapping=None, diagonal=True):
        regImg = np.load(filepath)
        if mapping is None:
            mapping = {x: x for x in np.unique(regImg)}  # identity mapping
        if not set(np.unique(regImg)).issubset([x for x in mapping]):
            raise ValueError("Region image contains labels that are not present in the given mapping.")
adjacencyCounter = Counter()
for i in range(0, regImg.shape[0]):
for j in range(0, regImg.shape[1]):
curFieldRegion = regImg[i,j]
otherRegions = []
#right
if i+1 < regImg.shape[0]:
otherRegions.append(regImg[i+1, j])
#bottom
if j+1 < regImg.shape[1]:
otherRegions.append(regImg[i, j+1])
if diagonal and i+1 < regImg.shape[0] and j+1 < regImg.shape[1]:
#diagonal
otherRegions.append(regImg[i+1, j+1])
for oRegion in otherRegions:
adjacencyCounter[ (mapping[curFieldRegion], mapping[oRegion]) ] += 1
adjacencyCounter[ (mapping[oRegion],mapping[curFieldRegion]) ] += 1
for interaction in adjacencyCounter:
self.add_relation(interaction[0], interaction[1], weight=1)
def add_relation(self, src, tgt, weight=1.0):
self.relations.add_edge(src, tgt, weight=weight)
if self.bi_directional:
self.relations.add_edge(tgt, src, weight=weight)
def get_score(self, src, tgt):
if (src, tgt) in self.relations.edges:
return self.relations.edges[(src, tgt)]["weight"]
return self.no_relation_weight
def plot_model(self):
plt.figure()
        # nodes carry no explicit 'weight' attribute (only edges do), so fall back to 0
        labels = {n: "{} ({})".format(n, self.relations.nodes[n].get('weight', 0)) for n in self.relations.nodes}
        colors = [self.relations.nodes[n].get('weight', 0) for n in self.relations.nodes]
        edgeColors = [self.relations.edges[e]['weight'] for e in self.relations.edges]
        nx.draw(self.relations, with_labels=True, labels=labels, node_color=colors, edge_color=edgeColors)
plt.show()
plt.close()
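# Hedged usage sketch (not part of the original module): build a RegionModel
# from a hypothetical label image on disk and query the adjacency score of two
# region ids; the file name and the ids are illustrative only.
def _region_model_example(label_image_path="region_labels.npy"):
    model = RegionModel(no_relation_weight=0, bi_directional=True)
    model.from_image(label_image_path, diagonal=True)
    return model.get_score(1, 2)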
class RegionEmbedding(metaclass=abc.ABCMeta):
def __init__(self, region:SpectraRegion) -> None:
self.region = region
self.embedded_matrix = None
self.logger = None
self.__set_logger()
def __set_logger(self):
self.logger = logging.getLogger(self.methodname())
self.logger.setLevel(logging.INFO)
if not self.logger.hasHandlers():
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.INFO)
self.logger.addHandler(consoleHandler)
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')
consoleHandler.setFormatter(formatter)
@classmethod
def __subclasshook__(cls, subclass):
return (hasattr(subclass, 'fit_transform') and callable(subclass.fit_transform) and
hasattr(subclass, 'embedding') and callable(subclass.embedding) and
hasattr(subclass, 'region')
)
def methodname(self):
"""Brief description of the specific clusterer
"""
return self.__class__.__name__
@abc.abstractmethod
def embedding(self) -> numpy.array:
"""Returns the final embedding for given region
Raises:
NotImplementedError: [description]
Returns:
np.array: embedding
"""
raise NotImplementedError
@abc.abstractmethod
def fit_transform(self, verbose:bool=False) -> numpy.array:
"""
Returns the final embedding
Args:
num_target_clusters (int): number of target clusters
verbose (bool, optional): Verbose output. Defaults to False.
Raises:
NotImplementedError: (abstract class)
Returns:
np.array: segmentation
"""
raise NotImplementedError
class PCAEmbedding(RegionEmbedding):
def __init__(self, region: SpectraRegion, dimensions: int=2) -> None:
super().__init__(region)
self.dimensions = dimensions
self.idx2coord = None
self.embedding_object = None
def fit_transform(self, verbose: bool = False) -> numpy.array:
elem_matrix, self.idx2coord = self.region.prepare_elem_matrix()
#np-array dims (n_samples, n_features)
self.logger.info("PCA reduction")
self.embedding_object = decomposition.PCA(
n_components=self.dimensions,
random_state=42,
)
self.logger.info("PCA fit+transform")
        self.embedded_matrix = self.embedding_object.fit_transform(elem_matrix)
        return self.embedded_matrix
def embedding(self) -> numpy.array:
outArray = np.zeros((self.region.region_array.shape[0], self.region.region_array.shape[1], self.dimensions))
for idx in self.idx2coord:
(x,y) = self.idx2coord[idx]
outArray[x,y,:] = self.embedded_matrix[idx]
return outArray
def covariances(self):
return self.embedding_object.get_covariance()
def loading(self):
# according to https://scentellegher.github.io/machine-learning/2020/01/27/pca-loadings-sklearn.html
computedPCs = ["PC{}".format(x) for x in range(1, self.dimensions+1)] # 1-based
loadings = pd.DataFrame(self.embedding_object.components_.T, columns=computedPCs, index=self.region.idx2mass)
return loadings
def explained_variance_ratio(self):
return self.embedding_object.explained_variance_ratio_
def plot_embedding(self, colors=None):
dimExplained = self.embedding_object.explained_variance_ratio_
reductionName = "PCA"
plt.figure(figsize=(12, 12))
dotlabels = None
labels=None
        if colors is not None:
dotlabels = [0] * self.embedded_matrix.shape[0]
cmap = matplotlib.cm.get_cmap('viridis')
norm = matplotlib.colors.Normalize(vmin=np.min(colors), vmax=np.max(colors))
for idx in self.idx2coord:
(x,y) = self.idx2coord[idx]
dotlabels[idx] = colors[x,y]
dotlabels = np.array(dotlabels)
for g in np.unique(dotlabels):
grpIndices = np.where(dotlabels == g)
labelColor = cmap(norm(g))
plt.scatter(self.embedded_matrix[grpIndices, 0], self.embedded_matrix[grpIndices, 1], color=labelColor, label=g, s=10, cmap='viridis')
else:
scatter=plt.scatter(self.embedded_matrix[:, 0], self.embedded_matrix[:, 1], s=10, cmap='viridis')
plt.xlabel("{} dim1 ({:.2})".format(reductionName, dimExplained[0]))
plt.ylabel("{} dim2 ({:.2})".format(reductionName, dimExplained[1]))
plt.gca().set_aspect('equal', adjustable='box')
plt.legend(bbox_to_anchor=(0, -0.2, 1, 0), loc="upper left", mode="expand", ncol=2)
plt.show()
plt.close()
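# Hedged usage sketch (illustrative only): reduce a SpectraRegion to two
# principal components with PCAEmbedding and inspect the per-mass loadings;
# `region` is assumed to be an already prepared SpectraRegion instance.
def _pca_embedding_example(region: SpectraRegion):
    pca = PCAEmbedding(region, dimensions=2)
    pca.fit_transform()
    coords = pca.embedding()    # array of shape (rows, cols, 2)
    loadings = pca.loading()    # DataFrame: contribution of each mass to each PC
    return coords, loadings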
class UMAPEmbedding(RegionEmbedding):
def __init__(self, region: SpectraRegion, dimensions: int=2) -> None:
super().__init__(region)
self.dimensions = dimensions
self.idx2coord = None
self.embedding_object = None
def fit_transform(self, verbose: bool = False, densmap: bool=False, n_neighbours: int=10, min_dist: float=0) -> numpy.array:
elem_matrix, self.idx2coord = self.region.prepare_elem_matrix()
#np-array dims (n_samples, n_features)
self.logger.info("UMAP reduction")
self.embedding_object = umap.UMAP(
densmap=densmap,
n_neighbors=n_neighbours,
min_dist=min_dist,
n_components=self.dimensions,
random_state=42,
)
        self.embedded_matrix = self.embedding_object.fit_transform(elem_matrix)
        return self.embedded_matrix
def embedding(self) -> numpy.array:
outArray = np.zeros((self.region.region_array.shape[0], self.region.region_array.shape[1], self.dimensions))
for idx in self.idx2coord:
(x,y) = self.idx2coord[idx]
outArray[x,y,:] = self.embedded_matrix[idx]
return outArray
class UMAP_WARD_Clusterer(RegionClusterer):
def __init__(self, region: SpectraRegion) -> None:
super().__init__(region)
self.umapEmbedding = UMAPEmbedding(region=region, dimensions=2)
self.pwdist = None
self.dimred_labels = None
self.segmented = None
def fit(self, num_target_clusters: int, densmap: bool = False, n_neighbours: int = 10, min_dist: float = 0, verbose: bool = False):
"""Performs UMAP dimension reduction on region array followed by Euclidean pairwise distance calculation in order to do Ward's linkage.
Args:
num_target_clusters (int): Number of desired clusters.
            densmap (bool, optional): Whether to use densMAP (density-preserving visualization tool based on UMAP). Defaults to False. To use densMAP please use DENSMAP_WARD_Clusterer instead.
n_neighbours (int, optional): The size of the local neighborhood (in terms of number of neighboring sample points) used for manifold approximation. For more information check UMAP documentation. Defaults to 10.
min_dist (float, optional): The min_dist parameter controls how tightly UMAP is allowed to pack points together. For more information check UMAP documentation. Defaults to 0.
verbose (bool, optional): Defaults to False.
"""
self.umapEmbedding.fit_transform(verbose=verbose, densmap=densmap, n_neighbours=n_neighbours, min_dist=min_dist)
dimred_elem_matrix = self.umapEmbedding.embedded_matrix
self.pwdist = pdist(dimred_elem_matrix, metric='euclidean')
_ = self.transform(num_target_clusters=num_target_clusters)
def _update_segmented(self):
image = np.zeros(self.region.region_array.shape, dtype=np.int16)
image = image[:,:,0]
# cluster 0 has special meaning: not assigned !
        assert 0 not in self.dimred_labels
for i in range(0, image.shape[0]):
for j in range(0, image.shape[1]):
image[i,j] = self.dimred_labels[self.region.pixel2idx[(i,j)]]
self.segmented = image
def transform(self, num_target_clusters: int, verbose: bool = False) -> numpy.array:
"""Allows to redo the WARD's clustering using the reduced data during fit operation.
Args:
num_target_clusters (int): Number of desired clusters.
verbose (bool, optional): Defaults to False.
Returns:
np.array: Segmented array.
"""
Z = spc.hierarchy.ward(self.pwdist)
self.dimred_labels = spc.hierarchy.fcluster(Z, t=num_target_clusters, criterion='maxclust')
self._update_segmented()
return self.segmented
def segmentation(self) -> numpy.array:
return self.segmented
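# Hedged usage sketch (illustrative only): embed the region with UMAP, cut the
# Ward dendrogram into 5 clusters, then re-cut it into 8 clusters without
# recomputing the embedding; `region` is assumed to be a SpectraRegion instance.
def _umap_ward_example(region: SpectraRegion):
    clusterer = UMAP_WARD_Clusterer(region)
    clusterer.fit(num_target_clusters=5, n_neighbours=10, min_dist=0)
    five_cluster_map = clusterer.segmentation()
    eight_cluster_map = clusterer.transform(num_target_clusters=8)
    return five_cluster_map, eight_cluster_map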
class DENSMAP_WARD_Clusterer(UMAP_WARD_Clusterer):
def __init__(self, region: SpectraRegion) -> None:
super().__init__(region)
def fit(self, num_target_clusters: int, n_neighbours: int = 10, min_dist: float = 0, verbose: bool = True):
"""Uses densMAP (density-preserving visualization tool based on UMAP) dimension reduction on region array followed by Euclidean pairwise distance calculation in order to do Ward's linkage.
Args:
num_target_clusters (int): Number of desired clusters.
n_neighbours (int, optional): The size of the local neighborhood (in terms of number of neighboring sample points) used for manifold approximation. For more information check UMAP documentation. Defaults to 10.
min_dist (float, optional): The min_dist parameter controls how tightly UMAP is allowed to pack points together. For more information check UMAP documentation. Defaults to 0.
            verbose (bool, optional): Defaults to True.
"""
return super().fit(num_target_clusters=num_target_clusters, densmap=True, n_neighbours=n_neighbours, min_dist=min_dist, verbose=verbose)
def transform(self, num_target_clusters: int, verbose: bool = False) -> numpy.array:
"""Allows to redo the WARD's clustering using the reduced data during fit operation.
Args:
num_target_clusters (int): Number of desired clusters.
verbose (bool, optional): Defaults to False.
Returns:
np.array: Segmented array.
"""
return super().transform(num_target_clusters, verbose)
def segmentation(self) -> numpy.array:
return super().segmentation()
class UMAP_DBSCAN_Clusterer(RegionClusterer):
def __init__(self, region: SpectraRegion) -> None:
super().__init__(region)
self.umapEmbedding = UMAPEmbedding(region=region, dimensions=2)
self.dimred_labels = None
self.dimred_elem_matrix = None
self.segmented = None
def fit(self, num_target_clusters: int, verbose: bool = False, densmap: bool=False, n_neighbours: int=10, min_dist: float=0, min_cluster_size: int=15, num_samples: int=10000) -> numpy.array:
"""Performs UMAP dimension reduction on region array followed by the HDBSCAN clustering.
Args:
num_target_clusters (int): Number of desired clusters.
verbose (bool, optional): Defaults to False.
densmap (bool, optional): Whether to use densMAP (density-preserving visualization tool based on UMAP). Defaults to False. If you want to apply densMAP please use DENSMAP_DBSCAN_Clusterer instead.
n_neighbours (int, optional): The size of the local neighborhood (in terms of number of neighboring sample points) used for manifold approximation. For more information check UMAP documentation. Defaults to 10.
min_dist (float, optional): The min_dist parameter controls how tightly UMAP is allowed to pack points together. For more information check UMAP documentation. Defaults to 0.
min_cluster_size (int, optional): The minimum size of HDBSCAN clusters. Defaults to 15.
num_samples (int, optional): Number of intensity values that will be used during HDBSCAN clustering. Defaults to 10000.
"""
self.umapEmbedding.fit_transform(verbose=verbose, densmap=densmap, n_neighbours=n_neighbours, min_dist=min_dist)
self.dimred_elem_matrix = self.umapEmbedding.embedded_matrix
_ = self.transform(num_target_clusters=num_target_clusters, min_cluster_size=min_cluster_size, num_samples=num_samples)
def transform(self, num_target_clusters: int, min_cluster_size: int = 15, num_samples: int = 10000, verbose: bool = False) -> numpy.array:
"""Performs HDBSCAN clustering (Hierarchical Density-Based Spatial Clustering of Applications with Noise) on the previously reduced data.
Args:
num_target_clusters (int): Number of desired clusters.
min_cluster_size (int, optional): The minimum size of HDBSCAN clusters. Defaults to 15.
num_samples (int, optional): Number of intensity values that will be used during HDBSCAN clustering. Defaults to 10000.
verbose (bool, optional): Defaults to False.
Returns:
np.array: Segmented array.
"""
self.logger.info("HDBSCAN reduction")
if num_samples > self.dimred_elem_matrix.shape[0]:
num_samples = self.dimred_elem_matrix.shape[0]
self.logger.info("HDBSCAN reduction num_samples reset: {}".format(num_samples))
if num_samples == -1 or self.dimred_elem_matrix.shape[0] < num_samples:
selIndices = [x for x in range(0, self.dimred_elem_matrix.shape[0])]
else:
selIndices = random.sample([x for x in range(0, self.dimred_elem_matrix.shape[0])], num_samples)
dr_matrix = self.dimred_elem_matrix[selIndices, :]
clusterer = hdbscan.HDBSCAN(min_cluster_size=min_cluster_size, prediction_data=True).fit(dr_matrix)
clusterer.generate_prediction_data()
soft_clusters = hdbscan.prediction.membership_vector(clusterer, self.dimred_elem_matrix)
self.dimred_labels = np.array([
|
np.argmax(x)
|
numpy.argmax
|
from skillmodels import SkillModel
import json
import pandas as pd
import numpy as np
from skillmodels.model_functions.transition_functions import \
no_squares_translog
from numpy.testing import assert_array_almost_equal as aaae
from nose.tools import nottest, assert_almost_equal
with open('skillmodels/tests/estimation/no_squares_translog_model.json') as j:
model_dict = json.load(j)
@nottest
def generate_test_data(nobs, factors, periods, included_positions, meas_names,
initial_mean, initial_cov, intercepts, loadings,
meas_sd, gammas, trans_sd, anch_intercept,
anch_loadings, anch_sd):
# np.random.seed(12345)
np.random.seed(547185)
nfac = len(factors)
initial_factors = np.random.multivariate_normal(
mean=initial_mean, cov=initial_cov, size=(nobs))
factor_data = []
meas_data = []
m_to_factor = [0, 0, 0, 1, 1, 1, 2, 2, 2]
counter = 0
for t in periods:
if t == 0:
new_facs = initial_factors
else:
new_facs = np.zeros((nobs, nfac))
new_facs[:, :nfac - 1] += np.random.normal(
loc=np.zeros(nfac - 1), scale=trans_sd[t - 1],
size=(nobs, nfac - 1))
for f, factor in enumerate(factors):
if f in [0, 1]:
new_facs[:, f] += no_squares_translog(
factor_data[t - 1], gammas[f][t - 1],
included_positions[f])
else:
new_facs[:, f] = factor_data[t - 1][:, f]
factor_data.append(new_facs)
nmeas = 9 if t == 0 else 6
# noise part of measurements
measurements = np.random.normal(
loc=np.zeros(nmeas), scale=meas_sd[counter: counter + nmeas],
size=(nobs, nmeas))
# add structural part of measurements
for m in range(nmeas):
factor_pos = m_to_factor[m]
measurements[:, m] += (new_facs[:, factor_pos] * loadings[counter])
measurements[:, m] += intercepts[counter]
counter += 1
df = pd.DataFrame(data=measurements, columns=meas_names[:nmeas])
if t == periods[-1]:
# add the anchoring outcome to the data
df['anch_out'] = np.dot(new_facs[:, :-1], anch_loadings)
df['anch_out'] += anch_intercept
df['anch_out'] += np.random.normal(loc=0, scale=anch_sd, size=nobs)
df['period'] = t
df['id'] = np.arange(nobs)
meas_data.append(df)
large_df = pd.concat(meas_data, sort=True)
large_df.sort_values(by=['id', 'period'], inplace=True)
# print('true cov matrices for generated factor data')
# for arr in factor_data:
# df = pd.DataFrame(data=arr, columns=factors)
# print(df.cov())
return large_df
class TestOfWAEstimator:
def setup(self):
self.factor_names = ['fac1', 'fac2', 'fac3']
self.nfac = len(self.factor_names)
self.nperiods = 4
self.periods = list(range(self.nperiods))
self.included_positions = [np.arange(3), np.array([1, 2]), []]
self.anch_intercept = 3.0
self.anch_loadings = np.array([1.2, 1.3])
self.meas_names = ['y{}'.format(i + 1) for i in range(9)]
self.true_gammas = [
[[0.725, 0.01, 0.02, 0.0015, 0.0018, 0.0014, 0.5],
[0.750, 0.03, 0.03, 0.0003, 0.0020, 0.0024, 0.6],
[0.775, 0.05, 0.04, 0.0023, 0.0026, 0.0012, 0.7]],
[[.90, 0.01, 0.0004, 0.25],
[.925, 0.04, 0.0014, 0.75],
[.950, 0.07, 0.0002, 1.25]],
np.zeros((3, 0))]
self.true_loadings =
|
np.arange(start=2.5, stop=3.85, step=0.05)
|
numpy.arange
|
from collections import OrderedDict
from functools import partial
from unittest import TestCase
import os
import os.path as osp
import numpy as np
from datumaro.components.annotation import (
AnnotationType, LabelCategories, Mask, MaskCategories,
)
from datumaro.components.dataset import Dataset
from datumaro.components.environment import Environment
from datumaro.components.extractor import DatasetItem, Extractor
from datumaro.components.media import Image
from datumaro.plugins.camvid_format import CamvidConverter, CamvidImporter
from datumaro.util.meta_file_util import parse_meta_file
from datumaro.util.test_utils import (
TestDir, check_save_and_load, compare_datasets,
)
import datumaro.plugins.camvid_format as Camvid
from .requirements import Requirements, mark_requirement
class CamvidFormatTest(TestCase):
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_write_and_parse_labelmap(self):
src_label_map = Camvid.CamvidLabelMap
with TestDir() as test_dir:
file_path = osp.join(test_dir, 'label_colors.txt')
Camvid.write_label_map(file_path, src_label_map)
dst_label_map = Camvid.parse_label_map(file_path)
self.assertEqual(src_label_map, dst_label_map)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_write_and_parse_meta_file(self):
src_label_map = Camvid.CamvidLabelMap
with TestDir() as test_dir:
source_dataset = Dataset.from_iterable([],
categories=Camvid.make_camvid_categories(src_label_map))
CamvidConverter.convert(source_dataset, test_dir,
save_dataset_meta=True)
dst_label_map = parse_meta_file(test_dir)
self.assertEqual(src_label_map, dst_label_map)
DUMMY_DATASET_DIR = osp.join(osp.dirname(__file__), 'assets', 'camvid_dataset')
class TestExtractorBase(Extractor):
def _label(self, camvid_label):
return self.categories()[AnnotationType.label].find(camvid_label)[0]
def categories(self):
return Camvid.make_camvid_categories()
class CamvidImportTest(TestCase):
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_import(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id='0001TP_008550', subset='test',
image=np.ones((1, 5, 3)),
annotations=[
Mask(image=np.array([[1, 1, 0, 0, 0]]), label=1),
Mask(image=np.array([[0, 0, 1, 0, 0]]), label=18),
Mask(image=np.array([[0, 0, 0, 1, 1]]), label=22),
]
),
DatasetItem(id='0001TP_008580', subset='test',
image=np.ones((1, 5, 3)),
annotations=[
Mask(image=np.array([[1, 1, 0, 0, 0]]), label=2),
Mask(image=np.array([[0, 0, 1, 0, 0]]), label=4),
Mask(image=np.array([[0, 0, 0, 1, 1]]), label=27),
]
),
DatasetItem(id='0001TP_006690', subset='train',
image=np.ones((1, 5, 3)),
annotations=[
Mask(image=np.array([[1, 1, 0, 1, 1]]), label=3),
Mask(image=np.array([[0, 0, 1, 0, 0]]), label=18),
]
),
DatasetItem(id='0016E5_07959', subset = 'val',
image=np.ones((1, 5, 3)),
annotations=[
Mask(image=np.array([[1, 1, 1, 0, 0]]), label=1),
Mask(image=np.array([[0, 0, 0, 1, 1]]), label=8),
]
),
], categories=Camvid.make_camvid_categories())
parsed_dataset = Dataset.import_from(DUMMY_DATASET_DIR, 'camvid')
compare_datasets(self, source_dataset, parsed_dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_detect_camvid(self):
detected_formats = Environment().detect_dataset(DUMMY_DATASET_DIR)
self.assertEqual([CamvidImporter.NAME], detected_formats)
class CamvidConverterTest(TestCase):
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def _test_save_and_load(self, source_dataset, converter, test_dir,
target_dataset=None, importer_args=None, **kwargs):
return check_save_and_load(self, source_dataset, converter, test_dir,
importer='camvid',
target_dataset=target_dataset, importer_args=importer_args, **kwargs)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_camvid_segm(self):
class TestExtractor(TestExtractorBase):
def __iter__(self):
return iter([
DatasetItem(id='a/b/1', subset='test',
image=np.ones((1, 5, 3)), annotations=[
Mask(image=np.array([[0, 0, 0, 1, 0]]), label=0),
Mask(image=np.array([[0, 1, 1, 0, 0]]), label=3),
Mask(image=np.array([[1, 0, 0, 0, 1]]), label=4),
]),
])
with TestDir() as test_dir:
self._test_save_and_load(TestExtractor(),
partial(CamvidConverter.convert, label_map='camvid'),
test_dir)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_camvid_segm_unpainted(self):
class TestExtractor(TestExtractorBase):
def __iter__(self):
return iter([
DatasetItem(id=1, subset='a', image=np.ones((1, 5, 3)), annotations=[
Mask(image=np.array([[0, 0, 0, 1, 0]]), label=0),
Mask(image=np.array([[0, 1, 1, 0, 0]]), label=3),
Mask(image=np.array([[1, 0, 0, 0, 1]]), label=4),
]),
])
class DstExtractor(TestExtractorBase):
def __iter__(self):
return iter([
DatasetItem(id=1, subset='a', image=np.ones((1, 5, 3)), annotations=[
Mask(image=np.array([[0, 0, 0, 1, 0]]), label=0),
Mask(image=np.array([[0, 1, 1, 0, 0]]), label=3),
Mask(image=np.array([[1, 0, 0, 0, 1]]), label=4),
]),
])
with TestDir() as test_dir:
self._test_save_and_load(TestExtractor(),
partial(CamvidConverter.convert,
label_map='camvid', apply_colormap=False),
test_dir, target_dataset=DstExtractor())
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_dataset_with_no_subsets(self):
class TestExtractor(TestExtractorBase):
def __iter__(self):
return iter([
DatasetItem(id=1, image=np.ones((1, 5, 3)), annotations=[
Mask(image=np.array([[1, 0, 0, 1, 0]]), label=0),
Mask(image=np.array([[0, 1, 1, 0, 1]]), label=3),
]),
DatasetItem(id=2, image=np.ones((1, 5, 3)), annotations=[
Mask(image=np.array([[1, 1, 0, 1, 0]]), label=1),
Mask(image=np.array([[0, 0, 1, 0, 1]]), label=2),
]),
])
with TestDir() as test_dir:
self._test_save_and_load(TestExtractor(),
partial(CamvidConverter.convert, label_map='camvid'), test_dir)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_dataset_with_cyrillic_and_spaces_in_filename(self):
class TestExtractor(TestExtractorBase):
def __iter__(self):
return iter([
DatasetItem(id='кириллица с пробелом',
image=np.ones((1, 5, 3)), annotations=[
Mask(image=np.array([[1, 0, 0, 1, 0]]), label=0),
Mask(image=np.array([[0, 1, 1, 0, 1]]), label=3),
]
),
])
with TestDir() as test_dir:
self._test_save_and_load(TestExtractor(),
partial(CamvidConverter.convert, label_map='camvid'), test_dir)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_with_no_masks(self):
class TestExtractor(TestExtractorBase):
def __iter__(self):
return iter([
DatasetItem(id='a/b/1', subset='test',
image=np.ones((2, 5, 3)),
),
])
with TestDir() as test_dir:
self._test_save_and_load(TestExtractor(),
partial(CamvidConverter.convert, label_map='camvid'),
test_dir)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_dataset_with_source_labelmap_undefined(self):
class SrcExtractor(TestExtractorBase):
def __iter__(self):
yield DatasetItem(id=1, image=np.ones((1, 5, 3)), annotations=[
Mask(image=np.array([[1, 1, 0, 1, 0]]), label=0),
Mask(image=np.array([[0, 0, 1, 0, 0]]), label=1),
])
def categories(self):
label_cat = LabelCategories()
label_cat.add('Label_1')
label_cat.add('label_2')
return {
AnnotationType.label: label_cat,
}
class DstExtractor(TestExtractorBase):
def __iter__(self):
yield DatasetItem(id=1, image=np.ones((1, 5, 3)), annotations=[
Mask(image=np.array([[1, 1, 0, 1, 0]]),
label=self._label('Label_1')),
Mask(image=np.array([[0, 0, 1, 0, 0]]),
label=self._label('label_2')),
])
def categories(self):
label_map = OrderedDict()
label_map['background'] = None
label_map['Label_1'] = None
label_map['label_2'] = None
return Camvid.make_camvid_categories(label_map)
with TestDir() as test_dir:
self._test_save_and_load(SrcExtractor(),
partial(CamvidConverter.convert, label_map='source'),
test_dir, target_dataset=DstExtractor())
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_dataset_with_source_labelmap_defined(self):
class SrcExtractor(TestExtractorBase):
def __iter__(self):
yield DatasetItem(id=1, image=np.ones((1, 5, 3)), annotations=[
Mask(image=np.array([[1, 1, 0, 1, 0]]), label=1),
Mask(image=np.array([[0, 0, 1, 0, 1]]), label=2),
])
def categories(self):
label_map = OrderedDict()
label_map['background'] = (0, 0, 0)
label_map['label_1'] = (1, 2, 3)
label_map['label_2'] = (3, 2, 1)
return Camvid.make_camvid_categories(label_map)
class DstExtractor(TestExtractorBase):
def __iter__(self):
yield DatasetItem(id=1, image=np.ones((1, 5, 3)), annotations=[
Mask(image=np.array([[1, 1, 0, 1, 0]]),
label=self._label('label_1')),
Mask(image=np.array([[0, 0, 1, 0, 1]]),
label=self._label('label_2')),
])
def categories(self):
label_map = OrderedDict()
label_map['background'] = (0, 0, 0)
label_map['label_1'] = (1, 2, 3)
label_map['label_2'] = (3, 2, 1)
return Camvid.make_camvid_categories(label_map)
with TestDir() as test_dir:
self._test_save_and_load(SrcExtractor(),
partial(CamvidConverter.convert, label_map='source'),
test_dir, target_dataset=DstExtractor())
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_image_with_arbitrary_extension(self):
class SrcExtractor(TestExtractorBase):
def __iter__(self):
return iter([
DatasetItem(id='q/1', image=Image(path='q/1.JPEG',
data=np.zeros((4, 3, 3)))),
DatasetItem(id='a/b/c/2', image=Image(
path='a/b/c/2.bmp', data=np.ones((1, 5, 3))
),
annotations=[
Mask(np.array([[0, 0, 0, 1, 0]]),
label=self._label('a')),
Mask(np.array([[0, 1, 1, 0, 0]]),
label=self._label('b')),
])
])
def categories(self):
label_map = OrderedDict()
label_map['a'] = None
label_map['b'] = None
return Camvid.make_camvid_categories(label_map)
class DstExtractor(TestExtractorBase):
def __iter__(self):
return iter([
DatasetItem(id='q/1', image=Image(path='q/1.JPEG',
data=np.zeros((4, 3, 3)))),
DatasetItem(id='a/b/c/2', image=Image(
path='a/b/c/2.bmp', data=np.ones((1, 5, 3))
),
annotations=[
Mask(np.array([[1, 0, 0, 0, 1]]),
label=self._label('background')),
Mask(np.array([[0, 0, 0, 1, 0]]),
label=self._label('a')),
Mask(np.array([[0, 1, 1, 0, 0]]),
label=self._label('b')),
])
])
def categories(self):
label_map = OrderedDict()
label_map['background'] = None
label_map['a'] = None
label_map['b'] = None
return Camvid.make_camvid_categories(label_map)
with TestDir() as test_dir:
self._test_save_and_load(SrcExtractor(),
partial(CamvidConverter.convert, save_images=True),
test_dir, require_images=True,
target_dataset=DstExtractor())
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_inplace_save_writes_only_updated_data(self):
src_mask_cat = MaskCategories.generate(3, include_background=False)
expected = Dataset.from_iterable([
DatasetItem(1, subset='a', image=np.ones((2, 1, 3)),
annotations=[
Mask(np.ones((2, 1)), label=2)
]),
DatasetItem(2, subset='a', image=
|
np.ones((3, 2, 3))
|
numpy.ones
|
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 27 01:06:59 2016
@author: yxl
"""
from imagepy import IPy, wx
import numpy as np
from imagepy.core.engine import Simple, Filter
from imagepy.core.manager import WindowsManager
from scipy.ndimage import label, generate_binary_structure
from skimage.measure import regionprops
class Mark:
def __init__(self, data):
self.data = data
def draw(self, dc, f, **key):
dc.SetPen(wx.Pen((255,255,0), width=1, style=wx.SOLID))
dc.SetTextForeground((255,255,0))
font = wx.Font(8, wx.FONTFAMILY_DEFAULT,
wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False)
dc.SetFont(font)
data = self.data[0 if len(self.data)==1 else key['cur']]
for i in range(len(data)):
pos = f(*(data[i][0][1], data[i][0][0]))
dc.DrawCircle(pos[0], pos[1], 2)
dc.DrawText('id={}'.format(i), pos[0], pos[1])
            if data[i][1] is None: continue
k1, k2, a = data[i][1]
aixs = np.array([[-np.sin(a), np.cos(a)],
[np.cos(a), np.sin(a)]])*[k1/2, k2/2]
ar = np.linspace(0, np.pi*2,25)
xy = np.vstack((np.cos(ar),
|
np.sin(ar)
|
numpy.sin
|
# -*- coding: utf-8 -*-
"""
Created on Sat May 29 16:10:40 2021
@author: alankar
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import illustris_python as il
import h5py as hdf
from operator import itemgetter
basePath = '/virgo/simulations/IllustrisTNG/L35n2160TNG/output'
snapNum = 67
haloID = 8
fields = ['SubhaloMass','SubhaloSFRinRad', 'SubhaloVel']
snapLoc = '%s/snapdir_%03d/snap_%03d.0.hdf5'%(basePath, snapNum, snapNum)
header = hdf.File(snapLoc, 'r')
print('Data Labels: ',list(header.keys()))
header = header['Config']
print('Header: ',header.keys())
print()
gas_part = il.snapshot.loadHalo(basePath, snapNum, id=haloID, partType='gas', fields=None)
print('Gas properties: ')
print(gas_part.keys())
print()
cloud_data = '/freya/ptmp/mpa/dnelson/sims.TNG/L35n2160TNG/data.files/voronoi/segmentation_67_h8_Mg-II-numdens_gt_1e-08.hdf5'
cloud_file = hdf.File(cloud_data,'r')
print('Cloud properties: ')
print(list(cloud_file.keys()))
print(list(cloud_file['objects'].keys()))
print(list(cloud_file['props'].keys()))
print('Total number of clouds: %d\n'%len( np.array(cloud_file['props/radius'])))
redshift = 0.5
age = 8.604 #Gyr from cosmological model of Illustris TNG ( from: http://www.astro.ucla.edu/%7Ewright/CosmoCalc.html )
#age = age*1e9*365*24*60**2 #s
a = 1/(1+redshift)
gamma = 5/3.
mp = 1.6726e-24
kB = 1.3807e-16
Msun = 2e33
yr = 365*24*60**2
kpc = 3.086e21
ckpc = kpc*a
h = 0.67
XH = 0.76
mu = 4/(1+3*XH+4*XH*np.array(gas_part['ElectronAbundance']))
density = np.array(gas_part['Density'])*((1e10*Msun/h)/(ckpc/h)**3)
Temperature = (gamma-1)*np.array(gas_part['InternalEnergy'])*(1e5**2)*(mu*mp/kB) #[1kpc/1Gyr = 1km/s] squared
nH = XH*density/mp #nH in cm^-3
pos =
|
np.array(gas_part['Coordinates'])
|
numpy.array
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 27 18:36:22 2019
agent_helper.py: This is the agent helper function,
used to help agent act during training
@author: weijianzheng
"""
import random
import numpy as np
# the function to return the two nodes needs to be merged randomly
def find_two_nodes_random(state):
num_functions = len(state)
merge_node_index = random.sample(range(0, num_functions-1), 2)
first_node_index = merge_node_index[0]
second_node_index = merge_node_index[1]
#print("first select two nodes " + str(first_node_index) + " and " \
# + str(second_node_index))
while(state[first_node_index][num_functions] == -1 or \
state[second_node_index][num_functions] == -1):
merge_node_index = random.sample(range(0, num_functions), 2)
first_node_index = merge_node_index[0]
second_node_index = merge_node_index[1]
# print("then select two nodes " + str(first_node_index) + " and " \
# + str(second_node_index))
#print("Two selected nodes are " + str(first_node_index) + \
# " and " + str(second_node_index))
return (first_node_index, second_node_index)
# the function to return the two nodes needs to be merged
def find_two_nodes(state, A2):
num_functions = len(state)
temp_action_array = A2.copy()
temp_action_array = np.asarray(temp_action_array)
#print(temp_action_array.shape)
first_node = np.amax(temp_action_array)
first_node_index_array = np.argwhere(temp_action_array == first_node)
first_node_index = first_node_index_array[0][2]
#print(first_node_index)
while(state[first_node_index][num_functions] == -1):
if(temp_action_array.size > 1):
index_array = np.argwhere(temp_action_array == first_node)
temp_action_array = np.delete(temp_action_array, index_array)
first_node = np.amax(temp_action_array)
first_node_index_array = np.argwhere(np.asarray(A2) == first_node)
first_node_index = first_node_index_array[0][2]
#print(temp_action_array)
# find the content for the first node
first_node = np.amax(temp_action_array)
# this is used to find the index of the largest array
first_node_index_array = np.argwhere(np.asarray(A2) == first_node)
first_node_index = first_node_index_array[0][2]
#print(first_node_index)
#print(temp_action_array)
index_array = np.argwhere(temp_action_array == first_node)
index = index_array#[0][2]
temp_action_array = np.delete(temp_action_array, index)
second_node = np.amax(temp_action_array)
second_node_index_array = np.argwhere(np.asarray(A2) == second_node)
second_node_index = second_node_index_array[0][2]
#print(second_node_index)
#print(state)
while(state[second_node_index][num_functions] == -1):
# now need to remove one with -1
if(temp_action_array.size > 1):
index_array = np.argwhere(temp_action_array == second_node)
#print(temp_action_array)
temp_action_array = np.delete(temp_action_array, index_array)
#print("delete " + str(index_array))
second_node = np.amax(temp_action_array)
second_node_index_array = np.argwhere(
|
np.asarray(A2)
|
numpy.asarray
|
import ald
import numpy as np
import h5py
lH = 4
ld = 4
# Set scales to compute other parameters.
DT = 1.0
H = 1.0
ell = lH * H
delta = ell / ld
tauR = delta ** 2 / DT
U0 = ell / tauR
# specify RTP type
particle = ald.ABP(U0=U0, tauR=tauR, DT=DT)
flow = ald.ZeroVelocity()
domain = ald.Box(bottom=-H / 2, top=H / 2, left=-H / 2, right=H / 2)
ic = ald.InitialConfig(
x=ald.Uniform(domain.left, domain.right),
y=ald.Uniform(domain.bottom, domain.top),
theta=ald.Uniform(0, 2 * np.pi),
)
# determine timescales in our problem
# flow timescale
timescales = []
if U0 != 0.0:
timescales.append(H / U0)
timescales.append(tauR)
if DT != 0.0:
timescales.append(H ** 2 / DT)
tmin =
|
np.min(timescales)
|
numpy.min
|
# This file was adopted from NN seminars and changed (to fit project purposes) by <NAME>
# Neural Networks (2-AIN-132/15), FMFI UK BA
# (c) <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> 2017-2022
import matplotlib
matplotlib.use('TkAgg')  # fixme if plotting doesn't work (try 'Qt5Agg' or 'Qt4Agg')
import matplotlib.pyplot as plt
# for 3D visualization
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
from matplotlib import cm
from sklearn.metrics import confusion_matrix
import numpy as np
import atexit
import os
import time
import functools
import json
import string
## Utilities
def onehot_decode(X):
return np.argmax(X, axis=0)
def onehot_encode(L, c):
if isinstance(L, int):
L = [L]
n = len(L)
out = np.zeros((c, n))
out[L, range(n)] = 1
return np.squeeze(out)
def vector(array, row_vector=False):
'''
    Constructs a column vector (i.e. matrix of shape (n,1)) from given array/numpy.ndarray, or row
vector (shape (1,n)) if row_vector = True.
'''
v = np.array(array)
if np.squeeze(v).ndim > 1:
raise ValueError('Cannot construct vector from array of shape {}!'.format(v.shape))
return v.reshape((1, -1) if row_vector else (-1, 1))
def add_bias(X):
'''
Add bias term to vector, or to every (column) vector in a matrix.
'''
if X.ndim == 1:
return np.concatenate((X, [1]))
else:
pad = np.ones((1, X.shape[1]))
return np.concatenate((X, pad), axis=0)
def timeit(func):
'''
Profiling function to measure time it takes to finish function.
Args:
        func(*function): Function to measure
    Returns:
        (*function) New wrapped function with time measurement
'''
@functools.wraps(func)
def newfunc(*args, **kwargs):
start_time = time.time()
out = func(*args, **kwargs)
elapsed_time = time.time() - start_time
print('Function [{}] finished in {:.3f} s'.format(func.__name__, elapsed_time))
return out
return newfunc
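# Hedged usage sketch for the timeit decorator above; the decorated function
# and its workload are illustrative only and are not used elsewhere.
@timeit
def _timeit_example(n=10**6):
    return sum(range(n))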
## Interactive drawing
def clear():
plt.clf()
def interactive_on():
plt.ion()
plt.show(block=False)
time.sleep(0.1)
def interactive_off():
plt.ioff()
plt.close()
def redraw():
# plt.gcf().canvas.draw() # fixme: uncomment if interactive drawing does not work
plt.waitforbuttonpress(timeout=0.001)
time.sleep(0.001)
def keypress(e):
if e.key in {'q', 'escape'}:
os._exit(0) # unclean exit, but exit() or sys.exit() won't work
if e.key in {' ', 'enter'}:
plt.close() # skip blocking figures
def use_keypress(fig=None):
if fig is None:
fig = plt.gcf()
fig.canvas.mpl_connect('key_press_event', keypress)
## Non-blocking figures still block at end
def finish():
plt.show(block=True) # block until all figures are closed
atexit.register(finish)
## Plotting
palette = ['#e41a1c','#377eb8','#4daf4a','#984ea3','#ff7f00','#ffff33','#a65628','#f781bf','#999999']
def limits(values, gap=0.05):
x0 = np.min(values)
x1 = np.max(values)
xg = (x1 - x0) * gap
return np.array((x0-xg, x1+xg))
def plot_errors(title, errors, test_error=None, block=True):
plt.figure(1)
use_keypress()
plt.clf()
plt.ylim(bottom=0)
plt.plot(errors)
if test_error:
plt.plot([test_error]*len(errors))
plt.tight_layout()
plt.gcf().canvas.set_window_title(title)
plt.show(block=block)
def plot_both_errors(trainCEs, trainREs, testCE=None, testRE=None, pad=None, figsize=(1918, 1025), block=True, filename=None):
px = 1/plt.rcParams['figure.dpi']
fig = plt.figure(2, figsize=tuple(i*px for i in figsize))
use_keypress()
plt.clf()
if pad is None:
pad = max(len(trainCEs), len(trainREs))
else:
        trainCEs = np.concatenate((trainCEs, [None]*(pad-len(trainCEs))))
        trainREs = np.concatenate((trainREs, [None]*(pad-len(trainREs))))
ax = plt.subplot(2,1,1)
plt.ylim(bottom=0, top=100)
plt.title('Classification error [%]')
plt.plot(100*np.array(trainCEs), label='train set')
if testCE is not None:
plt.plot([100*testCE]*pad, label='valid set')
plt.legend()
plt.subplot(2,1,2)
plt.ylim(bottom=0, top=1)
plt.title('Model loss [MSE/sample]')
plt.plot(trainREs, label='train set')
if testRE is not None:
plt.plot([testRE]*pad, label='test set')
plt.tight_layout()
plt.gcf().canvas.set_window_title('Error metrics')
plt.legend()
if filename is not None:
plt.savefig(filename, dpi=fig.dpi)
plt.show(block=block)
def plot_dots(inputs, labels=None, predicted=None, test_inputs=None, test_labels=None, test_predicted=None, s=60, i_x=0, i_y=1, title=None, figsize=(1918, 1025), block=True, filename=None):
px = 1/plt.rcParams['figure.dpi']
fig = plt.figure(title or 3, figsize=tuple(i*px for i in figsize))
use_keypress()
plt.clf()
if inputs is not None:
if labels is None:
plt.gcf().canvas.set_window_title('Data distribution')
plt.scatter(inputs[i_x,:], inputs[i_y,:], s=s, c=palette[-1], edgecolors=[0.4]*3, alpha=0.5, label='train data')
elif predicted is None:
plt.gcf().canvas.set_window_title('Class distribution')
for i, c in enumerate(set(labels)):
plt.scatter(inputs[i_x,labels==c], inputs[i_y,labels==c], s=s, c=palette[i], edgecolors=[0.4]*3, label='train cls {}'.format(c))
else:
plt.gcf().canvas.set_window_title('Predicted vs. actual')
for i, c in enumerate(set(labels)):
plt.scatter(inputs[i_x,labels==c], inputs[i_y,labels==c], s=2.0*s, c=palette[i], edgecolors=None, alpha=0.333, label='train cls {}'.format(c))
for i, c in enumerate(set(labels)):
plt.scatter(inputs[i_x,predicted==c], inputs[i_y,predicted==c], s=0.5*s, c=palette[i], edgecolors=None, label='predicted {}'.format(c))
plt.xlim(limits(inputs[i_x,:]))
plt.ylim(limits(inputs[i_y,:]))
if test_inputs is not None:
if test_labels is None:
plt.scatter(test_inputs[i_x,:], test_inputs[i_y,:], marker='s', s=s, c=palette[-1], edgecolors=[0.4]*3, alpha=0.5, label='test data')
elif test_predicted is None:
for i, c in enumerate(set(test_labels)):
plt.scatter(test_inputs[i_x,test_labels==c], test_inputs[i_y,test_labels==c], marker='s', s=s, c=palette[i], edgecolors=[0.4]*3, label='test cls {}'.format(c))
else:
for i, c in enumerate(set(test_labels)):
plt.scatter(test_inputs[i_x,test_labels==c], test_inputs[i_y,test_labels==c], marker='s', s=2.0*s, c=palette[i], edgecolors=None, alpha=0.333, label='test cls {}'.format(c))
for i, c in enumerate(set(test_labels)):
plt.scatter(test_inputs[i_x,test_predicted==c], test_inputs[i_y,test_predicted==c], marker='s', s=0.5*s, c=palette[i], edgecolors=None, label='predicted {}'.format(c))
if inputs is None:
plt.xlim(limits(test_inputs[i_x,:]))
plt.ylim(limits(test_inputs[i_y,:]))
plt.legend()
if title is not None:
plt.gcf().canvas.set_window_title(title)
plt.tight_layout()
if filename is not None:
plt.savefig(filename, dpi=fig.dpi)
plt.show(block=block)
def plot_areas(model, inputs, labels=None, w=30, h=20, i_x=0, i_y=1, block=True):
plt.figure(4)
use_keypress()
plt.clf()
plt.gcf().canvas.set_window_title('Decision areas')
dim = inputs.shape[0]
data = np.zeros((dim, w*h))
# # "proper":
# X = np.linspace(*limits(inputs[i_x,:]), w)
# Y = np.linspace(*limits(inputs[i_y,:]), h)
# YY, XX = np.meshgrid(Y, X)
#
# for i in range(dim):
# data[i,:] = np.mean(inputs[i,:])
# data[i_x,:] = XX.flat
# data[i_y,:] = YY.flat
X1 = np.linspace(*limits(inputs[0,:]), w)
Y1 = np.linspace(*limits(inputs[1,:]), h)
X2 = np.linspace(*limits(inputs[2,:]), w)
Y2 = np.linspace(*limits(inputs[3,:]), h)
YY1, XX1 = np.meshgrid(Y1, X1)
YY2, XX2 = np.meshgrid(Y2, X2)
data[0,:] = XX1.flat
data[1,:] = YY1.flat
data[2,:] = XX2.flat
data[3,:] = YY2.flat
outputs, *_ = model.predict(data)
assert outputs.shape[0] == model.dim_out,\
f'Outputs do not have correct shape, expected ({model.dim_out}, ?), got {outputs.shape}'
outputs = outputs.reshape((-1,w,h))
outputs -= np.min(outputs, axis=0, keepdims=True)
outputs = np.exp(1*outputs)
outputs /= np.sum(outputs, axis=0, keepdims=True)
plt.imshow(outputs.T)
plt.tight_layout()
plt.show(block=block)
def read_data(filepath):
metadata = None
data = np.array(list(map(list, np.genfromtxt(filepath, dtype=None, skip_header=1, encoding=None))))
inputs = data[:,:-1].astype(float)
labels = data[:,-1].astype(str)
_, labels = np.unique(labels, return_inverse=True)
return inputs.T, labels
def get_hyperparameter_configurations(json_filepath):
data = None
with open(json_filepath) as f:
data = json.load(f)
return data
def plot_confusion_matrix(test_labels, test_predicted, n_classes=3, block=False):
conf_mat = confusion_matrix(test_labels, test_predicted)
alphabets = tuple(string.ascii_lowercase)[:n_classes]
fig, ax = plt.subplots()
cax = ax.matshow(conf_mat, cmap='Blues')
fig.colorbar(cax)
plt.xticks(np.arange(n_classes), alphabets)
plt.yticks(np.arange(n_classes), alphabets)
for (i, j), z in
|
np.ndenumerate(conf_mat)
|
numpy.ndenumerate
|
from Sampler import *
import numpy as np
import scipy.stats as st
import math
def mean(nlist):
return np.mean(nlist)
def var(nlist):
return np.var(nlist, ddof=1)
# Choose a sampler from Sampler.py
def genSample(k=1, n=1, RandomSeed=-1):
return Normal(x=k, n=int(n), RandomSeed=RandomSeed)
def OCBA(k, n0, N, τ):
index = [x for x in range(0,k)]
samples = []
means = np.zeros(k)
variances = np.zeros(k)
dik = np.zeros(k) # the difference of means between system i and maxi
for i in range(0,k):
sample_i=genSample(i, n0)
samples.append(sample_i)
t = 0
nt = np.array( [n0 for i in range(0,k)] )
nt1 = np.zeros(k)
bt = sum( nt )
while bt<N:
for i in range(0,k):
means[i] = mean(samples[i])
variances[i] = var(samples[i])
maxi = np.argmax(means)
for i in range(0,k):
dik[i] = means[maxi] - means[i]
bt = bt + τ
# calculate nt1
term1 = sum( variances[i] / pow(dik[i], 2) for i in range(0,k) if i!=maxi ) # note: dik[maxi] = 0
term2 = [variances[i]/pow(dik[i],4) for i in range(0,k) if i!=maxi]
term3 = np.sqrt( variances[maxi] * (sum(term2)) )
r = bt / (term1 + term3)
for i in range(0,k):
if i==maxi:
nt1[maxi] = term3 * r
else:
nt1[i] = variances[i] * r / pow(dik[i], 2)
new_n = np.zeros(k)
for i in range(0,k):
new_n[i] = max(0, math.ceil(nt1[i] - nt[i]))
new_sample = genSample(i, int(new_n[i]))
samples[i] = samples[i] + new_sample
t = t+1
nt = nt + new_n # update the allocation
return [maxi, nt, means] # {'index': maxi,'N': nt, 'means': means}
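# Hedged usage sketch (illustrative values only): allocate a total budget of 500
# replications across 5 alternatives, starting from 10 replications each and
# adding 20 replications per OCBA iteration.
def _ocba_example():
    best_index, allocation, sample_means = OCBA(5, 10, 500, 20)
    return best_index, allocation, sample_means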
def get_nt1(k, maxi, bt, variances, lambdaik, dik, nt, survive):
nt1 = [nt[i] for i in range(0,k)]
if sum(survive) == 1: # only one system survived
remain = survive.index(True)
nt1[remain] = bt - sum(nt) + nt[remain]
return nt1
eta =
|
np.zeros(k)
|
numpy.zeros
|
import sys, os
import numpy as np
from scipy.linalg import null_space
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from grasp_functions import block_diag, check_equal_matrices, get_rank
from data_types import Contact, Joint, Finger
from class_grasp import Grasp
from class_jacobian import Jacobian
from quality_metrics import force_closure
zv = np.array([0, 0, 1]).reshape(3, 1)
p = np.array([2, 10, 0])
h = np.array(["H", "H"])
c1 = np.array([6, 10, 0])
c2 = np.array([-3, 10, 0])
c3 = np.array([-7, 5, 0])
R1 = np.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]])
R2 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
R3 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
q1c = np.array([9, 0, 0])
q2c = np.array([8, 6, 0])
q3c = np.array([-8, 0, 0])
q4c = np.array([-8, 3, 0])
q5c = np.array([-6, 7, 0])
def S(r):
rx = r[0]
ry = r[1]
rz = r[2]
return np.array([[0, -rz, ry], [rz, 0, -rx], [-ry, rx, 0]])
def fHi(h):
if h == "P":
return np.array([[1, 0, 0, 0, 0, 0]])
elif h == "S":
return np.concatenate((np.identity(4), np.zeros((4, 2))), axis=1)
else:
return np.concatenate((np.identity(3), np.zeros((3, 3))), axis=1)
def fH(h):
H = fHi(h[0])
for hi in h[1:]:
Hi = fHi(hi)
Hi = np.concatenate((np.zeros((Hi.shape[0], H.shape[1])), Hi), axis=1)
H = np.concatenate((H, np.zeros((H.shape[0], 6))), axis=1)
H = np.concatenate((H, Hi), axis=0)
return H
P1 = np.block([[np.identity(3), np.zeros((3, 3))], [S(c1 - p), np.identity(3)]])
P2 = np.block([[np.identity(3), np.zeros((3, 3))], [S(c2 - p), np.identity(3)]])
P3 = np.block([[np.identity(3), np.zeros((3, 3))], [S(c3 - p), np.identity(3)]])
pG1t = np.dot(block_diag([R1, R1]).transpose(), P1.transpose())
pG2t = np.dot(block_diag([R2, R2]).transpose(), P2.transpose())
pG3t = np.dot(block_diag([R3, R3]).transpose(), P3.transpose())
pGt = np.concatenate((pG1t, pG2t), axis=0)
H = fH(h)
# print(H.shape)
Gt = np.dot(H, pGt)
# print(Gt)
contact1 = Contact(c1, R1)
contact2 = Contact(c2, R2)
C =
|
np.array([contact1, contact2])
|
numpy.array
|
# -*- coding:utf-8 -*-
#######################################################################
# Copyright (C) 2016 <NAME> (<EMAIL>) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
import numpy as np
import scipy.stats as sts
from typing import Tuple
class MonteCarloOptionPricing:
def __init__(self, r, S0: float, K: float, T: float, mue: float, sigma: float, div_yield: float = 0.0,
simulation_rounds: int = 10000, no_of_slices: int = 4, fix_random_seed: bool or int = False):
"""
        An important reminder: by default, the models here rely on the assumption of constant interest rate and volatility.
:param S0: current price of the underlying asset (e.g. stock)
:param K: exercise price
:param T: time to maturity, in years, can be float
:param r: interest rate, by default we assume constant interest rate model
:param sigma: volatility (in standard deviation) of the asset annual returns
:param div_yield: annual dividend yield
:param simulation_rounds: in general, monte carlo option pricing requires many simulations
:param no_of_slices: between time 0 and time T, the number of slices PER YEAR, e.g. 252 if trading days are required
:param fix_random_seed: boolean or integer
"""
assert sigma >= 0, 'volatility cannot be less than zero'
assert S0 >= 0, 'initial stock price cannot be less than zero'
assert T >= 0, 'time to maturity cannot be less than zero'
assert div_yield >= 0, 'dividend yield cannot be less than zero'
assert no_of_slices >= 0, 'no of slices per year cannot be less than zero'
assert simulation_rounds >= 0, 'simulation rounds cannot be less than zero'
self.S0 = float(S0)
self.K = float(K)
self.T = float(T)
self.mue = float(mue)
self.div_yield = float(div_yield)
self.no_of_slices = int(no_of_slices)
self.simulation_rounds = int(simulation_rounds)
self.h = self.T / self.no_of_slices
self.r = np.full((self.simulation_rounds, self.no_of_slices), r * self.h)
self.discount_table = np.exp(np.cumsum(-self.r, axis=1))
self.sigma = np.full((self.simulation_rounds, self.no_of_slices), sigma)
self.terminal_prices = []
if type(fix_random_seed) is bool:
if fix_random_seed:
np.random.seed(15000)
elif type(fix_random_seed) is int:
np.random.seed(fix_random_seed)
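    # Hedged usage sketch (values are illustrative only, not part of the
    # original class): a one-year option on a stock at 100 with 20% volatility
    # and a 5% constant rate/drift, simulated on 252 daily slices.
    #
    #   mc = MonteCarloOptionPricing(r=0.05, S0=100.0, K=100.0, T=1.0, mue=0.05,
    #                                sigma=0.2, div_yield=0.0,
    #                                simulation_rounds=10000, no_of_slices=252,
    #                                fix_random_seed=True)
    #   mc.stock_price_simulation()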
def vasicek_model(self, r0: float, alpha: float, b: float, interest_vol: float) -> np.ndarray:
"""
vasicek model for interest rate simulation
this is the continuous-time analog of the AR(1) process.
        Interest rate in the Vasicek model can be negative.
:param r0: current interest rate
:param alpha: speed of mean-reversion
:param b: risk-free rate is mean-reverting to b
:param interest_vol: interest rate volatility (standard deviation)
:return:
"""
self.interest_z_t = np.random.standard_normal((self.simulation_rounds, self.no_of_slices))
self.interest_array = np.full((self.simulation_rounds, self.no_of_slices), r0 * self.h)
for i in range(1, self.no_of_slices):
self.interest_array[:, i] = b + np.exp(-alpha / self.no_of_slices) * (
self.interest_array[:, i - 1] - b) + np.sqrt(
interest_vol ** 2 / (2 * alpha) * (1 - np.exp(-2 * alpha / self.no_of_slices))
) * self.interest_z_t[:, i]
# re-define the interest rate array
self.r = self.interest_array
return self.interest_array
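    # Note (added sketch, an assumption about intent): the update in the loop
    # above is the exact discretization of the Ornstein-Uhlenbeck SDE
    #   dr_t = alpha * (b - r_t) dt + interest_vol * dW_t
    # with step dt = 1 / no_of_slices:
    #   r_{t+dt} = b + exp(-alpha*dt) * (r_t - b)
    #              + interest_vol * sqrt((1 - exp(-2*alpha*dt)) / (2*alpha)) * Z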
def cox_ingersoll_ross_model(self, r0: float, alpha: float, b: float, interest_vol: float) -> np.ndarray:
"""
        If asset volatility is stochastic,
        incorporate a term structure to model the risk-free rate (r)
        using the noncentral chi-square distribution. \n
        Interest rate in the CIR model cannot be negative.
"""
self.interest_z_t = np.random.standard_normal((self.simulation_rounds, self.no_of_slices))
self.interest_array = np.full((self.simulation_rounds, self.no_of_slices), r0 * self.h)
# CIR noncentral chi-square distribution degree of freedom
self.degree_freedom = 4 * b * alpha / interest_vol ** 2
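        # The transition below samples r_t from a scaled noncentral chi-square distribution:
        #   r_t = interest_vol**2 * (1 - exp(-alpha * dt)) / (4 * alpha) * chi2_nc(df, lambda_t),
        # with df = 4 * b * alpha / interest_vol**2 and
        #   lambda_t = 4 * alpha * exp(-alpha * dt) * r_{t-1} / (interest_vol**2 * (1 - exp(-alpha * dt))),
        # where dt is taken as 1 / no_of_slices in the code.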
for i in range(1, self.no_of_slices):
self.Lambda = (4 * alpha * np.exp(-alpha / self.no_of_slices) * self.interest_array[:, i - 1] / (
interest_vol ** 2 * (1 - np.exp(-alpha / self.no_of_slices))))
self.chi_square_factor = np.random.noncentral_chisquare(df=self.degree_freedom,
nonc=self.Lambda,
size=self.simulation_rounds)
self.interest_array[:, i] = interest_vol ** 2 * (1 - np.exp(-alpha / self.no_of_slices)) / (
4 * alpha) * self.chi_square_factor
# re-define the interest rate array
self.r = self.interest_array
return self.interest_array
def CIR_heston(self, r0: float, alpha_r: float, b_r: float, interest_vol: float, v0: float, alpha_v: float,
b_v: float, asset_vol: float) -> Tuple[np.ndarray, np.ndarray]:
self.df_r = 4 * b_r * alpha_r / interest_vol ** 2 # CIR noncentral chi-square distribution degree of freedom
self.df_v = 4 * b_v * alpha_v / asset_vol ** 2 # CIR noncentral chi-square distribution degree of freedom
self.interest_z_t = np.random.standard_normal((self.simulation_rounds, self.no_of_slices))
self.interest_array = np.full((self.simulation_rounds, self.no_of_slices), r0 * self.h)
self.vol_z_t = np.random.standard_normal((self.simulation_rounds, self.no_of_slices))
self.vol_array = np.full((self.simulation_rounds, self.no_of_slices), v0 / (self.T * self.no_of_slices))
for i in range(1, self.no_of_slices):
# for interest rate simulation
self.Lambda = (4 * alpha_r * np.exp(-alpha_r / self.no_of_slices) * self.interest_array[:, i - 1] / (
interest_vol ** 2 * (1 - np.exp(-alpha_r / self.no_of_slices))))
self.chi_square_factor = np.random.noncentral_chisquare(df=self.df_r,
nonc=self.Lambda,
size=self.simulation_rounds)
self.interest_array[:, i] = interest_vol ** 2 * (1 - np.exp(-alpha_r / self.no_of_slices)) / (
4 * alpha_r) * self.chi_square_factor
# for diffusion/volatility simulation
self.Lambda = (4 * alpha_v * np.exp(-alpha_v / self.no_of_slices) * self.vol_array[:, i - 1] / (
asset_vol ** 2 * (1 - np.exp(-alpha_v / self.no_of_slices))))
self.chi_square_factor = np.random.noncentral_chisquare(df=self.df_v,
nonc=self.Lambda,
size=self.simulation_rounds)
self.vol_array[:, i] = asset_vol ** 2 * (1 - np.exp(-alpha_v / self.no_of_slices)) / (
4 * alpha_v) * self.chi_square_factor
# re-define the interest rate and volatility path
self.r = self.interest_array
self.sigma = self.vol_array
return self.interest_z_t, self.vol_array
def stock_price_simulation(self) -> Tuple[np.ndarray, float]:
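        # Geometric Brownian motion, discretized exactly over each time step h:
        #   S_i = S_{i-1} * exp((mue - div_yield - sigma**2 / 2) * h + sigma * sqrt(h) * Z_i)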
self.exp_mean = (self.mue - self.div_yield - (self.sigma ** 2.0) * 0.5) * self.h
self.exp_diffusion = self.sigma * np.sqrt(self.h)
self.z_t = np.random.standard_normal((self.simulation_rounds, self.no_of_slices))
self.price_array = np.zeros((self.simulation_rounds, self.no_of_slices))
self.price_array[:, 0] = self.S0
for i in range(1, self.no_of_slices):
self.price_array[:, i] = self.price_array[:, i - 1] * np.exp(
self.exp_mean[:, i] + self.exp_diffusion[:, i] * self.z_t[:, i]
)
self.terminal_prices = self.price_array[:, -1]
self.stock_price_expectation = np.mean(self.terminal_prices)
self.stock_price_standard_error = np.std(self.terminal_prices) / np.sqrt(len(self.terminal_prices))
print('-' * 64)
print(
" Number of simulations %4.1i \n S0 %4.1f \n K %2.1f \n Maximum Stock price %4.2f \n"
" Minimum Stock price %4.2f \n Average stock price %4.3f \n Standard Error %4.5f " % (
self.simulation_rounds, self.S0, self.K, np.max(self.terminal_prices),
np.min(self.terminal_prices), self.stock_price_expectation, self.stock_price_standard_error
)
)
print('-' * 64)
return self.stock_price_expectation, self.stock_price_standard_error
def stock_price_simulation_with_poisson_jump(self, jump_alpha: float, jump_std: float, poisson_lambda: float) -> \
Tuple[np.ndarray, float]:
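        # Merton-style jump diffusion: each step combines the compensated GBM increment
        # (drift reduced by poisson_lambda * k, with k = exp(jump_alpha) - 1) and a jump factor
        #   exp(m * (jump_alpha - jump_std**2 / 2) + jump_std * sum_{j=1..m} W_j),
        # where m ~ Poisson(poisson_lambda) and W_j ~ N(0, 1).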
self.z_t_stock = np.random.standard_normal((self.simulation_rounds, self.no_of_slices))
self.price_array = np.zeros((self.simulation_rounds, self.no_of_slices))
self.price_array[:, 0] = self.S0
self.k = np.exp(jump_alpha) - 1
self.exp_mean = (self.mue - self.div_yield - poisson_lambda * self.k - (self.sigma ** 2.0) * 0.5) * self.h
self.exp_diffusion = self.sigma * np.sqrt(self.h)
for i in range(1, self.no_of_slices):
# poisson jump
self.sum_w = []
self.m = np.random.poisson(lam=poisson_lambda, size=self.simulation_rounds)
for j in self.m:
self.sum_w.append(np.sum(np.random.standard_normal(j)))
self.poisson_jump_factor = np.exp(
self.m * (jump_alpha - 0.5 * jump_std ** 2) + jump_std * np.array(self.sum_w)
)
self.price_array[:, i] = self.price_array[:, i - 1] * np.exp(
self.exp_mean[:, i] + self.exp_diffusion[:, i]
* self.z_t_stock[:, i]
) * self.poisson_jump_factor
self.terminal_prices = self.price_array[:, -1]
self.stock_price_expectation = np.mean(self.terminal_prices)
self.stock_price_standard_error = np.std(self.terminal_prices) / np.sqrt(len(self.terminal_prices))
print('-' * 64)
print(
" Number of simulations %4.1i \n S0 %4.1f \n K %2.1f \n Maximum Stock price %4.2f \n"
" Minimum Stock price %4.2f \n Average stock price %4.3f \n Standard Error %4.5f " % (
self.simulation_rounds, self.S0, self.K, np.max(self.terminal_prices),
np.min(self.terminal_prices), self.stock_price_expectation, self.stock_price_standard_error
)
)
print('-' * 64)
return self.stock_price_expectation, self.stock_price_standard_error
def european_call(self) -> Tuple[float, float]:
assert len(self.terminal_prices) != 0, 'Please simulate the stock price first'
self.terminal_profit = np.maximum((self.terminal_prices - self.K), 0.0)
self.expectation = np.mean(self.terminal_profit * np.exp(-np.sum(self.r, axis=1)))
self.standard_error = np.std(self.terminal_profit) / np.sqrt(len(self.terminal_profit))
print('-' * 64)
print(
" European call monte carlo \n S0 %4.1f \n K %2.1f \n"
" Call Option Value %4.3f \n Standard Error %4.5f " % (
self.S0, self.K, self.expectation, self.standard_error
)
)
print('-' * 64)
return self.expectation, self.standard_error
def european_put(self, empirical_call: float or None = None) -> float:
"""
Use put call parity (incl. continuous dividend) to calculate the put option value
:param empirical_call: can be calculated or observed call option value
:return: put option value
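        Put-call parity with a continuous dividend yield q: P = C + K * exp(-r*T) - S0 * exp(-q*T);
        the mean of the simulated path-wise discount factors stands in for exp(-r*T) below.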
"""
        if empirical_call is not None:
            self.european_call_value = empirical_call
        else:
            self.european_call_value = self.european_call()[0]
        self.put_value = self.european_call_value + np.mean(np.exp(-np.sum(self.r, axis=1))) * self.K - np.exp(
            -self.div_yield * self.T) * self.S0
return self.put_value
def asian_avg_price_option(self, avg_method: str = 'arithmetic', option_type: str = 'call') -> Tuple[float, float]:
assert option_type == 'call' or option_type == 'put', 'option_type must be either call or put'
assert len(self.terminal_prices) != 0, 'Please simulate the stock price first'
assert avg_method == 'arithmetic' or avg_method == 'geometric', 'arithmetic or geometric average?'
average_prices = np.average(self.price_array, axis=1)
if option_type == 'call':
self.terminal_profit = np.maximum((average_prices - self.K), 0.0)
elif option_type == 'put':
self.terminal_profit = np.maximum((self.K - average_prices), 0.0)
if avg_method == 'arithmetic':
self.expectation = np.mean(self.terminal_profit * np.exp(-np.sum(self.r, axis=1)))
elif avg_method == 'geometric':
self.expectation = sts.gmean(self.terminal_profit * np.exp(-np.sum(self.r, axis=1)))
self.standard_error = np.std(self.terminal_profit) / np.sqrt(len(self.terminal_profit))
print('-' * 64)
        print(
            " Asian %s monte carlo %s average \n S0 %4.1f \n K %2.1f \n"
            " Option Value %4.3f \n Standard Error %4.5f " % (
                option_type, avg_method, self.S0, self.K, self.expectation, self.standard_error
)
)
print('-' * 64)
return self.expectation, self.standard_error
def american_option_longstaff_schwartz(self, poly_degree: int = 2, option_type: str = 'call') -> \
Tuple[float, float]:
"""
American option, Longstaff and Schwartz method
:param poly_degree: x^n, default = 2
:param option_type: call or put
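        Sketch of the backward induction: starting from the terminal payoffs, the discounted future
        cashflow is regressed (polynomial of degree poly_degree) on the current price over the
        in-the-money paths at each earlier step; exercise is chosen whenever the intrinsic value
        exceeds the fitted continuation value, and the stopping rule and cashflows are updated.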
"""
assert option_type == 'call' or option_type == 'put', 'option_type must be either call or put'
assert len(self.terminal_prices) != 0, 'Please simulate the stock price first'
if option_type == 'call':
self.intrinsic_val = np.maximum((self.price_array - self.K), 0.0)
elif option_type == 'put':
self.intrinsic_val = np.maximum((self.K - self.price_array), 0.0)
# last day cashflow == last day intrinsic value
cf = self.intrinsic_val[:, -1]
stopping_rule = np.zeros_like(self.price_array)
stopping_rule[:, -1] = np.where(self.intrinsic_val[:, -1] > 0, 1, 0)
# Longstaff and Schwartz iteration
for t in range(self.no_of_slices - 2, 0, -1): # fill out the value table from backwards
# find out in-the-money path to better estimate the conditional expectation function
# where exercise is relevant and significantly improves the efficiency of the algorithm
itm_path = np.where(self.intrinsic_val[:, t] > 0) # <==> self.price_array[:, t] vs. self.K
cf = cf * np.exp(-self.r[:, t + 1])
Y = cf[itm_path]
X = self.price_array[itm_path, t]
# initialize continuation value
hold_val = np.zeros(shape=self.simulation_rounds)
            # if there are only a few (<= 5) in-the-money paths (most likely for deep out-of-the-money
            # options), simply assume that the value of holding = 0;
            # otherwise, run the regression and compute the conditional expectation E[Y|X].
            if len(itm_path[0]) > 5:
rg = np.polyfit(x=X[0], y=Y, deg=poly_degree) # regression fitting
hold_val[itm_path] = np.polyval(p=rg, x=X[0]) # conditional expectation E[Y|X]
# 1 <==> exercise, 0 <==> hold
stopping_rule[:, t] = np.where(self.intrinsic_val[:, t] > hold_val, 1, 0)
# if exercise @ t, all future stopping rules = 0 as the option contract is exercised.
stopping_rule[np.where(self.intrinsic_val[:, t] > hold_val), (t + 1):] = 0
# cashflow @ t, if hold, cf = 0, if exercise, cf = intrinsic value @ t.
cf = np.where(self.intrinsic_val[:, t] > 0, self.intrinsic_val[:, t], 0)
simulation_vals = (self.intrinsic_val * stopping_rule * self.discount_table).sum(axis=1)
self.expectation = np.average(simulation_vals)
self.standard_error = np.std(simulation_vals) / np.sqrt(self.simulation_rounds)
print('-' * 64)
print(
" American %s Longstaff-Schwartz method (assume polynomial fit)"
" \n polynomial degree = %i \n S0 %4.1f \n K %2.1f \n"
" Option Value %4.3f \n Standard Error %4.5f " % (
option_type, poly_degree, self.S0, self.K, self.expectation, self.standard_error
)
)
print('-' * 64)
return self.expectation, self.standard_error
def barrier_option(self, option_type: str, barrier_price: float, barrier_type: str, barrier_direction: str,
parisian_barrier_days: int or None = None) -> Tuple[float, float]:
assert option_type == 'call' or option_type == 'put', 'option type must be either call or put'
assert barrier_type == "knock-in" or barrier_type == "knock-out", 'type must be either knock-in or knock-out'
assert barrier_direction == "up" or barrier_direction == "down", 'direction must be either up or down'
if barrier_direction == "up":
barrier_check = np.where(self.price_array >= barrier_price, 1, 0)
if parisian_barrier_days is not None:
days_to_slices = int(parisian_barrier_days * self.no_of_slices / (self.T * 252))
parisian_barrier_check = np.zeros((self.simulation_rounds, self.no_of_slices - days_to_slices))
for i in range(0, self.no_of_slices - days_to_slices):
parisian_barrier_check[:, i] = np.where(
np.sum(barrier_check[:, i:i + days_to_slices], axis=1) >= parisian_barrier_days, 1, 0
)
barrier_check = parisian_barrier_check
elif barrier_direction == "down":
barrier_check = np.where(self.price_array <= barrier_price, 1, 0)
if parisian_barrier_days is not None:
days_to_slices = int(parisian_barrier_days * self.no_of_slices / (self.T * 252))
parisian_barrier_check = np.zeros((self.simulation_rounds, self.no_of_slices - days_to_slices))
for i in range(0, self.no_of_slices - days_to_slices):
parisian_barrier_check[:, i] = np.where(
np.sum(barrier_check[:, i:i + days_to_slices], axis=1) >= parisian_barrier_days, 1, 0
)
barrier_check = parisian_barrier_check
if option_type == 'call':
self.intrinsic_val = np.maximum((self.price_array - self.K), 0.0)
elif option_type == 'put':
self.intrinsic_val = np.maximum((self.K - self.price_array), 0.0)
if barrier_type == "knock-in":
self.terminal_profit = np.where(
|
np.sum(barrier_check, axis=1)
|
numpy.sum
|
import os, glob
import numpy as np
import pandas as pd
import pickle
import torch
from torch.utils.data import TensorDataset, DataLoader
from numpy.random import rand, randn
from scipy.spatial.distance import pdist, squareform
from matplotlib import pyplot as plt
from collections import defaultdict
from shapely import geometry as geo
from shapely.ops import nearest_points
from tqdm import tqdm
from abc import ABCMeta, abstractmethod
# np.seterr(divide='ignore', invalid='ignore')
class FourierCurveModel(metaclass=ABCMeta):
    n_parameters = 4 # must be an odd number times four
n_observations = 1
name = ''
@abstractmethod
def __init__(self):
pass
def flatten_coeffs(self, coeffs):
batch_size = coeffs.shape[0]
coeffs = coeffs.reshape(batch_size, -1)
return np.concatenate([coeffs.real, coeffs.imag], axis=1)
def unflatten_coeffs(self, coeffs):
batch_size = coeffs.shape[0]
real, imag = np.split(coeffs, 2, axis=1)
coeffs = real.astype(np.complex64)
coeffs.imag = imag
return coeffs.reshape(batch_size, 2, -1)
def fourier_coeffs(self, points, n_coeffs):
N = len(points) # Number of points
M = n_coeffs//2
M = min(N//2, M) # Number of positive/negative Fourier coefficients
# Vectorized equation to compute Fourier coefficients
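        # i.e. per coordinate, a_m = (1/N) * sum_{n=0..N-1} points[n] * exp(-2*pi*1j*m*n/N)
        # for m = -M..M (the standard DFT coefficients, truncated to 2M+1 terms)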
ms = np.arange(-M, M+1)
a = np.sum(points[:,:,None] * np.exp(-2*np.pi*1j*ms[None,None,:]*np.arange(N)[:,None,None]/N), axis=0) / N
return a
def trace_fourier_curves(self, coeffs, n_points=100):
# Vectorized equation to compute points along the Fourier curve
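        # i.e. p(t) = Re( sum_{m=-M..M} a_m * exp(2*pi*1j*m*t) ), evaluated at n_points values of t in [0, 1]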
t = np.linspace(0, 1, n_points)
ms = np.arange(-(coeffs.shape[-1]//2), coeffs.shape[-1]//2 + 1)
tm = t[:,None] * ms[None,:]
points = np.sum(coeffs[:,None,:,:] * np.exp(2*np.pi*1j*tm)[None,:,None,:], axis=-1).real
return points
@abstractmethod
def sample_prior(self, n_samples, flat=True):
pass
@abstractmethod
def sample_joint(self, n_samples, flat=True):
pass
def init_plot(self, y_target=None):
return plt.figure(figsize=(7,7))
@abstractmethod
def update_plot(self, x, y_target=None, n_bold=3, show_forward=True):
pass
class LensShapeModel(FourierCurveModel):
n_parameters = 4*5 # 5 complex 2d Fourier coefficients
n_observations = 2
name = 'lens-shape'
def __init__(self):
self.name = 'lens-shape1'
def generate_lens_shape(self):
# First circle
x0, y0, r0 = 0, 0, 1 + rand()
p0 = geo.Point(x0, y0).buffer(r0)
# Second circle
r1 = 2*r0
theta = 2*np.pi * rand() # Random angle
d = 0.8 * (r0 + r1) # Distance of centers
x1, y1 = x0 + d * np.sin(theta), y0 + d * np.cos(theta)
p1 = geo.Point(x1, y1).buffer(r1)
# Intersect
shape = p0.intersection(p1)
# Center with a little noise
coords = np.array(shape.exterior.coords)
coords -= coords.mean(axis=0) + 0.5 * randn(1,2)
return coords
def sample_prior(self, n_samples, flat=True):
samples = []
for i in range(n_samples):
coords = self.generate_lens_shape()
sample = self.fourier_coeffs(coords, n_coeffs=LensShapeModel.n_parameters//4)
samples.append(sample)
samples = np.stack(samples)
if flat:
samples = self.flatten_coeffs(samples)
return samples
def sample_joint(self, n_samples, flat=True):
samples = []
labels = []
for i in tqdm(range(n_samples)):
coords = self.generate_lens_shape()
sample = self.fourier_coeffs(coords, n_coeffs=LensShapeModel.n_parameters//4)
samples.append(sample[None,...])
labels.append(self.forward_process(self.flatten_coeffs(samples[-1])))
samples = np.concatenate(samples)
labels = np.concatenate(labels)
if flat:
samples = self.flatten_coeffs(samples)
return samples, labels
def forward_process(self, x, noise=0.05):
x = self.unflatten_coeffs(x)
points = self.trace_fourier_curves(x)
features = []
for i in range(len(x)):
# Find dominant angle and largest diameter of the shape
d = squareform(pdist(points[i]))
max_idx = np.unravel_index(d.argmax(), d.shape)
p0, p1 = points[i,max_idx[0]], points[i,max_idx[1]]
# features.append((angle, max_diameter))
features.append(((p1-p0)[1], (p1-p0)[0]))
features = np.array(features)
return features + noise * randn(*features.shape)
def update_plot(self, x, y_target=None, n_bold=3, show_forward=True):
plt.gcf().clear()
x = self.unflatten_coeffs(np.array(x))
points = self.trace_fourier_curves(x)
for i in range(len(points)):
plt.plot(points[i,:,0], points[i,:,1], c=(0,0,0,min(1,10/len(points))))
if i >= len(points) - n_bold:
plt.plot(points[i,:,0], points[i,:,1], c=(0,0,0))
if show_forward:
if y_target is not None:
diff_1, diff_0 = y_target
# Visualize angle and scale
# TODO
# Plot dominant angle and largest diameter of the shape
d = squareform(pdist(points[i]))
max_idx = np.unravel_index(d.argmax(), d.shape)
d0, d1 = points[i,max_idx[0]], points[i,max_idx[1]]
plt.plot([d0[0], d1[0]], [d0[1], d1[1]], c=(0,1,0), ls='-', lw=1)
plt.scatter([d0[0], d1[0]], [d0[1], d1[1]], c=[(0,1,0)], s=3, zorder=10)
plt.axis('equal')
plt.axis([min(-5, points[:,:,0].min() - 1), max(5, points[:,:,0].max() + 1),
min(-5, points[:,:,1].min() - 1), max(5, points[:,:,1].max() + 1)])
class PlusShapeModel(FourierCurveModel):
n_parameters = 4*25 # 25 complex 2d Fourier coefficients
n_observations = 4
name = 'plus-shape'
def __init__(self):
self.name = 'plus-shape'
def densify_polyline(self, coords, max_dist=0.2):
# Add extra points between consecutive coordinates if they're too far apart
        segments = []
        for i in range(len(coords)):
            start = coords[(i+1) % len(coords), :]
            end = coords[i, :]
            n_steps = max(1, int(round(np.max(np.abs(end - start)) / max_dist)))
            dense = np.array([t * start + (1-t) * end for t in np.linspace(0, 1, n_steps)])
            segments.append(dense)
        return np.concatenate(segments)
def generate_plus_shape(self, forward=False, target=None):
# Properties of x and y bar
xlength = 3 + 2 * rand()
ylength = 3 + 2 * rand()
if target is None:
xwidth = .5 + 1.5 * rand()
ywidth = .5 + 1.5 * rand()
else:
if target[3] >= 1:
xwidth = target[3]*.5 + (2 - target[3]*0.5) * rand()
else:
xwidth = 0.5 + (2*target[3] - 0.5) * rand()
ywidth = xwidth/target[3]
xshift = -1.5 + 3 * rand()
yshift = -1.5 + 3 * rand()
center = np.array([0.0, 0.0])
# Create bars and compute union
xbar = geo.box(xshift - xlength/2, -xwidth/2, xshift + xlength/2, xwidth/2)
ybar = geo.box(-ywidth/2, yshift - ylength/2, ywidth/2, yshift + ylength/2)
both = xbar.union(ybar)
coords = np.array(both.exterior.coords[:-1])
# Add points inbetween, center, rotate and shift randomly
coords = self.densify_polyline(coords)
center -= coords.mean(axis=0)
coords -= coords.mean(axis=0)
if target is None:
angle = 0.5*np.pi * rand()
else:
angle = target[2]
rotation = np.array([[np.cos(angle),
|
np.sin(angle)
|
numpy.sin
|
import visualization.panda.world as wd
import robot_sim.end_effectors.gripper.robotiq85.robotiq85 as rtq85
import robot_sim.robots.ur3_dual.ur3_dual as ur3ds
import basis.robot_math as rm
import numpy as np
import modeling.collision_model as cm
import modeling.geometric_model as gm
import motion.probabilistic.rrt_connect as rrtc
import copy
import open3d as o3d
import random
from skimage.measure import LineModelND, ransac
import research_posemake_many as pose
import math
import socket
import robot_con.ur.program_builder as pb
import pickle
import time
import sympy as sp
from scipy.optimize import basinhopping
import motion.optimization_based.incremental_nik as inik
rotatedegree = 5
endthreshold = 3
objpointrange = [300, 900, -400, 800, 1051, 1500]
objpos_finalmax_lft = np.array([250, 250, 1600])
objpos_finalmax_rgt = np.array([250, -250, 1600])
## ToDo : Change the param according to the object
## param(acrylic board) ----------------------------------
objpath = "./research_flippingboard2_mm.stl"
l = 300
w = 300
h = 40
M = 4.0
g = 9.8
myu0 = 0.5
myu1 = 0.4
vmax = 30
anglemax = 20
timestep = 1.0
thetathreshold = 50
# limitdegree = 70
limitdegree = 90 + math.degrees(math.atan(h / l)) + 10
print(limitdegree)
objpos_start = np.array([.381, .250, 1.1], dtype=float)
pushpose_pre = np.array([15.46510215, -124.31216495, -22.21501633, -68.25934326, 108.02513127, 39.89826658])
pushrot = np.array([[0.02974146, -0.74159545, 0.67018776],
[0.06115005, -0.66787857, -0.74175392],
[0.99768538, 0.06304286, 0.02548492]])
## ---------------------------------------------------------
## param(stainless box) ----------------------------------------
# objpath = "./objects/TCbox.stl"
# l = 300
# w = 400
# h = 150
# M = 6.0
# g = 9.8
# myu0 = 0.4
# myu1 = 0.1
# vmax = 30
# anglemax = 20
# timestep = 1.0
# thetathreshold = 49
# # limitdegree = 125
# limitdegree = 90 + math.degrees(math.atan(h / l)) + 10
# print(limitdegree)
#
# objpos_start = np.array([381, 250, 1035], dtype=float)
# pushpose_pre = np.array([15.46510215, -124.31216495, -22.21501633, -68.25934326, 108.02513127, 39.89826658])
# pushrot = np.array([[ 0.02974146, -0.74159545, 0.67018776],
# [ 0.06115005, -0.66787857, -0.74175392],
# [ 0.99768538, 0.06304286, 0.02548492]])
## -------------------------------------------------------------
## param(plywood board) ----------------------------------------
# objpath = "./objects/400×500×44.stl"
# l = 500
# w = 400
# h = 44
# M = 6.4
# g = 9.8
# myu0 = 0.6
# myu1 = 0.3
# vmax = 45
# anglemax = 20
# timestep = 1.0
# thetathreshold = 57
# # limitdegree = 100
# limitdegree = 90 + math.degrees(math.atan(h / l)) + 10
# print(limitdegree)
#
# objpos_start = np.array([240, 140-30, 1035], dtype=float)
# pushpose_pre = np.array([12.840271549966547, -92.64224433679576, -39.088370300126584, 112.36556622471164, -92.64626048802772, 35.67784488430386])
# pushrot = np.array([[ 0.02437668, 0.74389354, 0.66785341],
# [-0.16925718, 0.66147852, -0.73061493],
# [-0.98527041, -0.09522902, 0.14203398]])
## --------------------------------------------------------------
Mg = [0, -M * g]
pulleypos = np.array([580, 370, 2500])
ropetoppos = np.array([.25, 0, 2.5])
rotate_axis = np.array([1, 0, 0])
## calibration_matrix 2020-0818
calibration_matrix = np.array([[3.95473025e-02, -8.94575014e-01, -4.45164638e-01, 7.62553715e+02],
[-9.98624616e-01, -2.00371608e-02, -4.84498644e-02, 6.67240739e+01],
[3.44222026e-02, 4.46468426e-01, -8.94137045e-01, 2.12149540e+03],
[0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
def gethangedpos(objpos, objrot):
    ## for the plywood board case
hangedpos = copy.copy(objpos) + (w / 2) * objrot[:, 0] + l * objrot[:, 1] + h * objrot[:, 2]
return hangedpos
def getobjcenter(objpos, objrot):
    ## for the plywood board case
objcenter = copy.copy(objpos) + (w / 2) * objrot[:, 0] + (l / 2) * objrot[:, 1] + (h / 2) * objrot[:, 2]
return objcenter
def getrotatecenter(objpos, objrot):
    ## for the plywood board case
rotatecenter = copy.copy(objpos) + (w / 2) * objrot[:, 0]
return rotatecenter
def getrotatecenter_after(objpos, objrot):
    ## for the plywood board case
rotatecenter = copy.copy(objpos) + (w / 2) * objrot[:, 0] + h * objrot[:, 2]
return rotatecenter
def getrefpoint(objpos, objrot):
    ## for the plywood board case
refpoint = copy.copy(objpos) + h * objrot[:, 2]
return refpoint
def getpointcloudkinect(pointrange=[]):
pcd = client.getpcd()
pcd2 = np.ones((len(pcd), 4))
pcd2[:, :3] = pcd
newpcd = np.dot(calibration_matrix, pcd2.T).T[:, :3]
if len(pointrange) > 0:
x0, x1, y0, y1, z0, z1 = pointrange
newpcd = np.array([x for x in newpcd if (x0 < x[0] < x1) and (y0 < x[1] < y1) and (z0 < x[2] < z1)])
return newpcd
## created on 2020-07-22
def getpointcloudkinectforrope_up(rbt, armname, initialpoint, pointrange):
# pcd = client.getpcd()
# pcd2 = np.ones((len(pcd), 4))
# pcd2[:, :3] = pcd
# newpcd = np.dot(calibration_matrix, pcd2.T).T[:, :3]
newpcd = getpointcloudkinect(pointrange)
finalpoint = rbt.get_gl_tcp(manipulator_name=armname)[0]
tostartvec = copy.copy(initialpoint - finalpoint)
newpcd = np.array([x for x in newpcd if x[2] < 1700])
newpcd = np.array([x for x in newpcd if rm.angle_between_vectors(tostartvec, x - finalpoint) < math.radians(30)])
return newpcd
def getpointcloudkinectforrope_down(rbt, armname, pointrange=[]):
# pcd = client.getpcd()
# pcd2 = np.ones((len(pcd), 4))
# pcd2[:, :3] = pcd
# newpcd = np.dot(calibration_matrix, pcd2.T).T[:, :3]
newpcd = getpointcloudkinect(pointrange)
initialpoint = rbt.get_gl_tcp(manipulator_name=armname)[0]
# eepos_under = copy.copy(initialpoint)
# eepos_under[2] -= 250
# refvec = copy.copy(eepos_under - initialpoint)
base.pggen.plotSphere(base.render, pos=initialpoint, radius=10, rgba=[1, 0, 0, 1])
minuszaxis = np.array([0, 0, -1])
newpcd = np.array([x for x in newpcd if 1100 < x[2] < initialpoint[2]])
    newpcd = np.array([x for x in newpcd if rm.angle_between_vectors(minuszaxis, x - initialpoint) < math.radians(40)])
return newpcd
## detect the rope with RANSAC
def doRANSAC(newpcd, threshold):
model_robust, inliers = ransac(newpcd, LineModelND, min_samples=100, residual_threshold=threshold, max_trials=1000)
outliers = inliers == False
    ## show the detected line
    ropeline = []  # keep only the points belonging to the rope
for i, eachpoint in enumerate(newpcd):
if inliers[i] == True:
# base.pggen.plotSphere(base.render, pos=newpcd[numberofrope], radius=10, rgba=[1, 0, 0, .5])
ropeline.append(newpcd[i])
return ropeline
## sort a list in ascending order (default: by z coordinate)
def ascendingorder(array, axis=2):
array = np.asarray(array)
array_ascend = array[array[:, axis].argsort(), :]
return array_ascend
## sort a list in descending order by z coordinate
def descendingorder(array, axis):
array_ascend = ascendingorder(array, axis)
array_descend = array_ascend[::-1]
return array_descend
def create_candidate_points(arm_name, initialhandpos, obstacles=None, limitation=None):
if arm_name == "lft_arm":
pointlistrange = np.array([.15, .3, .05, .3, 1.3, initialhandpos[2]])
elif arm_name == "rgt_arm":
pointlistrange = np.array([.15, .3, -.2, -.05, 1.3, initialhandpos[2]])
    if obstacles is not None and arm_name == "lft_arm":
for obs in obstacles:
            ## convert the 3d model into a point cloud and place it at the origin
obs_points = obs.sample_surface(8000)
homomat = obs.get_homomat()
obs_points_converted = np.ones((len(obs_points), 4))
obs_points_converted[:, :3] = obs_points
obs_points_converted = np.dot(homomat, obs_points_converted.T).T[:, :3]
zmax = max(obs_points_converted[:, 2]) + .15
pointlistrange[4] = zmax
# print("pointrange", pointlistrange)
if limitation is not None:
pointlistrange[3] = limitation
points = []
number = 30
for i in range(number):
x = random.uniform(pointlistrange[0], pointlistrange[1])
y = random.uniform(pointlistrange[2], pointlistrange[3])
z = random.uniform(pointlistrange[4], pointlistrange[5])
point = [x, y, z]
# print("point", point)
points.append(point)
return points
## search for grasp poses at the start point
def decidestartpose(armname, ropelinesorted, predefined_grasps, fromjnt, startpointid):
IKpossiblelist_start = []
while True:
objpos_initial = ropelinesorted[startpointid]
objrot_initial = np.eye(3)
objmat4_initial = rm.homomat_from_posrot(objpos_initial, objrot_initial)
        obj_initial = copy.deepcopy(ropeobj)  # -> Hayakawa: where is this variable defined? Also, please use obj.copy().
obj_initial.set_rgba(rgba=[1, 0, 0, .5])
obj_initial.set_homomat(objmat4_initial)
for i, eachgrasp in enumerate(predefined_grasps):
prejawwidth, prehndfc, prehndpos, prehndrotmat = eachgrasp
prehndhomomat = rm.homomat_from_posrot(prehndpos, prehndrotmat)
hndmat4_initial = np.dot(objmat4_initial, prehndhomomat)
eepos_initial = rm.homomat_transform_points(objmat4_initial, prehndfc)[:3]
eerot_initial = hndmat4_initial[:3, :3]
start = robot_s.ik(component_name=armname,
tgt_pos=eepos_initial,
tgt_rotmat=eerot_initial,
seed_jnt_values=fromjnt)
if start is not None:
original_jnt_values = robot_s.get_jnt_values(component_name=armname)
robot_s.fk(component_name=armname, jnt_values=start)
objrelmat = robot_s.cvt_gl_to_loc_tcp(armname, objpos_initial, objrot_initial)
                ## collision detection
cd_result = robot_s.is_collided(obscmlist)
if not cd_result:
IKpossiblelist_start.append([start, objrelmat, i])
robot_s.fk(component_name=armname, jnt_values=original_jnt_values)
if len(IKpossiblelist_start) > 0:
return IKpossiblelist_start, objpos_initial, objrot_initial, startpointid
startpointid = startpointid + 1
if startpointid == len(ropelinesorted):
            print("no feasible start point was found")
return [False, False, False, False]
print("startpointid = ", startpointid)
## search for grasp poses at the goal point (when a single goal point is used)
def decidegoalpose_onepoint(arm_name,
IKpossiblelist_start,
hold_pos_final,
predefined_grasps,
obscmlist):
IKpossiblelist_startgoal = []
objrot_final = np.eye(3)
objmat4_final = rm.homomat_from_posrot(hold_pos_final, objrot_final)
# obj_final = copy.deepcopy(ropeobj)
# obj_final.set_rgba(rgba=[1, 0, 0, .5])
# obj_final.set_homomat(objmat4_final)
for i in IKpossiblelist_start:
prejawwidth, prehndfc, prehndpos, prehndrotmat = predefined_grasps[i[2]]
prehndhomomat = rm.homomat_from_posrot(prehndpos, prehndrotmat)
hndmat4_final = np.dot(objmat4_final, prehndhomomat)
eepos_final = rm.homomat_transform_points(objmat4_final, prehndfc)[:3]
eerot_final = hndmat4_final[:3, :3]
fromjnt = i[0]
goal = robot_s.ik(component_name=arm_name,
tgt_pos=eepos_final,
tgt_rotmat=eerot_final,
seed_jnt_values=fromjnt)
# gm.gen_frame(pos=eepos_final, rotmat=eerot_final).attach_to(base)
# robot_s.fk(arm_name, fromjnt)
# robot_s.gen_meshmodel().attach_to(base)
# base.run()
if goal is not None:
original_jnt_values = robot_s.get_jnt_values(component_name=arm_name)
robot_s.fk(component_name=arm_name, jnt_values=goal)
cd_result = robot_s.is_collided(obscmlist)
if not cd_result:
                IKpossiblelist_startgoal.append([i[0], goal, i[1], i[2]])
robot_s.fk(component_name=arm_name, jnt_values=original_jnt_values)
if len(IKpossiblelist_startgoal) > 0:
return IKpossiblelist_startgoal
else:
        print("no feasible pose exists at the goal point")
return False
## for the first pulling motion only
def getsuitablegoalpos_first(arm_name,
IKpossiblelist_start,
objpos_initial,
objpos_finallist,
predefined_grasps):
    ## weights
w_length = 1
w_FT = 1
w_manip = 1
pullinglengthlist = []
for i, selected_objpos_final in enumerate(objpos_finallist):
pullinglength = np.linalg.norm(objpos_initial - selected_objpos_final)
pullinglengthlist.append(pullinglength)
pullinglength_ref = min(pullinglengthlist)
    ## compute the evaluation components
totalIKpossiblelist_startgoal = []
costlist = []
assessment_value_list = []
for i, selected_objpos_final in enumerate(objpos_finallist):
## pullinglength
pullinglength = pullinglengthlist[i]
pullinglength_cost = 1 - pullinglength_ref / pullinglength
## FT
zaxis = np.array([0, 0, 1])
tostartvec = objpos_initial - selected_objpos_final
theta = rm.angle_between_vectors(rm.unit_vector(tostartvec), zaxis)
FT_cost = math.cos(theta)
## manipulability
        IKpossiblelist_startgoal = decidegoalpose_onepoint(arm_name,
                                                           IKpossiblelist_start,
                                                           selected_objpos_final,
                                                           predefined_grasps,
                                                           obscmlist)
if IKpossiblelist_startgoal is not False and IKpossiblelist_start is not False:
manipulability_cost = len(IKpossiblelist_startgoal) / len(IKpossiblelist_start)
else:
manipulability_cost = -100
        ## list of each cost
        costlist.append([pullinglength_cost, FT_cost, manipulability_cost])
        ## list of usable IK solutions
        totalIKpossiblelist_startgoal.append(IKpossiblelist_startgoal)
        ## list of tostartvec, togoalvec
        # veclist.append([tostartvec, togoalvec])
        ## list of evaluation function values
assessment_value = w_length * pullinglength_cost + w_manip * manipulability_cost + w_FT * FT_cost
## [assessment_value, chosen_objpos_final]
assessment_value_list.append([assessment_value, i])
assessment_value_list = descendingorder(assessment_value_list, axis=0)
print("assessment_value_list", assessment_value_list)
return assessment_value_list, totalIKpossiblelist_startgoal, costlist
def getsuitablegoalpos_second(arm_name,
IKpossiblelist_start,
objpos_initial,
objpos_finallist,
predefined_grasps,
predictlist):
# objpos_final_under = np.array([250, 0, 1650])
    ## weights
w_length = 1
w_FT = 0
w_manip = 0
pullinglengthlist = []
for i, use_objpos_final in enumerate(objpos_finallist):
pullinglength = np.linalg.norm(objpos_initial - use_objpos_final)
pullinglengthlist.append(pullinglength)
pullinglength_ref = min(pullinglengthlist)
    ## compute the evaluation components
totalIKpossiblelist_startgoal = []
costlist = []
    ## compute the elements of the predicted values at each point
elements_for_predictlist = []
for i, use_objpos_final in enumerate(objpos_finallist):
flag = 0
togoalvec = copy.copy(use_objpos_final - objpos_initial)
d_next = np.linalg.norm(objpos_initial - use_objpos_final)
d_before, theta_before, theta_beforebefore = predictlist
        ## predicted value of the next angle
theta_next = theta_before + (theta_before - theta_beforebefore) * (d_next / d_before)
if theta_next > thetathreshold:
d_next = (thetathreshold - theta_before) * (d_before / (theta_before - theta_beforebefore))
use_objpos_final = copy.copy(objpos_initial) + d_next * rm.unit_vector(togoalvec)
togoalvec = copy.copy(use_objpos_final - objpos_initial)
flag = 1
elements_for_predictlist.append([d_next, theta_next, use_objpos_final, flag, togoalvec])
    ## compute the evaluation values
value_plus_element = []
for i, eachpos in enumerate(objpos_finallist):
use_element = elements_for_predictlist[i]
use_objpos_final = use_element[2]
## pullinglength
pullinglength = pullinglengthlist[i]
pullinglength_cost = 1 - pullinglength_ref / pullinglength
print("length cost = ", pullinglength_cost)
## FT
zaxis = np.array([0, 0, 1])
togoalvec = use_element[4]
tostartvec = copy.copy(togoalvec) * (-1)
degree = rm.angle_between_vectors(rm.unit_vector(tostartvec), zaxis)
FT_cost = math.cos(degree)
print("force cost = ", FT_cost)
        ## object information at the predicted position
obj_predict = copy.deepcopy(obj)
objectpos = copy.copy(objpos_start)
objectrot = rm.rotmat_from_axangle(rotate_axis, math.radians(use_element[1]))
objmat_predict = rm.homomat_from_posrot(objectpos, objectrot)
        obj_predict.set_homomat(objmat_predict)
        ## add the object at the predicted position as an obstacle
obscmlist.append(obj_predict)
pickle.dump(obscmlist, open("obscmlist.pickle", "wb"))
## manipulability
        IKpossiblelist_startgoal = decidegoalpose_onepoint(arm_name,
                                                           IKpossiblelist_start,
                                                           use_objpos_final,
                                                           predefined_grasps,
                                                           obscmlist)
if IKpossiblelist_startgoal is not False and IKpossiblelist_start is not False:
manipulability_cost = len(IKpossiblelist_startgoal) / len(IKpossiblelist_start)
else:
manipulability_cost = -100
obscmlist.pop(-1)
print("manipulation cost = ", manipulability_cost)
        ## list of each cost
        costlist.append([pullinglength_cost, FT_cost, manipulability_cost])
        ## list of usable IK solutions
        totalIKpossiblelist_startgoal.append(IKpossiblelist_startgoal)
        ## list of evaluation function values
assessment_value = w_length * pullinglength_cost + w_manip * manipulability_cost + w_FT * FT_cost
## value_plus_element : [assessment_value, i, d_next, theta_next, use_objpos_final, flag, togoalvec]
value_plus_element.append([assessment_value, i] + use_element)
# ## [assessment_value, chosen_objpos_final]
# assessment_value_list.append([assessment_value, i])
# assessment_value_list = descendingorder(assessment_value_list, axis=0)
value_plus_element = descendingorder(value_plus_element, axis=0)
assessment_value_list = value_plus_element[:, :2] ## assessment_value, i
print("assessment_value_list", assessment_value_list)
elements_for_predictlist = value_plus_element[:, 2:6] ## d_next, theta_next, use_objpos_final, flag
togoalveclist = value_plus_element[:, 6] ## togoalvec
return assessment_value_list, totalIKpossiblelist_startgoal, costlist, elements_for_predictlist, togoalveclist
## search for grasp poses at the goal point (created 02/03: the pulling direction is switched between the left and right arms)
def decidegoalpose(arm_name,
IKpossiblelist_start,
objpos_initial,
predefined_grasps,
objpos_final=np.array([260, 0, 1200]),
diff=None,
label="down"):
# tic = time.time()
IKpossiblelist_startgoal = []
# if label == "down":
# if arm_name == "lft":
# hold_pos_final = np.array([260, 100, 1400])
# else:
# hold_pos_final = np.array([260, -100, 1400])
objrot_final = np.eye(3)
    tostartvec = objpos_initial - objpos_final  ## direction vector from the goal to the start (not normalized)
    togoalvec = objpos_final - objpos_initial  ## direction vector from the start to the goal (not normalized)
togoalvec_len = np.linalg.norm(togoalvec)
togoalvec_normalize = rm.unit_vector(togoalvec)
pullinglength = copy.copy(togoalvec_len)
if label == "down":
        if diff is not None:  ## condition for the first pulling motion
if diff < togoalvec_len:
print("pass")
pullinglength = copy.copy(diff)
while True:
if label == "down":
objpos_final = objpos_initial + pullinglength * togoalvec_normalize
else:
pass
togoalvec = objpos_final - objpos_initial
print("hold_pos_final", objpos_final)
objmat4_final = rm.homomat_from_posrot(objpos_final, objrot_final)
obj_final = copy.deepcopy(ropeobj)
obj_final.set_rgba([1, 0, 0, .5])
        obj_final.set_homomat(objmat4_final)
for i in IKpossiblelist_start:
prejawwidth, prehndfc, prehndpos, prehndrotmat = predefined_grasps[i[2]]
prehndhomomat = rm.homomat_from_posrot(prehndpos, prehndrotmat)
hndmat4_final = np.dot(objmat4_final, prehndhomomat)
eepos_final = rm.homomat_transform_points(objmat4_final, prehndfc)[:3]
eerot_final = hndmat4_final[:3, :3]
# goal = robot_s.numik(eepos_final, eerot_final, arm_name)
fromjnt = i[0]
goal = robot_s.ik(component_name=arm_name,
tgt_pos=eepos_final,
tgt_rotmat=eerot_final,
seed_jnt_values=fromjnt)
if goal is not None:
original_jnt_values = robot_s.get_jnt_values(component_name=arm_name)
robot_s.fk(component_name=arm_name, jnt_values=goal)
cd_result = robot_s.is_collided(obscmlist)
if not cd_result:
                    IKpossiblelist_startgoal.append([i[0], goal, i[1], i[2]])
                robot_s.fk(component_name=arm_name, jnt_values=original_jnt_values)
if len(IKpossiblelist_startgoal) > 0:
            print("pulling the rope by " + str(pullinglength) + " mm")
return IKpossiblelist_startgoal, objpos_final, tostartvec, togoalvec
pullinglength -= 1
if pullinglength < 0:
            print("no feasible goal point exists")
return [False, False, False, False]
## search for grasp poses at the relay (intermediate) point
def decidemidpose(arm_name, IKpossiblelist_startgoal, handdir, objpos_final=None):
centerflag = 0
if objpos_final is not None:
if objpos_final[1] == 0:
centerflag = 1
    print("searching for poses at the relay point")
IKpossiblelist = []
for i in IKpossiblelist_startgoal:
direction = rm.unit_vector(handdir[i[3]]) * (-1)
distance = .08
while True:
            if objpos_final is None or centerflag == 1:  ## when the goal is at the center (hold_pos_final = None is given), also compute the relay point from the goal
                ## path to the relay point with respect to the start point
midpathstart = robot_inik_solver.gen_rel_linear_motion_with_given_conf(arm_name,
i[0],
direction,
distance,
obscmlist,
type="source")
midpathgoal = robot_inik_solver.gen_rel_linear_motion_with_given_conf(arm_name,
i[1],
direction,
distance,
obscmlist,
type="source")
if len(midpathstart) > 0 and len(midpathgoal) > 0:
# robot_s.movearmfk(midpath[-1], arm_name)
# mideepos, mideerot = robot_s.getee(arm_name)
midpathstart = midpathstart[::-1]
midjntstart = copy.copy(midpathstart[0])
midjntgoal = copy.copy(midpathgoal[0])
#### list[startjnt, goaljnt, midjntlist, midpathlist, objrelmat, id]
IKpossiblelist.append(
[i[0], i[1], [midjntstart, midjntgoal], [midpathstart, midpathgoal], i[2], i[3]])
break
else:
distance -= 1
if distance <= 30:
                        print("no relay point found for grasp pose no. " + str(i[3]))
break
else:
                ## path to the relay point with respect to the start point
midpathstart = robot_inik_solver.gen_rel_linear_motion_with_given_conf(arm_name,
i[0],
direction,
distance,
[],
type="source")
if len(midpathstart) > 0:
midpathstart = midpathstart[::-1]
midjntstart = copy.copy(midpathstart[0])
goaljnt = i[1]
#### list[startjnt, goaljnt, midjntlist, midpathlist, objrelmat, id]
IKpossiblelist.append([i[0], i[1], [midjntstart, goaljnt], [midpathstart, []], i[2], i[3]])
break
else:
distance -= 1
if distance <= 30:
                        print("no relay point relative to the start found for grasp pose no. " + str(i[3]))
break
return IKpossiblelist
def ropepullingmotion(IKpossiblelist, togoalvec, ctcallback, theta=None, theta_next=None):
for i in range(len(IKpossiblelist)):
useid = random.randint(0, len(IKpossiblelist) - 1)
use_startjnt = IKpossiblelist[useid][0]
use_objrelmat = IKpossiblelist[useid][4]
pullinglength = np.linalg.norm(togoalvec)
print("pullinglength : ", pullinglength)
togoalvec_copy = copy.copy(togoalvec)
direction = rm.unit_vector(togoalvec_copy)
obstacles_forpullingrope = copy.deepcopy(obscmlist)
if theta is not None and theta_next is not None:
currentobj = copy.deepcopy(obj)
currentrot = rm.rotmat_from_axangle(rotate_axis, theta)
currentmat = rm.homomat_from_posrot(objpos_start, currentrot)
currentobj.set_homomat(currentmat)
nextobj = copy.deepcopy(obj)
nextrot = rm.rotmat_from_axangle(rotate_axis, theta_next)
nextmat = rm.homomat_from_posrot(objpos_start, nextrot)
nextobj.set_homomat(nextmat)
i = 0.1
while True:
appendobj = copy.deepcopy(obj)
                appendrot = rm.rotmat_from_axangle(rotate_axis, theta + i)
appendmat = rm.homomat_from_posrot(objpos_start, appendrot)
appendobj.set_homomat(appendmat)
obstacles_forpullingrope.append(appendobj)
i += 0.1
if theta + i >= theta_next:
break
ropepulling = robot_inik_solver.gen_rel_linear_motion_with_given_conf(arm_name,
use_startjnt,
direction,
pullinglength,
obstacles_forpullingrope,
type="source")
        # the following call to the old ctcallback API would overwrite the result above; kept for reference only
        # ropepulling = ctcallback.getLinearPrimitive(use_startjnt, direction, pullinglength, [ropeobj], [use_objrelmat],
        #                                             obstacles_forpullingrope, type="source")
if len(ropepulling) > 0:
print("ropepulling motion planning success!")
return ropepulling, IKpossiblelist[useid], useid
print("ropepulling motion not found!")
return [False, False, False]
# return ropepulling, IKpossiblelist[useid], useid
def RRTmotion(startjoint, goaljoint, ctcallback, obscmlist, expanddis, maxtime):
tic = time.time()
smoother = sm.Smoother()
pathplanner = rrtc.RRTConnect(start=startjoint, goal=goaljoint, ctcallback=ctcallback,
starttreesamplerate=30,
endtreesamplerate=30, expanddis=expanddis,
maxiter=2000, maxtime=maxtime)
path, _ = pathplanner.planning(obscmlist)
if path is not False:
path = smoother.pathsmoothing(path, pathplanner)
return path
else:
return False
def preprocess_point_cloud(pcd, voxel_size):
print(":: Estimate normal with search radius %.3f." % 10)
o3d.geometry.PointCloud.estimate_normals(pcd, o3d.geometry.KDTreeSearchParamHybrid(radius=10, max_nn=30))
radius_feature = voxel_size * 5
print(":: Compute FPFH feature with search radius %.3f." % radius_feature)
pcd_fpfh = o3d.registration.compute_fpfh_feature(pcd,
o3d.geometry.KDTreeSearchParamHybrid(radius=10, max_nn=100))
return pcd, pcd_fpfh
def execute_global_registration(source_down, target_down, source_fpfh, target_fpfh, voxel_size):
distance_threshold = 30
print(":: RANSAC registration on downsampled point clouds.")
print(" Since the downsampling voxel size is %.3f," % voxel_size)
print(" we use a liberal distance threshold %.3f." % distance_threshold)
result = o3d.registration.registration_ransac_based_on_feature_matching(
source_down, target_down, source_fpfh, target_fpfh, distance_threshold,
o3d.registration.TransformationEstimationPointToPoint(False), 4, [
o3d.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9),
o3d.registration.CorrespondenceCheckerBasedOnDistance(distance_threshold)],
o3d.registration.RANSACConvergenceCriteria(4000000, 500))
return result
def refine_registration(source, target, result_ransac):
distance_threshold = 30
print(":: Point-to-plane ICP registration is applied on original point")
print(" clouds to refine the alignment. This time we use a strict")
print(" distance threshold %.3f." % distance_threshold)
result = o3d.registration.registration_icp(
source, target, distance_threshold, result_ransac.transformation,
o3d.registration.TransformationEstimationPointToPlane())
return result
def objectfitting(newpcd, fitobjpcd, refpoint_fitting):
samplepoint = copy.copy(newpcd)
targetpoint = sample_volume(fitobjpcd, 20000)
targetpointnew = copy.copy(targetpoint)
while True:
targetpoint = targetpointnew
voxel_size = 30
source = o3d.geometry.PointCloud()
source.points = o3d.utility.Vector3dVector(samplepoint)
target = o3d.geometry.PointCloud()
target.points = o3d.utility.Vector3dVector(targetpoint)
source_down, source_fpfh = preprocess_point_cloud(source, voxel_size)
target_down, target_fpfh = preprocess_point_cloud(target, voxel_size)
print("RANSAC start")
result_ransac = execute_global_registration(source_down, target_down,
                                                    source_fpfh, target_fpfh, voxel_size)
print(result_ransac)
print("ICP start")
result_icp = refine_registration(source, target, result_ransac)
print(result_icp)
transformation = result_icp.transformation
transmatrix =
|
np.linalg.inv(transformation)
|
numpy.linalg.inv
|
# -*- coding: utf-8 -*-
"""
.. module: planttype
:synopsis: APES-model component
.. moduleauthor:: <NAME>
Describes planttype-specific processes such as dry leaf gas exchange, leaf energy
balance, root uptake, seasonal cycle of LAI and photosynthetic capacity.
Based on MatLab implementation by <NAME>.
Created on Tue Oct 02 09:04:05 2018
Note:
migrated to python3
- absolute imports
Changes by SL 15.11.2019:
- self.StomaModel set to 'MEDLYN-FARQUHAR'
- init works also without 'phenop'or 'laip' in case ctr['pheno_cycle'] or
ctr['seasonal_LAI'] == False
- changed output grouping and return arguments
- added sunlit/shaded leaves outputs
- source-terms for canopy.microment now updated explicitly in canopy
Todo:
- update self.run call args documentation (radiation-related part)
- update self.run; get rid of photo.leaf_interface & bring parts as class functions
if energy_balance == True:
self.run_leaf_energy_balance(args)
else:
self.run_leaf_gas_exchange(args)
- add trunk water transport & storage (porous-media approach, tree-Richards)
to solve leaf water potential and transpiration limitations by water transport
- move water stress equations as separate def?
References:
<NAME>., <NAME>., <NAME>. and <NAME>., 2015. Coupling boreal
forest CO2, H2O and energy flows by a vertically structured forest canopy –
Soil model with separate bryophyte layer. Ecological modelling, 312, pp.385-405.
"""
import numpy as np
#from copy import deepcopy
import logging
logger = logging.getLogger(__name__)
from .photo import photo_c3_medlyn_farquhar, photo_temperature_response
from .phenology import Photo_cycle, LAI_cycle
from .rootzone import RootUptake
from canopy.micromet import leaf_boundary_layer_conductance, e_sat
from canopy.interception import latent_heat
from canopy.constants import PAR_TO_UMOL, MOLAR_MASS_H2O, SPECIFIC_HEAT_AIR, EPS
H2O_CO2_RATIO = 1.6 # H2O to CO2 diffusivity ratio [-]
class PlantType(object):
r""" Contains plant-specific properties, state variables and phenological
functions.
"""
def __init__(self, z, p, dz_soil, ctr, loc):
r""" Initialises a planttype object and submodel objects
using given parameters.
Args:
z (array): canopy model nodes, height from soil surface (= 0.0) [m]
p (dict):
'name' (str): name of planttype
'LAImax' (float): maximum leaf area index [m2m-2]
'lad' (array): normalized leaf area density profile [m2m-3]
# following group needed only if ctr['pheno_cycle'] == True
'phenop' (dict): parameters for seasonal cycle of phenology
'Xo': initial delayed temperature [degC]
'fmin': minimum photocapacity [-]
'Tbase': base temperature [degC]
'tau': time constant [days]
'smax': threshold for full acclimation [degC]
# following group needed only if ctr['seasonal_LAI'] == True
'laip' (dict): parameters for LAI seasonal dynamics
'lai_min': minimum LAI, fraction of annual maximum [-]
'lai_ini': initial LAI fraction, if None lai_ini = Lai_min * LAImax
'DDsum0': degreedays at initial time [days]
'Tbase': base temperature [degC]
'ddo': degreedays at bud burst [days]
'ddur': duration of recovery period [days]
'sso': start doy of decrease, based on daylength [days]
'sdur': duration of decreasing period [days]
'photop' (dict): leaf gas-exchange and stomatal control parameters
'Vcmax': maximum carboxylation velocity [umolm-2s-1]
'Jmax': maximum rate of electron transport [umolm-2s-1]
'Rd': dark respiration rate [umolm-2s-1]
'alpha': quantum yield parameter [mol/mol]
'theta': co-limitation parameter of Farquhar-model
# 'm': stomatal parameter of Ball-Berry model
# 'La': stomatal parameter of stomatal optimality model
'g1': stomatal parameter of Medlyn A-gs model
'g0': residual conductance for CO2 [molm-2s-1]
'kn': nitrogen attenuation factor [-]; vertical scaling of Vcmax, Jmax, Rd
'beta': co-limitation parameter of Farquhar-model
'drp': drought-response parameters
'tresp' (dict): temperature sensitivity parameters
'Vcmax': [Ha, Hd, Topt]; activation energy [kJmol-1], deactivation energy [kJmol-1], optimum temperature [degC]
'Jmax': [Ha, Hd, Topt];
'Rd': [Ha]; activation energy [kJmol-1)]
'leafp' (dict): leaf properties
'lt': leaf lengthscale [m]
'par_alb': leaf Par albedo [-]
'nir_alb': leaf Nir albedo [-]
'emi': leaf emissivity [-]
'rootp' (dict): root zone properties
'root_depth': root depth [m]
'beta': shape parameter for root distribution model
'RAI_LAI_multiplier': multiplier for total fine root area index (RAI = 2*LAImax)
'fine_radius': fine root radius [m]
'radial_K': maximum bulk root membrane conductance in radial direction [s-1]
dz_soil (array): thickness of soilprofile layers, needed for rootzone [m]
ctr (dict): switches and specifications for computation
'WaterStress' (str): account for water stress using 'Rew', 'PsiL' or 'None'
'seasonal_LAI' (bool): account for seasonal LAI dynamics
'pheno_cycle' (bool): account for phenological cycle
Returns:
self (object):
.name (str)
.pheno_state(float): phenology state [0...1]
.relative_LAI (float): LAI relative to annual maximum [0...1]
.lad (array): leaf area density [m2 m-3]
.lad_normed (array): normalized leaf area density [-]
...
.Switch_pheno (bool): account for phenological cycle
.Switch_lai (bool): account for seasonal LAI dynamics
.Switch_WaterStress (bool): account for water stress in planttypes
.Pheno_Model (object): model for phenological cycle
.LAI_Model (object): model for seasonal development of LAI
.Roots (object): root properties
"""
self.Switch_pheno = ctr['pheno_cycle'] # include phenology
self.Switch_lai = ctr['seasonal_LAI'] # seasonal LAI
self.Switch_WaterStress = ctr['WaterStress'] # water stress affects stomata
#self.StomaModel = 'MEDLYN_FARQUHAR' # stomatal model
self.name = p['name']
# seasonal phenology model
if self.Switch_pheno:
self.Pheno_Model = Photo_cycle(p['phenop']) # phenology model instance
self.pheno_state = self.Pheno_Model.f # phenology state [0...1]
else:
self.pheno_state = 1.0
# dynamic LAI model
if self.Switch_lai:
# seasonality of leaf area
self.LAI_Model = LAI_cycle(p['laip'], loc) # LAI model instance
self.relative_LAI = self.LAI_Model.f # LAI relative to annual maximum [0...1]
else:
self.relative_LAI = 1.0
# physical structure
self.LAImax = p['LAImax'] # maximum annual 1-sided LAI [m2m-2]
self.LAI = self.LAImax * self.relative_LAI # current LAI
self.lad_normed = p['lad'] # normalized leaf-area density [m-1]
self.lad = self.LAI * self.lad_normed # current leaf-area density [m2m-3]
# root properties
self.Roots = RootUptake(p['rootp'], dz_soil, self.LAImax)
self.mask = np.where(self.lad > 0, 1.0, np.NaN) # 1.0 where lad>0, nan elsewhere
self.dz = z[1] - z[0]
# leaf gas-exchange parameters
self.photop0 = p['photop'] # A-gs parameters at pheno_state = 1.0 (dict)
self.photop = self.photop0.copy() # current A-gs parameters (dict)
# leaf properties
self.leafp = p['leafp'] # leaf properties (dict)
#print(self.name, self.mask)
def update_daily(self, doy, T, PsiL=0.0, Rew=1.0):
r""" Updates planttype pheno_state, gas-exchange parameters, LAI and lad.
Args:
doy (float): day of year [days]
            T (float): mean daily air temperature [degC]
PsiL (float): leaf water potential [MPa] --- CHECK??
Rew (float): relatively extractable water (-)
Note: Call once per day
"""
if self.Switch_pheno:
self.pheno_state = self.Pheno_Model.run(T, out=True)
if self.Switch_lai:
            self.relative_LAI = self.LAI_Model.run(doy, T, out=True)
self.LAI = self.relative_LAI * self.LAImax
self.lad = self.lad_normed * self.LAI
# scale photosynthetic capacity using vertical N gradient
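        # Lc below is the leaf-area index accumulated downward from the canopy top, normalized by
        # the total; the scaling factor f = exp(-kn * Lc) multiplies Vcmax, Jmax and Rd alike,
        # which preserves their mutual proportionality.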
f = 1.0
if 'kn' in self.photop0:
kn = self.photop0['kn']
Lc = np.flipud(np.cumsum(np.flipud(self.lad*self.dz)))
Lc = Lc / np.maximum(Lc[0], EPS)
f = np.exp(-kn*Lc)
# preserve proportionality of Jmax and Rd to Vcmax
self.photop['Vcmax'] = f * self.pheno_state * self.photop0['Vcmax']
self.photop['Jmax'] = f * self.pheno_state * self.photop0['Jmax']
self.photop['Rd'] = f * self.pheno_state * self.photop0['Rd']
# water stress responses: move into own sub-models?
if self.Switch_WaterStress == 'Rew':
# drought responses from Hyde scots pine shoot chambers, 2006; for 'Medlyn - model' only
b = self.photop['drp']
fm = np.minimum(1.0, (Rew / b[0])**b[1])
self.photop['g1'] = fm * self.photop0['g1']
# apparent Vcmax decrease with Rew
fv = np.minimum(1.0, (Rew / b[2])**b[3])
self.photop['Vcmax'] *= fv
self.photop['Jmax'] *= fv
self.photop['Rd'] *= fv
if self.Switch_WaterStress == 'PsiL':
PsiL = np.minimum(-1e-5, PsiL)
b = self.photop0['drp']
# medlyn g1-model, decrease with decreasing Psi
self.photop['g1'] = self.photop0['g1'] * np.maximum(0.05, np.exp(b*PsiL))
# Vmax and Jmax responses to leaf water potential. Kellomäki & Wang, 1996.
# (Note! mistake in paper eq's, these correspond to their figure)
fv = 1.0 / (1.0 + (PsiL / - 2.04)**2.78) # vcmax
fj = 1.0 / (1.0 + (PsiL / - 1.56)**3.94) # jmax
fr = 1.0 / (1.0 + (PsiL / - 2.53)**6.07) # rd
self.photop['Vcmax'] *= fv
self.photop['Jmax'] *= fj
self.photop['Rd'] *= fr
def run(self, forcing, parameters, controls):
r"""Computes dry leaf gas-exchange for shaded and sunlit leaves for timestep.
Args:
forcing (dict):
'h2o' (array): water vapor mixing ratio [mol mol-1]
'co2' (array): carbon dioxide mixing ratio [ppm]
'air_temperature' (array): air temperature [degC]
'air_pressure' (float): ambient pressure [Pa]
'wind_speed' (array): mean wind speed [m s-1]
                'par' (dict): incident and absorbed PAR [Wm-2] for sunlit & shaded leaves separately;
see structure in caller
'nir' (dict): --"-- for NIR
'lw' (dict): long-wave related inputs; see structure from caller
'parameters' (dict):
'sunlit_fraction': array [-]
'dry_leaf_fraction' (array) [-]
'controls' (dict):
'energy_balance' (boolean): True solves leaf temperature, False assumes Tleaf = air_temperature
"""
# --- compute sunlit leaves
sl = self.leaf_gas_exchange(forcing, controls, 'sunlit')
# --- compute shaded leaves
sh = self.leaf_gas_exchange(forcing, controls, 'shaded')
# --- update initial guess for leaf temperature
if controls['energy_balance']:
self.Tl_sh = sh['leaf_temperature'].copy()
self.Tl_sl = sl['leaf_temperature'].copy()
# TEST: CO2 exchange for wet leaves
gb_h, gb_c, gb_v = leaf_boundary_layer_conductance(forcing['wind_speed'], self.leafp['lt'],
forcing['air_temperature'],
forcing['wet_leaf_temperature'] - forcing['air_temperature'],
forcing['air_pressure'])
# VDP from Tl_wet
esat, s = e_sat(forcing['wet_leaf_temperature'])
Dleaf = esat / forcing['air_pressure'] - forcing['h2o']
# sunlit shaded separately
An_wet_sl, Rd_wet_sl, _, _, _, _ = photo_c3_medlyn_farquhar(self.photop,
forcing['par']['sunlit']['incident']* PAR_TO_UMOL,
forcing['wet_leaf_temperature'],
Dleaf, forcing['co2'], gb_c, gb_v, P=forcing['air_pressure'])
An_wet_sh, Rd_wet_sh, _, _, _, _ = photo_c3_medlyn_farquhar(self.photop,
forcing['par']['shaded']['incident']* PAR_TO_UMOL,
forcing['wet_leaf_temperature'],
Dleaf, forcing['co2'], gb_c, gb_v, P=forcing['air_pressure'])
An_wet = (1 - parameters['sunlit_fraction']) * An_wet_sh + parameters['sunlit_fraction'] * An_wet_sl
Rd_wet = (1 - parameters['sunlit_fraction']) * Rd_wet_sh + parameters['sunlit_fraction'] * Rd_wet_sl
# prepare outputs
pt_stats, layer_stats = self._outputs(sl, sh, Rd_wet, An_wet, f_sl=parameters['sunlit_fraction'], df=parameters['dry_leaf_fraction'])
return pt_stats, layer_stats
def leaf_gas_exchange(self, forcing, controls, leaftype):
r""" Solves leaf gas-exchange and energy balance (optionally).
Energy balance is solved using Taylor's expansion (i.e isothermal
net radiation -approximation) which eliminates need for iterations with radiation-scheme.
Args:
forcing (dict):
'h2o': water vapor mixing ratio (mol/mol)
'co2': carbon dioxide mixing ratio (ppm)
'air_temperature': ambient air temperature (degC)
'par_incident': incident PAR at leaves (umolm-2s-1)
'sw_absorbed': absorbed SW (PAR + NIR) at leaves (Wm-2)
'lw_net': net isothermal long-wave radiation (Wm-2)
'wind_speed': mean wind speed (m/s)
'air_pressure': ambient pressure (Pa)
'leaf_temperature': initial guess for leaf temperature (optional)
'average_leaf_temperature': leaf temperature used for computing LWnet (optional)
'radiative_conductance': radiative conductance used in computing LWnet (optional)
controls (dict):
'energy_balance' (bool): True computes leaf temperature by solving energy balance
'logger_info' (str)
leaftype (str): 'sunlit' / 'shaded'
Returns:
(dict):
'net_co2': net CO2 flux (umol m-2 leaf s-1)
'dark_respiration': CO2 respiration (umol m-2 leaf s-1)
'transpiration': H2O flux (transpiration) (mol m-2 leaf s-1)
'sensible_heat': sensible heat flux (W m-2 leaf)
'fr': non-isothermal radiative flux (W m-2)
'Tl': leaf temperature (degC)
'stomatal_conductance': stomatal conductance for H2O (mol m-2 leaf s-1)
'boundary_conductance': boundary layer conductance for H2O (mol m-2 leaf s-1)
'leaf_internal_co2': leaf internal CO2 mixing ratio (mol/mol)
'leaf_surface_co2': leaf surface CO2 mixing ratio (mol/mol)
<NAME> & <NAME>, Last edit 25.11.2019 / SL
"""
Ebal = controls['energy_balance']
logger_info = controls['logger_info'] + 'leaftype: ' + leaftype
# -- unpack forcing
T = np.array(forcing['air_temperature'], ndmin=1)
H2O = np.array(forcing['h2o'], ndmin=1)
P = forcing['air_pressure']
U = forcing['wind_speed']
CO2 = forcing['co2']
# incident PAR at leaftype
Qp = forcing['par'][leaftype]['incident'] * PAR_TO_UMOL # umolm-2s-1
# solve energy balance iteratively
if Ebal:
SWabs = forcing['par'][leaftype]['absorbed'] + forcing['nir'][leaftype]['absorbed']
LWnet = forcing['lw']['net_leaf']
Rabs = SWabs + LWnet
gr = forcing['lw']['radiative_conductance']
Tl_ave = forcing['average_leaf_temperature'] # layer mean leaf temperature
# initial guess for leaf temperature
if leaftype == 'sunlit':
Tl_ini = self.Tl_sl
if leaftype == 'shaded':
Tl_ini = self.Tl_sh
# canopy nodes
ic = np.where(abs(LWnet) > 0.0)
Tl = Tl_ini.copy()
Told = Tl.copy()
# vapor pressure
esat, s = e_sat(Tl)
s = s / P # slope of esat, mol/mol / degC
Dleaf = esat / P - H2O
Lv = latent_heat(T) * MOLAR_MASS_H2O
itermax = 20
err = 999.0
iter_no = 0
while err > 0.01 and iter_no < itermax:
iter_no += 1
Told = Tl.copy()
# boundary layer conductance
gb_h, gb_c, gb_v = leaf_boundary_layer_conductance(U, self.leafp['lt'], T, 0.5 * (Tl + Told) - T, P)
# solve leaf gas-exchange
An, Rd, fe, gs_opt, Ci, Cs = photo_c3_medlyn_farquhar(self.photop, Qp, Tl, Dleaf, CO2, gb_c, gb_v, P=P)
gsv = H2O_CO2_RATIO*gs_opt
geff_v =
|
np.where(Dleaf > 0.0, (gb_v*gsv) / (gb_v + gsv), gb_v)
|
numpy.where
|
import numpy as np
from scipy.integrate import ode
def lorenz(sigma, rho, beta, tau=1.):
f = lambda t,x : [sigma*(x[1] - x[0])/tau, (x[0]*(rho - x[2]) - x[1])/tau, (x[0]*x[1] - beta*x[2])/tau]
jac = lambda t,x : [[-sigma/tau, sigma/tau, 0.],
[(rho - x[2])/tau, -1./tau, -x[0]/tau],
[x[1]/tau, x[0]/tau, -beta/tau]]
return f,jac
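# Note (added for clarity, not from the original source): f above encodes the
# standard Lorenz system, time-scaled by tau:
#   dx/dt = sigma*(y - x)/tau
#   dy/dt = (x*(rho - z) - y)/tau
#   dz/dt = (x*y - beta*z)/tau
# and jac is its analytical Jacobian, which the stiff BDF integrator below can exploit.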
def simulate_lorenz(dt, n_timesteps, x0=None, sigma=10., rho=28., beta=8/3, tau=1.):
if x0 is None:
x0 = [-8, 7, 27]
f,jac = lorenz(sigma, rho, beta, tau=tau)
r = ode(f,jac).set_integrator('zvode', method='bdf')
r.set_initial_value(x0, 0.0)
x = [x0]
t = [0.0]
xprime = [f(0.0,x0)]
while r.successful() and len(x) < n_timesteps:
r.integrate(r.t + dt)
x.append(np.real(r.y))
xprime.append(f(r.t,np.real(r.y)))
t.append(r.t)
return np.array(x).T, np.array(t), np.array(xprime).T
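# Illustrative usage sketch (not part of the original source): integrate the
# Lorenz system for a few thousand steps and inspect the returned shapes;
# x and xprime come back as (3, n_timesteps), t as (n_timesteps,).
if __name__ == '__main__':
    x_demo, t_demo, xdot_demo = simulate_lorenz(dt=0.01, n_timesteps=2000)
    print(x_demo.shape, t_demo.shape, xdot_demo.shape)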
def rossler(a, b, c, tau=1.):
f = lambda t,x : [(-x[1] - x[2])/tau, (x[0] + a*x[1])/tau, (b + x[2]*(x[0] - c))/tau]
jac = lambda t,x : [[0., -1/tau, -1/tau],
[1/tau, a/tau, 0.],
[x[2]/tau, 0., x[0]/tau]]
return f,jac
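# Note (added for clarity, not from the original source): f above encodes the
# Rossler system, time-scaled by tau:
#   dx/dt = (-y - z)/tau
#   dy/dt = (x + a*y)/tau
#   dz/dt = (b + z*(x - c))/tau
# with jac its analytical Jacobian, mirroring the Lorenz helper above.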
def simulate_rossler(dt, n_timesteps, x0=None, a=0.2, b=0.2, c=5.7, tau=1.):
if x0 is None:
x0 = [0, 10, 0]
f,jac = rossler(a, b, c, tau=tau)
r = ode(f,jac).set_integrator('zvode', method='bdf')
r.set_initial_value(x0, 0.0)
x = [x0]
t = [0.0]
xprime = [f(0.0,x0)]
while r.successful() and len(x) < n_timesteps:
r.integrate(r.t + dt)
x.append(np.real(r.y))
xprime.append(f(r.t,np.real(r.y)))
t.append(r.t)
return np.array(x).T, np.array(t),
|
np.array(xprime)
|
numpy.array
|
# Author: <NAME> <<EMAIL>>
"""
This module contains the classes used to perform inference on various
graphical models.
"""
__docformat__ = 'restructuredtext'
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from scipy import sparse
import general
import graph
import cliques
import potentials
import models
import cpds
import pylab
from utilities import create_all_evidence
def test_bnet_mle():
"""EXAMPLE: MLE learning on a BNET"""
"""Create all data required to instantiate the bnet object"""
nodes = 4
dag = np.zeros((nodes, nodes))
C = 0
S = 1
R = 2
W = 3
dag[C, [R, S]] = 1
dag[R, W] = 1
dag[S, W] = 1
ns = 2 * np.ones((1, nodes))
"""Instantiate the model"""
net = models.bnet(dag, ns, [])
"""Learn the parameters"""
samples = np.array(pylab.load('./Data/lawn_samples.txt')) - 1
net.learn_params_mle(samples.copy())
"""Initialize the inference engine"""
net.init_inference_engine(exact=True)
"""Create and enter evidence"""
evidences = create_all_evidence(4, 2)
mlcs = np.array([[0, 0, 0, 0]])
for evidence in evidences:
mlc = net.max_sum(evidence)
mlcs = np.vstack((mlcs, mlc))
"""Read in expected values"""
exp_mlcs = np.array(pylab.load('./Data/bnet_mle_exact_max_sum_res.txt'))
"""Assert that the output matched the expected values"""
assert_array_equal(mlcs, exp_mlcs)
def test_bnet_EM():
"""EXAMPLE: EM learning on a BNET"""
"""Create all data required to instantiate the bnet object"""
nodes = 4
dag = np.zeros((nodes, nodes))
C = 0
S = 1
R = 2
W = 3
dag[C, [R, S]] = 1
dag[R, W] = 1
dag[S, W] = 1
ns = 2 * np.ones((1, nodes))
"""Instantiate the model"""
net = models.bnet(dag, ns, [])
"""
Load the samples, and set one sample of one node to be unobserved, this
    should not affect the learnt parameters much, and will demonstrate that
the algorithm can handle unobserved samples.
"""
samples = (np.array(pylab.load('./Data/lawn_samples.txt')) - 1).tolist()
samples[0][0] = []
"""Learn the parameters"""
net.learn_params_EM(samples[:])
"""Initialize the inference engine"""
net.init_inference_engine(exact=True)
"""Create and enter evidence"""
evidences = create_all_evidence(4, 2)
mlcs = np.array([[0, 0, 0, 0]])
for evidence in evidences:
mlc = net.max_sum(evidence)
mlcs =
|
np.vstack((mlcs, mlc))
|
numpy.vstack
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def flatten(l): return flatten(l[0]) + (flatten(l[1:]) if len(l) > 1 else []) if type(l) is list else [l]
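# e.g. flatten([[1, [2]], 3]) -> [1, 2, 3]; recursively unnests arbitrarily nested lists
# (illustrative note, not from the original source).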
#load the data
n_sub_iter=pd.read_csv("n_sub_iter_vA_swimmer_sn.csv")
rewards_snapshot=pd.read_csv("rewards_snapshot_swimmer_vA_sn.csv")
rewards_subiter=pd.read_csv("rewards_subiter_swimmer_vA_sn.csv")
variance_sgd=pd.read_csv("variance_sgd_vA_swimmer_sn.csv")
variance_svrg=pd.read_csv("variance_svrg_vA_swimmer_sn.csv")
importance_weights=pd.read_csv("importance_weights_vA_swimmer_sn.csv")
#gpomdp_rewards=pd.read_csv("GPOMDP_AVAR_rewards2.csv")
#analize
#x=rewards_snapshot["rewardsSnapshot0"]; np.array(x[0][1:-1].split()) creates an array containing the rewards in the first snapshot
# per-trajectory analysis
avg_traj_rewards=list()
for col_name_s,col_name_si,col_name_nsi in zip(rewards_snapshot,rewards_subiter,n_sub_iter):
first = np.insert(np.array(np.cumsum(n_sub_iter[col_name_nsi][~np.isnan(n_sub_iter[col_name_nsi])])),0,0) #np.cumsum(n_sub_iter["nSubIter0"][~np.isnan(n_sub_iter["nSubIter0"])])
ranges = n_sub_iter[col_name_nsi][~np.isnan(n_sub_iter[col_name_nsi])]
traj_rewards = list()
for i,k,s in zip(first[0:-1],ranges,range(len(ranges))):
traj_rewards.append(list(rewards_snapshot[col_name_s][s][1:-1].split())[:100])
for j in range(np.int(k)):
traj_rewards.append(list(rewards_subiter[col_name_si][i+j][1:-1].split())[:10])
if(len(ranges)<len(rewards_snapshot[col_name_s][rewards_snapshot[col_name_s]==rewards_snapshot[col_name_s]])):
traj_rewards.append(list(rewards_snapshot[col_name_s][np.int(s+1)][1:-1].split()))
traj_rewards=flatten(traj_rewards)
avg_traj_rewards.append(traj_rewards)
lenghts=list()
for i in avg_traj_rewards:
lenghts.append(len(i))
min_len=min(lenghts)
temp=list()
for i in avg_traj_rewards:
temp.append(i[:min_len])
avg_traj_rewards=temp
avg_traj_rewards=np.asarray(np.mean(np.matrix(avg_traj_rewards,dtype=np.float64),axis=0)).flatten()
plt.plot(avg_traj_rewards)
plt.legend(["Average Reward"],loc="lower right")
plt.savefig("per_tra_analisis.jpg", figsize=(32, 24), dpi=160)
plt.show()
# per-update analysis
avg_traj_rewards=list()
for col_name_s,col_name_si,col_name_nsi in zip(rewards_snapshot,rewards_subiter,n_sub_iter):
first = np.insert(np.array(np.cumsum(n_sub_iter[col_name_nsi][~np.isnan(n_sub_iter[col_name_nsi])])),0,0) #np.cumsum(n_sub_iter["nSubIter0"][~np.isnan(n_sub_iter["nSubIter0"])])
ranges = n_sub_iter[col_name_nsi][~np.isnan(n_sub_iter[col_name_nsi])]
traj_rewards = list()
for i,k,s in zip(first[0:-1],ranges,range(len(ranges))):
traj_rewards.append(np.mean(np.asarray(rewards_snapshot[col_name_s][s][1:-1].split(),dtype=np.float64)))
for j in range(np.int(k)):
traj_rewards.append(np.mean(np.asarray(rewards_subiter[col_name_si][i+j][1:-1].split(),dtype=np.float64)))
if(len(ranges)<len(rewards_snapshot[col_name_s][rewards_snapshot[col_name_s]==rewards_snapshot[col_name_s]])):
traj_rewards.append(np.mean(np.asarray(rewards_snapshot[col_name_s][
|
np.int(s+1)
|
numpy.int
|
""" Test out some crucial linear GS tests in parallel with distributed comps."""
from openmdao.jacobians.jacobian import Jacobian
import unittest
import itertools
import numpy as np
import openmdao.api as om
from openmdao.test_suite.components.distributed_components import DistribCompDerivs, SummerDerivs
from openmdao.test_suite.components.paraboloid_distributed import DistParab, DistParabFeature, \
DistParabDeprecated
from openmdao.utils.mpi import MPI
from openmdao.utils.name_maps import rel_name2abs_name
from openmdao.utils.array_utils import evenly_distrib_idxs
from openmdao.utils.assert_utils import assert_near_equal, assert_check_partials
try:
from pyoptsparse import Optimization as pyoptsparse_opt
except ImportError:
pyoptsparse_opt = None
try:
from parameterized import parameterized
except ImportError:
from openmdao.utils.assert_utils import SkipParameterized as parameterized
try:
from openmdao.vectors.petsc_vector import PETScVector
except ImportError:
PETScVector = None
if MPI:
rank = MPI.COMM_WORLD.rank
else:
rank = 0
class DistribExecComp(om.ExecComp):
"""
An ExecComp that uses N procs and takes input var slices. Unlike a normal
ExecComp, it only supports a single expression per proc. If you give it
multiple expressions, it will use a different one in each proc, repeating
the last one in any remaining procs.
"""
def __init__(self, exprs, arr_size=11, **kwargs):
super().__init__(exprs, **kwargs)
self.arr_size = arr_size
self.options['distributed'] = True
def setup(self):
outs = set()
allvars = set()
exprs = self._exprs
kwargs = self._kwargs
comm = self.comm
rank = comm.rank
if len(self._exprs) > comm.size:
raise RuntimeError("DistribExecComp only supports up to 1 expression per MPI process.")
if len(self._exprs) < comm.size:
# repeat the last expression for any leftover procs
self._exprs.extend([self._exprs[-1]] * (comm.size - len(self._exprs)))
self._exprs = [self._exprs[rank]]
# find all of the variables and which ones are outputs
for expr in exprs:
lhs, _ = expr.split('=', 1)
outs.update(self._parse_for_out_vars(lhs))
v, _ = self._parse_for_names(expr)
allvars.update(v)
sizes, offsets = evenly_distrib_idxs(comm.size, self.arr_size)
start = offsets[rank]
end = start + sizes[rank]
for name in outs:
if name not in kwargs or not isinstance(kwargs[name], dict):
kwargs[name] = {}
kwargs[name]['val'] = np.ones(sizes[rank], float)
for name in allvars:
if name not in outs:
if name not in kwargs or not isinstance(kwargs[name], dict):
kwargs[name] = {}
meta = kwargs[name]
meta['val'] = np.ones(sizes[rank], float)
meta['src_indices'] = np.arange(start, end, dtype=int)
super().setup()
class DistribCoordComp(om.ExplicitComponent):
def setup(self):
comm = self.comm
rank = comm.rank
if rank == 0:
self.add_input('invec', np.zeros((5, 3)), distributed=True,
src_indices=[[0,0,0,1,1,1,2,2,2,3,3,3,4,4,4],[0,1,2,0,1,2,0,1,2,0,1,2,0,1,2]])
self.add_output('outvec', np.zeros((5, 3)), distributed=True)
else:
self.add_input('invec', np.zeros((4, 3)), distributed=True,
# use some negative indices here to
# make sure they work
src_indices=[[5,5,5,6,6,6,7,7,7,-1,8,-1],[0,1,2,0,1,2,0,1,2,0,1,2]])
self.add_output('outvec', np.zeros((4, 3)), distributed=True)
def compute(self, inputs, outputs):
if self.comm.rank == 0:
outputs['outvec'] = inputs['invec'] * 2.0
else:
outputs['outvec'] = inputs['invec'] * 3.0
def _test_func_name(func, num, param):
args = []
for p in param.args:
try:
arg = p.__name__
except:
arg = str(p)
args.append(arg)
return func.__name__ + '_' + '_'.join(args)
@unittest.skipUnless(MPI and PETScVector, "MPI and PETSc are required.")
class MPITests2(unittest.TestCase):
N_PROCS = 2
def test_distrib_shape(self):
points = np.array([
[0., 0., 0.],
[0., 0., 1.],
[0., 1., 0.],
[0., 1., 1.],
[1., 0., 0.],
[1., 0., 1.],
[1., 1., 0.],
[1., 1., 1.],
[0., 0., 2.],
])
prob = om.Problem()
prob.model.add_subsystem('indep', om.IndepVarComp('x', points))
prob.model.add_subsystem('comp', DistribCoordComp())
prob.model.add_subsystem('total', om.ExecComp('y=x',
x=np.zeros((9, 3)),
y=np.zeros((9, 3))))
prob.model.connect('indep.x', 'comp.invec')
prob.model.connect('comp.outvec', 'total.x', src_indices=om.slicer[:], flat_src_indices=True)
prob.setup(check=False, mode='fwd')
prob.run_model()
final = points.copy()
final[0:5] *= 2.0
final[5:9] *= 3.0
assert_near_equal(prob['total.y'], final)
def test_two_simple(self):
size = 3
group = om.Group()
group.add_subsystem('P', om.IndepVarComp('x', np.arange(size)),
promotes_outputs=['x'])
group.add_subsystem('C1', DistribExecComp(['y=2.0*x', 'y=3.0*x'], arr_size=size,
x=np.zeros(size),
y=np.zeros(size)),
promotes_inputs=['x'])
group.add_subsystem('C2', om.ExecComp(['z=3.0*y'],
y=np.zeros(size),
z=np.zeros(size)))
prob = om.Problem()
prob.model = group
prob.model.linear_solver = om.LinearBlockGS()
prob.model.connect('C1.y', 'C2.y', src_indices=om.slicer[:])
prob.setup(check=False, mode='fwd')
prob.run_model()
J = prob.compute_totals(['C2.z'], ['x'])
assert_near_equal(J['C2.z', 'x'], np.diag([6.0, 6.0, 9.0]), 1e-6)
prob.setup(check=False, mode='rev')
prob.run_model()
J = prob.compute_totals(['C2.z'], ['x'])
assert_near_equal(J['C2.z', 'x'], np.diag([6.0, 6.0, 9.0]), 1e-6)
@parameterized.expand(itertools.product([om.NonlinearRunOnce, om.NonlinearBlockGS]),
name_func=_test_func_name)
def test_fan_out_grouped(self, nlsolver):
size = 3
prob = om.Problem()
prob.model = root = om.Group()
root.add_subsystem('P', om.IndepVarComp('x', np.ones(size, dtype=float)))
root.add_subsystem('C1', DistribExecComp(['y=3.0*x', 'y=2.0*x'], arr_size=size,
x=np.zeros(size, dtype=float),
y=np.zeros(size, dtype=float)))
sub = root.add_subsystem('sub', om.ParallelGroup())
sub.add_subsystem('C2', om.ExecComp('y=1.5*x',
x=np.zeros(size),
y=np.zeros(size)))
sub.add_subsystem('C3', om.ExecComp(['y=5.0*x'],
x=np.zeros(size, dtype=float),
y=np.zeros(size, dtype=float)))
root.add_subsystem('C2', om.ExecComp(['y=x'],
x=np.zeros(size, dtype=float),
y=np.zeros(size, dtype=float)))
root.add_subsystem('C3', om.ExecComp(['y=x'],
x=
|
np.zeros(size, dtype=float)
|
numpy.zeros
|
# test CurlypivImageCollection
"""
Notes about program
"""
# 1.0 import modules
# data I/O
import os
from os import listdir
from os.path import isfile, basename, join, isdir
from collections import OrderedDict
import glob
# quality control and debugging
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
# Maths/Scientifics
import numpy as np
import pandas as pd
# Image Processing
from skimage import io
import cv2 as cv
# Plotting
import matplotlib.pyplot as plt
# Curlypiv
from curlypiv.CurlypivFile import CurlypivFile
from curlypiv.CurlypivUtils import find_substring
import curlypiv.CurlypivImageProcessing as CurlypivImageProcessing
import curlypiv.CurlypivPlotting as CurlypivPlotting
# 2.0 define class
class CurlypivTestCollection(object):
def __init__(self, collectionName, dirpath, file_type,
process=None, electric_field_strengths=None, frequencies=None,
runs=None, seqs=None, tests=None,
bpe_specs=None, backsub_specs=None, processing_specs=None, thresholding_specs=None, cropping_specs=None, resizing_specs=None,
dir_tests='tests', dir_bg='background', dir_results='results',
locid = 'loc', testid = ('E', 'Vmm'), runid = ('run', 'num'), seqid = ('test_', '_X'), frameid = '_X',
load_files=False, file_lim=25, exclude=[],
calibration_grid_path=None, calibration_illum_path=None, calibration_camnoise_path=None,
):
super(CurlypivTestCollection, self).__init__()
if not isdir(dirpath):
raise ValueError("Specified folder {} does not exist".format(dirpath))
self._collectionName = collectionName
self.dirpath = dirpath
self.dir_tests = dir_tests
self._path = join(self.dirpath, self.dir_tests)
self.dir_bg = dir_bg
self.dir_results = dir_results
self.file_type = file_type
self.locid = locid
self.testid = testid
self.runid = runid
self.seqid = seqid
self.frameid = frameid
self.file_lim = file_lim
self.load_files = load_files
self._find_locs(exclude=exclude)
self._add_locs()
self._get_size()
self.bpe_specs = bpe_specs
self.cropping_specs = cropping_specs
self.resizing_specs = resizing_specs
self.backsub_specs = backsub_specs
self.processing_specs = processing_specs
self.thresholding_specs = thresholding_specs
# data structure dependent files
if calibration_grid_path is None:
self.grid_path = join(self.dirpath, 'setup/calibration/microgrid')
if calibration_illum_path is None:
self.illum_path = join(self.dirpath, 'setup/calibration/illumination')
else:
self.illum_path = join(self.dirpath, 'setup/calibration/illumination', calibration_illum_path)
if calibration_camnoise_path is None:
self.camnoise_path = join(self.dirpath, 'setup/calibration/cameraNoise')
def __len__(self):
return self._size
def __getitem__(self, item):
key = list(self.files.keys())[item]
return self.files[key]
def __repr__(self):
class_ = 'CurlypivFileCollection'
repr_dict = {'Name': self._collectionName,
'Dirpath': self.dirpath,
'Filetype': self.file_type,
'Test collection identifier': self.dir_tests,
'Number of files': len(self),
'Cropping': self.cropping_specs,
'Preprocessing': self.processing_specs}
out_str = "{}: \n".format(class_)
for key, val in repr_dict.items():
out_str += '{}: {} \n'.format(key, str(val))
return out_str
def _find_locs(self, exclude=[]):
all_locs = listdir(join(self.path))
all_locs.sort()
save_locs = []
for lc in [l for l in all_locs if l.startswith(self.locid)]:
if lc in exclude:
continue
save_locs.append((lc))
if len(save_locs) < 1:
raise ValueError("No locs found in /{} with {} loc id".format(self.dir_tests, self.locid))
self.loclist = save_locs
def _add_locs(self):
locs = OrderedDict()
for lc in [l for l in self.loclist if l.startswith(self.locid)]:
loc = CurlypivLocation(join(self.path,lc), file_type=self.file_type,
testid=self.testid,runid=self.runid,seqid=self.seqid,frameid=self.frameid,
load_files=self.load_files, file_lim=self.file_lim)
locs.update({loc._locname: loc})
logger.warning('Loaded loc{}'.format(loc._locname))
if len(locs) < 1:
raise ValueError("No locs found in {} with ...{} loc id".format(self.dir_tests, self.locid))
self.locs = locs
#def get_files(self, loc=None, test=None, run=None, sequence=None, file=None):
# method to retrieve all files in the specified container
def add_img_testset(self, loc, test, run, seq, level='seq'):
lev = self
if level == 'seq':
levels = [loc, test, run, seq]
for l in levels:
lev = lev.get_sublevel(l)
self.img_testset = lev
def filter_images(self):
for image in self.tests.values():
image.filter_image
def _get_size(self):
size = 0
for key, values in self.locs.items(): size += values.size
self._size = size
def get_sublevel(self, key):
sub = self.locs
for k, v in sub.items():
if k == key:
item = v
return item
def get_sublevel_all(self):
sub = self.locs
all_subs = []
for k, v in sub.items():
all_subs.append(v)
return all_subs
@property
def name(self):
return self._collectionName
@property
def path(self):
return self._path
@property
def size(self):
return self._size
@property
def collectionname(self):
return self._collectionName
# 2.0 define class
class CurlypivLocation(object):
def __init__(self, dirpath, file_type,
locid = 'loc', testid = ('E', 'Vmm'), runid = ('run', 'num'), seqid = ('test_', '_X'), frameid = '_X',
load_files=False, file_lim=25, exclude=[]):
self.dirpath = dirpath
self.file_type = file_type
self.locid = locid
self.testid = testid
self.runid = runid
self.seqid = seqid
self.frameid = frameid
self.file_lim = file_lim
self.load_files = load_files
self._locname = find_substring(string=self.dirpath, leadingstring=self.locid, dtype=int)[0]
self._find_tests()
self._add_tests()
self._get_size()
def __repr__(self):
class_ = 'CurlypivLocation'
repr_dict = {'Dirpath': self.dirpath,
'Filetype': self.file_type,
'Location identifier': self._locname,
'Test list': self.test_list,
'Total number of images': self._size}
out_str = "{}: \n".format(class_)
for key, val in repr_dict.items():
out_str += '{}: {} \n'.format(key, str(val))
return out_str
def _find_tests(self, exclude=[]):
"""
Identifies all tests in a location folder
:param exclude:
:return:
"""
        # step 0: assess the length of the test_id
test_ids = len(self.testid) // 2
# step 1: find all files in directory and add to list
all_tests = listdir(self.dirpath)
save_tests = []
for test in [t for t in all_tests if t.endswith(self.testid[-1])]:
if test in exclude:
continue
save_tests.append(test)
if len(save_tests) < 1:
raise ValueError("No runs found in {} with run...{} format".format(self.dirpath, self.testid[-1]))
# step 2: sort files based on sequence id and frame id
if test_ids > 1:
save_tests.sort(key=lambda save_tests: find_substring(string=save_tests, leadingstring=self.testid[0],
trailingstring=self.testid[1], leadingsecond=self.testid[2],
trailingsecond=self.testid[3], dtype=float, magnitude=False))
else:
save_tests.sort(key=lambda save_tests: find_substring(string=save_tests, leadingstring=self.testid[0],
trailingstring=self.testid[1], dtype=float, magnitude=True))
# save all the files of the right filetype in this attribute
self.test_list = save_tests
logger.warning(
"Found {} runs in directory {}".format(len(self.test_list), self.dirpath))
def _add_tests(self):
tests = OrderedDict()
for tst in [t for t in self.test_list if t.endswith(self.testid[-1])]:
test = CurlypivTest(join(self.dirpath,tst), file_type=self.file_type,
testid=self.testid, runid=self.runid, seqid=self.seqid, frameid=self.frameid,
load_files=self.load_files, file_lim=self.file_lim)
j = 1
tests.update({test._testname: test})
logger.warning('Loaded test {}'.format(test._testname))
if len(tests) < 1:
raise ValueError("No tests found in test {} with ...{} test id".format(self.dir_tests, self.testid))
self.tests = tests
def _get_size(self):
size = 0
for key, values in self.tests.items(): size += values.size
self._size = size
def get_sublevel(self, key):
sub = self.tests
for k, v in sub.items():
if k == key:
item = v
return item
def get_sublevel_all(self):
sub = self.tests
all_subs = []
for k, v in sub.items():
all_subs.append(v)
return all_subs
@property
def name(self):
return self._locname
@property
def path(self):
return self.dirpath
@property
def size(self):
return self._size
@property
def locname(self):
return self._locname
# 2.0 define class
class CurlypivTest(object):
def __init__(self, dirpath, file_type,
testid = ('E', 'Vmm'), runid = ('run', 'num'), seqid = ('test_', '_X'), frameid = '_X',
load_files=False, file_lim=25, exclude=[]):
self.dirpath = dirpath
self.file_type = file_type
self.testid = testid
self.runid = runid
self.seqid = seqid
self.frameid = frameid
self.file_lim = file_lim
self.load_files = load_files
        # step 0: assess the length of the test_id
test_ids = len(self.testid) // 2
# step 1: assign the test name as a tuple (E,) or (E, f)
if test_ids == 1:
self._testname = (find_substring(string=self.dirpath, leadingstring=self.testid[0], trailingstring=self.testid[1],
dtype=float)[0],)
self._E = self._testname[0]
else:
testname = find_substring(string=self.dirpath, leadingstring=self.testid[0], trailingstring=self.testid[1],
leadingsecond=self.testid[2], trailingsecond=self.testid[3], dtype=float)
self._testname = (testname[0], testname[1])
self._E = self._testname[0]
self._f = self._testname[1]
self._find_runs()
self._add_runs()
self._get_size()
def __len__(self):
return len(self.run_list)
def __repr__(self):
class_ = 'CurlypivTest'
repr_dict = {'Dirpath': self.dirpath,
'Filetype': self.file_type,
'Test identifier': self._testname,
'Electric field': self._E,
'Frequency': self._f,
'Run list': self.run_list,
'Total number of images': self._size}
out_str = "{}: \n".format(class_)
for key, val in repr_dict.items():
out_str += '{}: {} \n'.format(key, str(val))
return out_str
def _find_runs(self, exclude=[]):
"""
Identifies all runs in a test folder
:param exclude:
:return:
"""
# step 1: find all files in directory and add to list
all_runs = listdir(self.dirpath)
save_runs = []
for run in [r for r in all_runs if r.endswith(self.runid[1])]:
if run in exclude:
continue
save_runs.append(run)
if len(save_runs) < 1:
raise ValueError("No runs found in {} with run...{} format".format(self.dirpath, self.runid[1]))
# step 2: sort files based on sequence id and frame id
save_runs.sort(key=lambda save_runs: find_substring(string=save_runs, leadingstring=self.runid[0],
trailingstring=self.runid[1], dtype=int,
magnitude=False))
# save all the files of the right filetype in this attribute
self.run_list = save_runs
logger.warning(
"Found {} runs in directory {}".format(len(self.run_list), self.dirpath))
def _add_runs(self):
runs = OrderedDict()
for f in self.run_list:
file = CurlypivRun(join(self.dirpath,f), file_type=self.file_type,
runid = self.runid, seqid = self.seqid, frameid = self.frameid,
load_files=self.load_files, file_lim=self.file_lim)
runs.update({file._runname: file})
logger.warning('Loaded run {}'.format(file._runname))
self.runs = runs
def _get_size(self):
size = 0
for key, values in self.runs.items(): size += values.size
self._size = size
def get_sublevel(self, key):
sub = self.runs
for k, v in sub.items():
if k == key:
item = v
return item
def get_sublevel_all(self):
sub = self.runs
all_subs = []
for k, v in sub.items():
all_subs.append(v)
return all_subs
def add_piv_data(self, zeta=False, x=None, testname=None):
if zeta:
u_mag_bkg = []
u_mag_mean = []
u_mag_std = []
u_mean = []
v_mean = []
for run in self.runs.values():
u_mag_bkg.append(run.u_mag_bkg)
u_mag_mean.append(run.u_mag_mean)
u_mag_std.append(run.u_mag_std)
u_mean.append(run.u_mean)
v_mean.append(run.v_mean)
self.u_mag_bkg = np.round(np.mean(u_mag_bkg),1)
self.u_mag_mean = np.round(np.mean(u_mag_mean), 1)
self.u_mag_std = np.round(np.mean(u_mag_std), 1)
self.u_mean = np.round(np.mean(u_mean), 1)
self.v_mean = np.round(np.mean(v_mean), 1)
else:
u_mean_columns = []
u_mean_columns_std = []
for run in self.runs.values():
u_mean_columns.append(run.u_mean_columns)
u_mean_columns_std.append(run.u_mean_columns_std)
self.u_mean_x = x
self.u_mean_columns = np.round(np.mean(run.u_mean_columns, axis=0), 1)
self.u_mean_columns_std = np.round(np.mean(run.u_mean_columns_std, axis=0), 2)
@property
def name(self):
return self._testname
@property
def path(self):
return self.dirpath
@property
def testname(self):
return self._testname
@property
def size(self):
return self._size
# 2.0 define class
class CurlypivRun(object):
def __init__(self, dirpath, file_type,
runid = ('run', 'num'), seqid = ('test_', '_X'), frameid = '_X',
load_files=False, file_lim=25, exclude=[]):
self.dirpath = dirpath
self.file_type = file_type
self.runid = runid
self.seqid = seqid
self.frameid = frameid
self.file_lim = file_lim
self.load_files = load_files
self._runname = find_substring(string=self.dirpath, leadingstring=self.runid[0], trailingstring=self.runid[1],
dtype=int)[0]
# first loop through file list to expand any stacked files
self._find_filepaths()
self._find_seqpaths()
self._add_seqs()
logger.warning("First loop through file list complete")
# second loop through file list to reorganize file list
self._find_filepaths()
self._find_seqpaths()
self._add_seqs()
logger.warning("Second loop through file list complete")
self._get_size()
def __len__(self):
        return self._size
def __repr__(self):
class_ = 'CurlypivRun'
repr_dict = {'Dirpath': self.dirpath,
'Filetype': self.file_type,
'Run identifier': self._runname,
'Sequence list': self.seq_list,
'Number of files': self._size}
out_str = "{}: \n".format(class_)
for key, val in repr_dict.items():
out_str += '{}: {} \n'.format(key, str(val))
return out_str
def _find_filepaths(self, exclude=[]):
"""
Identifies all files of filetype in folder
:param exclude:
:return:
"""
# step 1: find all files in directory and add to list
all_files = listdir(self.dirpath)
save_files = []
for file in [f for f in all_files if f.endswith(self.file_type)]:
if file in exclude or file.startswith('multifile'):
continue
save_files.append(file)
if len(save_files) < 1:
raise ValueError("No files found in {}".format(self.dirpath))
# step 2: sort files based on sequence id and frame id
save_files.sort(key=lambda save_files: find_substring(string=save_files, leadingstring=self.seqid[0],
trailingstring=self.seqid[1], dtype=int,
magnitude=False,
leadingsecond=self.frameid,
trailingsecond=self.file_type))
# save all the files of the right filetype in this attribute
self.filepaths = save_files
# step 3: identify the sequences
seq_list = []
for f in self.filepaths:
seq = find_substring(string=f, leadingstring=self.seqid[0], trailingstring=self.seqid[1],
dtype=int)[0]
if seq not in seq_list: seq_list.append(seq)
self.seq_list = seq_list
logger.warning(
"Found {} files across {} sequences of type {} in directory {}".format(len(save_files), len(self.seq_list),
self.file_type, self.dirpath))
def _find_seqpaths(self):
seq_files = []
for seq_uniq in self.seq_list:
seeqing = str(self.seqid[0])+str(seq_uniq)+str(self.seqid[1])
files = []
for file in [f for f in self.filepaths if f.find(seeqing)!=-1]:
files.append(file)
seq_files.append([seq_uniq, files])
self._seqpaths = (seq_files)
def _add_seqs(self):
seqs = OrderedDict()
for s in self._seqpaths:
seq = CurlypivSequence(self.dirpath,file_type=self.file_type, seqname=s[0],
filelist=s[1], seqid=self.seqid, frameid = self.frameid, load_files=self.load_files, file_lim=self.file_lim)
seqs.update({seq._seqname: seq})
logger.warning('Loaded sequence {}'.format(seq._seqname))
self.seqs = seqs
def _add_files(self):
files = OrderedDict()
for f in self.filepaths:
file = CurlypivFile(join(self.dirpath,f), img_type=self.file_type)
files.update({file.filename: file})
logger.warning('Loaded image {}'.format(file.filename))
self.files = files
def update_run_filelist(self):
print('yah')
def _get_size(self):
self._size = len(self.filepaths)
def get_sublevel(self, key):
sub = self.seqs
for k, v in sub.items():
if k == key:
item = v
return item
def get_sublevel_all(self):
sub = self.seqs
all_subs = []
for k, v in sub.items():
all_subs.append(v)
return all_subs
def add_piv_data(self, zeta=False, x=None, testname=None):
if zeta:
u_mag_bkg = []
u_mag_mean = []
u_mag_std = []
u_mean = []
v_mean = []
for seq in self.seqs.values():
if seq.name == 0:
u_mag_bkg.append(seq.u_mag_bkg)
u_mag_mean.append(seq.u_mag_mean)
u_mag_std.append(seq.u_mag_std)
u_mean.append(seq.u_mean)
v_mean.append(seq.v_mean)
self.u_mag_bkg = np.round(np.mean(u_mag_bkg),1)
self.u_mag_mean = np.round(np.mean(u_mag_mean), 1)
self.u_mag_std = np.round(np.mean(u_mag_std), 1)
self.u_mean = np.round(
|
np.mean(u_mean)
|
numpy.mean
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 19 20:55:29 2020
@author: Will
"""
import numpy as np
import matplotlib.pyplot as plt
from vizualization_tools import mesh_hist,superplot,mpltable,plot_mm,barhist
from tabulate import tabulate
import math
import pandas as pd
import warnings
from MN_Duality_Tools import moment_vector
import pickle
warnings.simplefilter('error',RuntimeWarning)
def truncate(number, digits):
stepper = 10.0 ** digits
newval = math.trunc(stepper * number) / stepper
if newval > 100:
newval = int(newval)
return newval
else:
return newval
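# e.g. truncate(3.14159, 2) -> 3.14, while truncate(123.456, 2) -> 123 because values
# above 100 are cast to int after truncation (illustrative note, not from the original source).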
def plot_heatmap(points,nbins,**opts):
x_vals = points[:,0]
y_vals = points[:,1]
bincounts,xbins,ybins = np.histogram2d(x_vals,y_vals,bins = nbins)
xinds,yinds = np.digitize(x_vals,xbins) - 1,np.digitize(y_vals,ybins) - 1
bounds = [xbins[0]-1,xbins[-1]+1,ybins[0]-1,ybins[-1]+1]
heat = np.zeros((nbins+1,nbins+1),dtype = float)
newbincount = np.zeros((nbins+1,nbins+1),dtype = float)
for i in range(len(xinds)):
newbincount[xinds[i],yinds[i]] += 1
heat[xinds[i],yinds[i]] += points[i,2]
hot = newbincount > 0
cold = 1-hot
temp_weights = newbincount + 1*cold
logavg_heat = np.log10(np.divide(heat,temp_weights) + \
np.min(np.divide(heat,temp_weights)[heat > 0])*cold)
fig, ax = plt.subplots()
heatmap = ax.pcolor(xbins,ybins,logavg_heat.T,cmap = 'jet')
cbar = fig.colorbar(heatmap,ax = ax)
#cbar.set_label('Log10 Avg Error', rotation = 270)
#plt.imshow(heat.T,extent = bounds,cmap = 'jet',origin = 'lower')
#plt.colorbar()
if 'xlabel' in opts:
ax.set_xlabel(opts['xlabel'])
if 'ylabel' in opts:
ax.set_ylabel(opts['ylabel'])
if 'title' in opts:
ax.set_title(opts['title'])
if 'savedir' in opts:
plt.savefig(opts['savedir'])
plt.show()
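# Illustrative usage sketch (not from the original source): `points` is an (N, 3) array
# whose first two columns are the coordinates being binned and whose third column is the
# quantity averaged (in log10) per bin, e.g. with hypothetical u0, u1, errors arrays:
#   plot_heatmap(np.column_stack([u0, u1, errors]), nbins=50,
#                xlabel='u0', ylabel='u1', title='Squared Error: Log10 Average')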
def plotlosscurve(history,bools,regularization,savefolder,saveid):
#Get the saveid and savefolder
enforce_func,enforce_grad,enforce_moment,enforce_conv = bools
if type(enforce_grad) == float:
if enforce_grad > 0:
enforce_grad = True
if type(enforce_moment) == float:
if enforce_moment > 0:
enforce_moment = True
if type(enforce_func) == float:
if enforce_func > 0:
            enforce_func = True
    if type(enforce_conv) == float:
if enforce_conv > 0:
enforce_conv = True
plt.plot(np.log10(history.history['loss']))
plt.plot(np.log10(history.history['val_loss']))
plt.legend(['training','validation'],loc = 'upper right')
plt.title('Log10 of Total Loss')
plt.xlabel('Epochs')
plt.ylabel('Log10 Loss Value')
plt.savefig(savefolder+'losscurve'+saveid+'.eps')
plt.show()
#plt.plot(np.log10(history.history['lr']))
#plt.xlabel('Epochs')
#plt.ylabel('Log10 of Learning Rate')
#plt.title('Log10 of Base Learning Rate Value')
#plt.savefig(savefolder+'learningrate'+saveid+'.eps')
#plt.show()
#Make and modify loss keys according to weights
legend_train = []
legend_validate = []
components_train = []
components_validate = []
if enforce_func == True:
legend_train.append('function')
legend_validate.append('function')
components_train.append('output_1_loss')
components_validate.append('val_output_1_loss')
if enforce_grad == True:
legend_train.append('gradient')
legend_validate.append('gradient')
components_train.append('output_2_loss')
components_validate.append('val_output_2_loss')
if enforce_moment == True:
legend_train.append('moment')
legend_validate.append('moment')
components_train.append('output_3_loss')
components_validate.append('val_output_3_loss')
if enforce_conv == True:
legend_train.append('conv')
legend_validate.append('conv')
components_train.append('output_4_loss')
components_validate.append('val_output_4_loss')
for key in components_train:
if key == 'output_4_loss':
try:
plt.plot(np.log10(history.history[key]),label = key)
except RuntimeWarning:
legend_train.remove('conv')
else:
plt.plot(np.log10(history.history[key]),label = key)
if regularization == True:
reg_vals = np.array(history.history['loss'])-\
(float(bools[0])*np.array(history.history['output_1_loss']) + float(bools[1])*np.array(history.history['output_2_loss']) +
float(bools[2])*np.array(history.history['output_3_loss'])+float(bools[3])*np.array(history.history['output_4_loss']) )
try:
plt.plot(np.log10(reg_vals),label = 'regularization')
except RuntimeWarning:
print(reg_vals)
legend_train.append('regularization')
plt.legend(legend_train,loc = 'upper right')
plt.title('Log10 of Training Loss Components')
plt.xlabel('Epochs')
plt.ylabel('Log10 Loss Value')
plt.savefig(savefolder+'complosscurvetrain'+saveid+'.eps')
plt.show()
for key in components_validate:
if key == 'val_output_4_loss':
try:
plt.plot(np.log10(history.history[key]),label = key)
except RuntimeWarning:
legend_validate.remove('conv')
else:
plt.plot(np.log10(history.history[key]),label = key)
plt.legend(legend_validate,loc = 'upper right')
plt.title('Log10 of Validation-Loss Components')
plt.xlabel('Epochs')
plt.ylabel('Log10 Loss Value')
plt.savefig(savefolder+'complosscurveval'+saveid+'.eps')
plt.show()
if enforce_conv == True and ('conv' not in legend_train):
plt.plot(history.history['output_4_loss'])
plt.title('Training Conv-Loss Value (Achieves Zero)')
plt.xlabel('Epochs')
plt.ylabel('Loss Value')
plt.savefig(savefolder+'convlosstrain'+saveid+'.eps')
if enforce_conv == True and ('conv' not in legend_validate):
plt.plot(history.history['val_output_4_loss'])
plt.title('Validation Conv-Loss Value (Achieves Zero)')
plt.xlabel('Epochs')
plt.ylabel('Loss Value')
plt.savefig(savefolder+'convlossval'+saveid+'.eps')
def quickstats(f,alpha,u,conv,savefolder,saveid):
h_pred,h_train = f
alpha_pred,alpha_train = alpha
u_pred,u_train = u
conv_pred = conv
h_pred_graph = np.hstack((u_train,h_pred))
h_true = h_train.reshape((h_train.shape[0],1))
h_true_graph = np.hstack((u_train,h_true))
MSE = np.mean(np.square(h_pred-h_true),axis = 0)[0]
MSE_rel = np.sum(np.square(h_pred-h_true),axis = 0)[0]/np.sum(np.square(h_true),axis = 0)[0]
u_MSE = np.mean(np.sum(np.square(u_pred-u_train),axis = 1))
u_norm = np.mean(np.sum(np.square(u_train),axis =1))
u_MSE_rel = u_MSE/u_norm
alpha_train_norm = np.mean(np.sum(np.square(alpha_train),axis = 1))
final_MSE_alpha = np.mean(np.sum(np.square(alpha_pred-alpha_train),axis =1))
final_MSE_alpha_rel = final_MSE_alpha / alpha_train_norm
det_pred_train = conv_pred[:,0]
a_pred_train = conv_pred[:,1]
#moment_loss_curve = moment_history.history['output_3_loss']
num_nonconv_train = np.sum((det_pred_train < 0) + (a_pred_train < 0))
error_cols = ['MSE-f','MSEr-f','MSEr-alpha','MSE-u','MSEr-u','# NegDef']
error_info = [['{:.2e}'.format(MSE),'{:.2e}'.format(MSE_rel),'{:.2e}'.format(final_MSE_alpha_rel),\
'{:.2e}'.format(u_MSE),'{:.2e}'.format(u_MSE_rel),num_nonconv_train]]
error_table = pd.DataFrame(error_info,columns = error_cols)
mpltable(error_table,width = 0.13,savedir = savefolder + 'firsterrtable'+saveid+'.pdf')
print('\n\n','Network Final Error on Training Data (exclude validation): \n\n',\
tabulate(error_table,headers = 'keys',tablefmt = 'psql'))
#Plot network function
superplot(approx_vals = h_pred_graph,targetvals = h_true_graph,\
view = [10,20],title = 'Network and Target at Training Pts',\
savedir = savefolder + 'viewnet'+saveid+'.pdf')
def runerranalysis(testdom_object,compute_conv,f,alpha,u,conv,savefolder,saveid):
testinfo = testdom_object
h_pred_test = f
grad_pred_test = alpha
u_predict = u
conv_test_pred = conv
det_pred = conv_test_pred[:,0]
a_pred = conv_test_pred[:,1]
moment_pred = np.zeros((testinfo.moment_domain.shape[0],2))
h_true_test = testinfo.graph[:,2]
for i in range(testinfo.moment_domain.shape[0]):
moment_pred[i] = moment_vector(grad_pred_test[i])
h_true_test = h_true_test.reshape((h_true_test.shape[0],1))
if np.allclose(moment_pred,0) == True:
raise ValueError('moment prediction is still zero within tolerance of np.allclose')
### Function Error ###
#Compute function error over the test domain:
funcerr_vals = np.square(h_pred_test - h_true_test)
MSE_test = np.mean(np.square(h_true_test-h_pred_test))
l2_test = np.mean(np.square(h_pred_test))
MSErel_test = MSE_test / l2_test
#Plot function error over the test domain
funcerr_graph = np.hstack((testinfo.moment_domain,funcerr_vals))
#superplot(errorpoints = funcerr_graph,targetvalsEarly = testinfo.points,title = 'Function Error and Hull',\
#savedir = savefolder+'funcerr'+saveid+'.pdf')
plot_heatmap(funcerr_graph,50,title = 'Squared Function Error: Log10 Average',\
xlabel = 'u0',ylabel = 'u1',savedir = savefolder+'funcheat'+saveid+'.eps')
### Moment Error ###
#Compute moment error over finer domain
momenterr_vals = np.sum(np.square(testinfo.moment_domain - moment_pred),axis = 1)
MSE_moment = np.sum(np.mean((np.square(testinfo.moment_domain - moment_pred)),axis = 0))
l2_moment = np.mean(np.sum(np.square(testinfo.moment_domain),axis =1))
MSErel_moment = MSE_moment / l2_moment
#Plot the moment error over the test domain
momenterr_plot = momenterr_vals.reshape((momenterr_vals.shape[0],1))
momenterr_graph = np.hstack((testinfo.moment_domain,momenterr_plot))
#superplot(errorpoints = momenterr_graph,targetvals = testinfo.points,title = 'Moment Error and Hull',\
#savedir = savefolder+'momenterr'+saveid+'.pdf')
plot_heatmap(momenterr_graph,50,title = 'Squared Moment Error: Log10 Average',\
xlabel = 'u0',ylabel = 'u1',savedir = savefolder+'momentheat'+saveid+'.eps')
### Gradient (Alpha) Error:
#Compute alpha errorr on the domain
alphaerr_vals = np.sum(np.square(grad_pred_test - testinfo.multiplier_domain),axis = 1)
MSE_alpha= np.mean(alphaerr_vals)
l2_alpha = np.mean(np.sum(np.square(testinfo.multiplier_domain),axis = 1))
MSErel_alpha = MSE_alpha / l2_alpha
#Plot the alpha errors over the test domain
alphaerr_plot = alphaerr_vals.reshape(alphaerr_vals.shape[0],1)
alphaerr_graph = np.hstack((testinfo.moment_domain,alphaerr_plot))
#superplot(errorpoints = alphaerr_graph,targetvals = testinfo.points,title = 'Alpha Error and Hull',\
#savedir = savefolder +'alphaerr'+saveid+'.pdf')
plot_heatmap(alphaerr_graph,50,title = 'Squared Alpha Error: Log10 Average',\
xlabel = 'u0',ylabel = 'u1',savedir = savefolder+'alphaheat'+saveid+'.eps')
### Determinant Values:
if compute_conv == True:
detneg = det_pred < 0
aneg = a_pred < 0
num_negdef_points = np.sum(aneg + detneg)
print('Number of negative definite points','\n', num_negdef_points,'\n\n')
#Pretty-print the error values
else:
num_negdef_points = np.nan
test_cols = ['h','rel-h','u','rel-u','rel-alpha','#NegDef']
test_errors = [[truncate(MSE_test,4),truncate(MSErel_test,4),truncate(MSE_moment,4),truncate(MSErel_moment,4),\
truncate(MSErel_alpha,4),num_negdef_points]]
test_table = pd.DataFrame(test_errors,columns = test_cols)
print('\n\n','Network Error Metrics Over Test Domain:','\n',tabulate(test_table,headers = 'keys',tablefmt = 'psql'))
mpltable(test_table,width = 0.12,savedir = savefolder+'metricstable'+saveid+'.pdf')
def plot_M1_1d(truevals,predvals,**kwargs):
h_true,u_true = truevals
h_pred,u_pred = predvals
u0_errors = np.log10( np.divide(np.abs(u_true[:,0] - u_pred[:,0]),np.abs(u_true[:,0]))+1e-10 )
u1_errors = np.log10( np.divide(np.abs(u_true[:,1] - u_pred[:,1]), np.abs(u_true[:,1]))+1e-10 )
f_errors = np.log10( np.divide(np.abs(h_true-h_pred),np.abs(h_true) ) + 1e-10 )
fig, ax = plt.subplots(1)
"""
if 'xlabel' in kwargs:
ax.set_xlabel(kwargs['xlabel'])
if 'ylabel' in kwargs:
ax.set_ylabel(kwargs['ylabel'])
if 'title' in kwargs:
ax.set_title(kwargs['title'])
"""
ax.scatter(u_true[:,1],u0_errors,color = 'blue')
ax.set_xlabel('u_1')
ax.set_ylabel('log_10 u_0 Error')
ax.set_title('log_10 of Relative u_0 Error')
plt.show()
if 'savedir' in kwargs:
plt.savefig(kwargs['savedir']+'1dtest_u0')
plt.clf()
fig,ax = plt.subplots(1)
ax.scatter(u_true[:,1],u1_errors,color = 'orange')
ax.set_xlabel('u_1')
ax.set_ylabel('log_10 u_1 Error')
ax.set_title('log_10 of Relative u_1 Error')
plt.show()
if 'savedir' in kwargs:
plt.savefig(kwargs['savedir']+'1dtest_u1')
plt.clf()
fig,ax = plt.subplots(1)
ax.scatter(u_true[:,1],f_errors,color = 'green')
ax.set_xlabel('u_1')
ax.set_ylabel('log_10 h Error')
ax.set_title('log_10 of Relative h Error')
plt.show()
if 'savedir' in kwargs:
plt.savefig(kwargs['savedir']+'1dtest_h')
plt.clf()
# ax.legend()
def quickstats_scaled(f,alpha,u,conv,savefolder,saveid,data_id,size,method,domain,N = 1,append_results= False,**kwargs):
h_pred,h_train = f
alpha_pred,alpha_train = alpha
u_pred,u_train = u
if method == 'net':
conv_pred = conv
h_pred = h_pred.reshape((h_pred.shape[0],1))
h_true = h_train.reshape((h_train.shape[0],1))
L2_h = np.mean(np.square(h_pred-h_true),axis = 0)[0]
L2_hrel = np.sum(np.square(h_pred-h_true),axis = 0)[0] / np.sum(np.square(h_true),axis = 0)[0]
L2_u = np.mean(np.sum(np.square(u_pred-u_train),axis = 1))
L2norm_u = np.mean(np.sum(np.square(u_train),axis =1))
L2_urel = L2_u / L2norm_u
L2_u0 = np.mean(np.square(u_pred[:,0]-u_train[:,0]))
u0_norm = np.mean(np.square(u_train[:,0]))
L2_u0rel = L2_u0 / u0_norm
L2norm_alpha = np.mean(np.sum(np.square(alpha_train),axis = 1))
L2_alpha = np.mean(np.sum(np.square(alpha_pred-alpha_train),axis =1))
L2_alpharel = L2_alpha / L2norm_alpha
L2_u0_spec = np.mean(np.square(u_train[:,0] - u_pred[:,0])) / np.mean(np.square(u_train[:,0]))
L2_u1_spec = np.mean(np.square(u_train[:,1] - u_pred[:,1])) / np.mean(np.square(u_train[:,1]))
if N == 2:
L2_u2_spec = np.mean(np.square(u_train[:,2] - u_pred[:,2])) / np.mean(np.square(u_train[:,2]))
if method == 'net':
if N == 1:
num_nonconv_train = np.sum((conv_pred < 0))
if N == 2:
num_nonconv_train = np.sum((conv_pred[:,0] < 0) + (conv_pred[:,1] < 0))
error_cols = ['MSE f','MSE r-f','MSE alpha','MSE r-alpha','MSE u','MSE r-u','MSE r-u0','# NegDef']
error_info = [['{:.2e}'.format(L2_h),'{:.2e}'.format(L2_hrel),'{:.2e}'.format(L2_alpha),'{:.2e}'.format(L2_alpharel),\
'{:.2e}'.format(L2_u),'{:.2e}'.format(L2_urel),'{:.2e}'.format(L2_u0rel),num_nonconv_train]]
error_table = pd.DataFrame(error_info,columns = error_cols)
print('\n ------------------------------------------------------------------- \n ')
print(domain+' Data Error Summary (excluding validation) in LaTeX','\n\n',error_table.to_latex())
#mpltable(error_table,width = 0.12,savedir = savefolder + 'firsterrtable'+saveid+'.pdf')
print('\n\n',domain + ' Data Error Summary (exclude validation): \n\n',\
tabulate(error_table,headers = 'keys',tablefmt = 'psql'))
elif method == 'spline':
error_cols = ['MSE f','MSE r-f','MSEalpha','MSEr-alpha','MSE u','MSE r-u','MSE r-u0']
error_info = [['{:.2e}'.format(L2_h),'{:.2e}'.format(L2_hrel),'{:.2e}'.format(L2_alpha),'{:.2e}'.format(L2_alpharel),\
'{:.2e}'.format(L2_u),'{:.2e}'.format(L2_urel),'{:.2e}'.format(L2_u0rel)]]
error_table = pd.DataFrame(error_info,columns = error_cols)
print('\n -------------------------------------------------------------------- \n')
print(domain+' Domain Error Summary (excluding validation) in Latex','\n\n',error_table.to_latex())
#mpltable(error_table,width = 0.12,savedir = savefolder + method + 'firsterrtable' + saveid + '.pdf')
print('\n\n',method+' '+domain+' Domain Error Summary (excluding validation): \n\n',\
tabulate(error_table,headers = 'keys',tablefmt = 'psql'))
if N ==2:
Select_MSE_Cols = ['MSE r-f','MSE r-u','MSE r-u0','MSE r-u1','MSE r-u2', 'MSE r-alpha']
Select_RMSE_Cols = ['RMSE r-f','RMSE r-u','RMSE r-u0','RMSE r-u1','RMSE r-u2', 'RMSE r-alpha']
RMSE_vals = [[np.sqrt(L2_hrel),np.sqrt(L2_urel),np.sqrt(L2_u0_spec),\
np.sqrt(L2_u1_spec),np.sqrt(L2_u2_spec),np.sqrt(L2_alpharel)]]
MSE_vals = [[L2_hrel,L2_urel,L2_u0_spec,L2_u1_spec,L2_u2_spec,L2_alpharel]]
Format_RMSE_vals = [['{:.2e}'.format(x) for x in RMSE_vals[0]]]
Format_MSE_vals = [['{:.2e}'.format(x) for x in MSE_vals[0]]]
Format_MSE_Table = pd.DataFrame(Format_MSE_vals,columns = Select_MSE_Cols)
Format_RMSE_Table = pd.DataFrame(Format_RMSE_vals,columns = Select_RMSE_Cols)
print('\n\n Select ' + domain +' RMSE Table in Latex:\n\n ',Format_RMSE_Table.to_latex(),'\n\n')
if N == 1:
Select_MSE_Cols = ['MSE r-f','MSE r-u','MSE r-u0','MSE r-u1', 'MSE r-alpha']
Select_RMSE_Cols = ['RMSE r-f','RMSE r-u','RMSE r-u0','RMSE r-u1', 'RMSE r-alpha']
RMSE_vals = [[np.sqrt(L2_hrel),np.sqrt(L2_urel),np.sqrt(L2_u0_spec),\
np.sqrt(L2_u1_spec),np.sqrt(L2_alpharel)]]
MSE_vals = [[L2_hrel,L2_urel,L2_u0_spec,L2_u1_spec,L2_alpharel]]
Format_RMSE_vals = [['{:.2e}'.format(x) for x in RMSE_vals[0]]]
Format_MSE_vals = [['{:.2e}'.format(x) for x in MSE_vals[0]]]
Format_RMSE_Table = pd.DataFrame(Format_RMSE_vals,columns = Select_RMSE_Cols)
Format_MSE_Table = pd.DataFrame(Format_MSE_vals,columns = Select_MSE_Cols)
if append_results:
results_to_append = [saveid,size,np.sqrt(L2_hrel),np.sqrt(L2_urel),np.sqrt(L2_u0_spec),\
np.sqrt(L2_u1_spec),np.sqrt(L2_alpharel)]
if (method == 'spline') and (domain == 'Train'):
with open(domain+'_All_Results_'+'Spline'+'Spline'+'.pickle','rb') as handle:
result_list = pickle.load(handle)
result_list.append(results_to_append)
with open(domain+'_All_Results_'+'Spline'+'Spline'+'.pickle','wb') as newhandle:
pickle.dump(result_list,newhandle)
elif (method == 'spline') and (domain == '1dTest'):
with open(domain+'_All_Results_'+'Spline'+data_id+'.pickle','rb') as handle:
result_list = pickle.load(handle)
result_list.append(results_to_append)
with open(domain+'_All_Results_'+'Spline'+data_id+'.pickle','wb') as newhandle:
pickle.dump(result_list,newhandle)
else:
with open(domain+'_All_Results_'+data_id+'.pickle','rb') as handle:
result_list = pickle.load(handle)
result_list.append(results_to_append)
with open(domain+'_All_Results_'+data_id+'.pickle','wb') as newhandle:
pickle.dump(result_list,newhandle)
        print('\n\n Select ' + domain + ' RMSE Table in Latex:\n\n ',Format_RMSE_Table.to_latex(),'\n\n')
#Plot network function
if N ==2:
if 'plot' in kwargs:
if kwargs['plot'] == True:
h_true_graph = np.hstack((u_train[:,1:],h_true))
h_pred_graph = np.hstack((u_train[:,1:],h_pred))
superplot(approx_vals = h_pred_graph,targetvals = h_true_graph,\
view = [150,20], title = 'Network and Target', \
savedir = savefolder + method + 'viewnet' + saveid +'.pdf')
else:
pass
if N == 1:
if 'plot1d' in kwargs:
if kwargs['plot1d'] == True:
plot_M1_1d([h_train,u_train],[h_pred,u_pred],savedir = savefolder + saveid)
else:
pass
if 'plot3d' in kwargs:
if kwargs['plot3d'] == True:
h_true_graph = np.hstack((u_train[:,:],h_true))
h_pred_graph = np.hstack((u_train[:,:],h_pred))
superplot(approx_vals = h_pred_graph,targetvals = h_true_graph,\
view = [150,20],title = 'Network and Target at Training Pts',\
savedir = savefolder + method + 'viewnet'+saveid+'.pdf')
else:
pass
def runerranalysis_scaled(truevals,compute_conv,predvals,savefolder,saveid,data_id,size,method,domain,L1 = False,append_results = False,N = 1):
    print('\n\n','MSE := (1/N)*\sum_{i} (x_i^{pred}-x_i^{true})^{2} \n MSRE := (1/N)*\sum_{i} [(x_i^{pred} - x_i^{true}) / x_i^{true}]^{2}')
#Import true values
if N == 1:
"""
testinfo = truevals
h_true = testinfo.graph[:,2]
u_true = testinfo.moment_domain
u_true2d = testinfo.moment_domain
grad_true = testinfo.multiplier_domain
"""
h_true,grad_true,u_true = truevals
u_true2d = u_true[:,:]
if N ==2:
h_true,grad_true,u_true = truevals
u_true2d = u_true[:,1:]
#Get predicted values on the test domain
h_pred,grad_pred,u_pred,conv_pred = predvals
#conv_true = 0
h_pred = h_pred.reshape((h_pred.shape[0],1))
h_true = h_true.reshape((h_true.shape[0],1))
### Calculate function errors
L2_func_title,L1_func_title = 'Squared ','Norm '
L2_func_vals,L1_func_vals = np.square(h_pred-h_true),np.sqrt(np.square(h_pred-h_true))
L2_func,L1_func = np.mean(L2_func_vals),np.mean(L1_func_vals)
L2_funcrel,L1_funcrel = L2_func / np.mean(np.square(h_true)), L1_func / np.mean(np.abs(h_true),axis = 0)
if L1 == True:
if N == 1:
L1_funcerr_graph = np.hstack((u_true2d,L1_func_vals))
plot_heatmap(L1_funcerr_graph,50,title = L1_func_title + 'Function Error: Log10 Average',\
xlabel = 'u0',ylabel = 'u1',savedir = savefolder+method+'funcheat'+saveid+'.eps')
elif L1 == False:
if N == 1:
"""
L2_funcerr_graph = np.hstack((u_true2d,L2_func_vals))
plot_heatmap(L2_funcerr_graph,50,title = L2_func_title + 'Function Error: Log10 Average',\
xlabel = 'u0',ylabel = 'u1',savedir = savefolder+method+'funcheat'+saveid+'.eps')
"""
pass
### Calculate moment errors
L2_u0_spec = np.mean(np.square(u_true[:,0] - u_pred[:,0])) / np.mean(np.square(u_true[:,0]))
L2_u1_spec = np.mean(np.square(u_true[:,1] - u_pred[:,1])) / np.mean(np.square(u_true[:,1]))
if N == 2:
L2_u2_spec = np.mean(np.square(u_true[:,2] - u_pred[:,2])) / np.mean(np.square(u_true[:,2]))
L2_u_vals,L1_u_vals = np.sum(np.square(u_true - u_pred),axis = 1),np.sqrt(np.sum(np.square(u_true - u_pred),axis = 1))
L2_u,L1_u = np.mean(L2_u_vals,axis = 0),np.mean(L1_u_vals,axis = 0)
L2_u_rel, L1_u_rel = L2_u / np.mean(np.sum(np.square(u_true),axis =1),axis =0) , L1_u / np.mean(np.sqrt(np.sum(np.square(u_true),axis =1)),axis =0)
L2_moment_title,L1_moment_title = 'Squared ','Norm '
#Calculate scaled moment (u/ u_0) errors
L2_uoveru0_vals,L1_uoveru0_vals = np.divide(L2_u_vals.reshape((L2_u_vals.shape[0],1)),np.square(u_true[:,0].reshape((u_true[:,0].shape[0],1)))),\
np.divide(L1_u_vals.reshape((L1_u_vals.shape[0],1)),u_true[:,0].reshape((u_true[:,0].shape[0],1)))
L2_uoveru0_vals,L1_uoveru0_vals = L2_uoveru0_vals.reshape((L2_uoveru0_vals.shape[0],1)),L1_uoveru0_vals.reshape((L1_uoveru0_vals.shape[0],1))
L2_uoveru0_title,L1_uoveru0_title = 'Squared u_0 Scaled ','Norm u_0 Scaled '
L2_moment_title,L1_moment_title = 'Squared ','Norm '
if L1 == True:
if N ==1:
"""
L1_uerr_plot = L1_u_vals.reshape((L1_u_vals.shape[0],1))
L1_scaled_u_graph = np.hstack((u_true2d,L1_uoveru0_vals))
L1_uerr_graph = np.hstack((u_true2d,L1_uerr_plot))
plot_heatmap(L1_scaled_u_graph,50,title = L1_uoveru0_title + 'Moment Error: Log10 Average',\
xlabel = 'u0',ylabel = 'u1',savedir = savefolder+method+'momentu0heat'+saveid+'.eps')
plot_heatmap(L1_uerr_graph,50,title = L1_moment_title + 'Moment Error: Log10 Average',\
xlabel = 'u0',ylabel = 'u1',savedir = savefolder+method+'momentheat'+saveid+'.eps')
"""
pass
elif L1 == False:
if N ==1:
"""
L2_uerr_plot = L2_u_vals.reshape((L2_u_vals.shape[0],1))
L2_scaled_u_graph = np.hstack((u_true2d,L2_uoveru0_vals))
L2_uerr_graph = np.hstack((u_true2d,L2_uerr_plot))
plot_heatmap(L2_scaled_u_graph,50,title = L2_uoveru0_title + 'Moment Error: Log10 Average',\
xlabel = 'u0',ylabel = 'u1',savedir = savefolder+method+'momentu0heat'+saveid+'.eps')
plot_heatmap(L2_uerr_graph,50,title = L2_moment_title + 'Moment Error: Log10 Average',\
xlabel = 'u0',ylabel = 'u1',savedir = savefolder+method+'momentheat'+saveid+'.eps')
"""
pass
#Table for detailed moment errors
L2_u0_vals,L2_u1_vals = np.square(u_pred[:,0]-u_true[:,0]), np.square(u_pred[:,1]-u_true[:,1])
rel_u0_vals,rel_u1_vals = np.divide(np.square(u_pred[:,0]-u_true[:,0]),\
np.square(u_true[:,0])),np.divide(np.square(u_pred[:,1]-u_true[:,1]),np.square(u_true[:,1]))
L2_u0,L2_u1 =
|
np.mean(L2_u0_vals)
|
numpy.mean
|
#!/usr/bin/env python
import numpy as np
import numpy.testing as nt
import unittest
from machinevisiontoolbox.Image import Image
# from pathlib import Path
class TestImageProcessingBase(unittest.TestCase):
def test_int(self):
# test for uint8
im = np.zeros((2, 2), np.float)
im = Image(im)
nt.assert_array_almost_equal(im.int().image,
|
np.zeros((2, 2), np.uint8)
|
numpy.zeros
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import numpy.random as random
def mcs(losses, alpha, B, w, boot = 'STATIONARY'):
'''
Compute the model confidence set of Hansen, Lunde, Nason
INPUTS:
LOSSES = T x K matrix of losses
ALPHA = The final pval to use the MCS
B = Number of bootstrap replications
W = Desired block length
BOOT = 'STATIONARY' or 'BLOCK', Stationary is default
OUTPUTS:
INCLUDEDR = included models using the R method
PVALSR = Pvals using R method
EXCLUDEDR = Excluded models using the R method
INCLUDEDSQ = Included models using SQ method
PVALSSQ = Pvals using SQ method
EXCLUDEDSQ = Excluded models using SQ method
COMMENTS:
    This version of the MCS operates on quantities that should be called
bads, such as losses. If the quantities of interest are goods, such as returns,
simply call MCS with -1*LOSSES
- Translated to Python from <NAME>'s MFE MATLAB toolbox
'''
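    # Illustrative call sketch (not from the original source), with `losses` a T x K
    # numpy array of model losses:
    #   includedR, pvalsR, excludedR, includedSQ, pvalsSQ, excludedSQ = \
    #       mcs(losses, alpha=0.05, B=1000, w=10, boot='STATIONARY')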
# ===================================================================== #
# ========================= SUBFUNCTIONS ============================== #
def block_bootstrap(data, B, w):
'''
Implements circular block bootstrap for stationary, dependent series
'''
t,k = data.shape
if k > 1:
raise ValueError('DATA must be a column vector')
if t < 2:
raise ValueError('DATA must have at least 2 observations')
if not np.isscalar(w) or w < 1 or np.floor(w) != w or w > t:
raise ValueError('W must be a positive scalar integer smaller than T')
if not np.isscalar(B) or B < 1 or np.floor(B) != B:
raise ValueError('B must be a positive scalar integer')
# Compute the number of blocks needed
s = int(
|
np.ceil(t / w)
|
numpy.ceil
|
import numpy as np
def pca(X,K,*arg):
#EVD
def EVD_pca(x,k):
rw,cl=np.shape(x)
#step 1: mean calculation
mean=
|
np.mean(x,axis=0)
|
numpy.mean
|
import os
import sys
import time
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.pyplot import figure, imread, imsave, imshow
from matplotlib import cm
from skimage.transform import rescale, resize
from skimage.color import rgb2gray
from skimage import img_as_float
import pandas as pd
from skimage import exposure
from PIL import Image
import skimage
import skimage.measure
import matplotlib.patches as patches
from scipy.spatial import distance_matrix
from skimage.draw import circle
import cv2
import imutils
from skimage.feature import peak_local_max
from skimage.morphology import watershed
from scipy import ndimage
from utils.utils import *
from utils.log_progress import *
import imgaug as ia
from imgaug import augmenters as iaa
# https://stackoverflow.com/questions/9056957/correct-way-to-define-class-variables-in-python
class Visualize():
def __init__(self,df,model,predictiontype='leaf',input_shape=(512,768,3),masktype='leaf'):
self.df = df
self.model = model
self.predictiontype = predictiontype
self.input_shape = input_shape
self.figsize = (15,15)
self.prediction_threshold = None
self.selected_row = None
self.mode = None
self.img = None
self.msk = None
self.prediction = None
self.dice_score = None
self.img_shape = None
self.model_shape = None
assert masktype=='leaf' or masktype=='root', 'Masktype not allowed'
self.masktype = masktype
self.seq_norm = iaa.Sequential([
iaa.CLAHE(),
iaa.LinearContrast(alpha=1.0)
])
def get_image(self,index="random"):
"""
Get Image
:param int or str "index": index of Image to load
:return: image
"""
if index == 'random':
self.selected_row = self.df.sample(1)
else:
self.selected_row = self.df[self.df['name'].str.contains(str(index))]
self.load_data()
self.__process_input()
return self.img
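# Minimal usage sketch (assumed workflow, with a hypothetical dataframe `df`
# and an already fitted Keras segmentation model `model`):
#   viz = Visualize(df, model, predictiontype='leaf')
#   img = viz.get_image('random')                 # CLAHE-normalized image
#   viz.show_single('random', 'image_prediction') # overlay the prediction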
def get_mask(self,index):
"""
Get Mask
:param int or str "index": index of Mask to load
:return: mask
"""
if index == 'random':
self.selected_row = self.df.sample(1)
else:
self.selected_row = self.df[self.df['name'].str.contains(str(index))]
self.load_data()
return self.msk
def get_roots(self,index):
"""
Get Roots Coordinates
:param int or str "index": index of Image to load roots from
:return: Root Coordinates as List of Tuples (x,y)
"""
if index == 'random':
self.selected_row = self.df.sample(1)
else:
self.selected_row = self.df[self.df['name'].str.contains(str(index))]
return [tuple(r) for r in self.selected_row["roots"].values[0]]
def get_prediction(self,index):
"""
Get Prediction of Image
:param int or str "index": index of Image to make prediction
:return: Prediction as Image
"""
if index == 'random':
self.selected_row = self.df.sample(1)
else:
self.selected_row = self.df[self.df['name'].str.contains(str(index))]
self.load_data()
self.predict()
return self.prediction
def show_single(self,index,mode):
"""
Show Single Image
:param int or str "index": index of Image to show
:param str "mode":
image : shows only image
mask : shows only mask
image_mask : shows image with overlayed mask
image_prediction : shows image with overlayed prediction
image_prediction_roots : shows image with GT mask and predicted roots
image_prediction_contour : shows image with predicted segmentation and GT contours
:return: No return Value
"""
self.mode = mode
if index == 'random':
self.selected_row = self.df.sample(1)
else:
self.selected_row = self.df[self.df['name'].str.contains(str(index))]
_, ax = plt.subplots(1,1,figsize=self.figsize)
self.__add_image(self.selected_row,ax)
def show_matrix(self,index,mode,rows=4):
"""
Show a rows x 2 Matrix of images
:param List of int or str: List of indexes to show, or "random"
:param str "mode":
image : shows only image
mask : shows only mask
image_mask : shows image with overlayed mask
image_prediction : shows image with overlayed prediction
image_prediction_roots : shows image with GT mask and predicted roots
image_prediction_contour : shows image with predicted segmentation and GT contours
:param int "row": how much rows should be displayd
:return: No return Value
"""
self.mode = mode
# Create empty header:
selected_rows = pd.DataFrame().reindex_like(self.df).head(0)
if index == 'random':
n = rows*2
selected_rows = selected_rows.append(self.df.sample(n))
else:
n = len(index)
rows = int(n/2)
if n <= 2:
raise ValueError('Index length must be greater than 2')
if n % 2 != 0:
raise ValueError('Index length must be even')
for i in index:
selected_rows = selected_rows.append(self.df[self.df['name'].str.contains(str(i))], ignore_index=True)
_, ax = plt.subplots(int(n/2),2,figsize=(15,3*n))
for i in log_progress(range(rows),every=1,name='Rows'):
rows = selected_rows[2*i:2*i+2]
self.__make_image_row(rows,ax[i])
plt.subplots_adjust(wspace=0.01, hspace=0)
def __make_image_row(self,rows,ax):
self.__add_image(rows.iloc[[0]],ax[0])
self.__add_image(rows.iloc[[1]],ax[1])
def __add_image(self,row,ax=None):
'''
Adds image to 'ax' Object
'''
self.selected_row = row
self.load_data()
if ax == None:
_, ax = plt.subplots(figsize=self.figsize)
if self.mode == "image":
ax.imshow(self.img)
if self.mode == "mask":
ax.imshow(self.msk)
if self.mode == "image_mask":
ax.imshow(self.img)
ax.imshow(self.msk,cmap="terrain",alpha=0.4)
if self.mode == "image_prediction":
self.predict()
ax.imshow(self.img,cmap='gray')
if self.prediction_threshold == None:
ax.imshow(self.prediction, alpha=0.4)
else:
ax.imshow(self.prediction>self.prediction_threshold, alpha=0.4)
if self.mode == "image_prediction_error":
self.predict()
self.error = np.abs((self.prediction-self.msk))
ax.imshow(self.img, interpolation='none')
ax.imshow(self.prediction, interpolation='none',alpha=0.2)
ax.imshow(self.error,cmap='Reds', alpha=0.4, interpolation='none')
if self.mode == "image_prediction_roots":
self.predict()
root_coords = Evaluate.get_root_pred_coord_v1(self,self.prediction)
if root_coords is not None:
for c in root_coords:
circ = patches.Circle(c,30,facecolor='red',alpha=0.5)
circ2 = patches.Circle(c,4,facecolor='black')
ax.add_patch(circ)
ax.add_patch(circ2)
if "roots" in self.selected_row:
for root_y in self.selected_row["roots"].values[0]/2:
# print(root_y)
circ = patches.Circle(tuple(root_y),5,facecolor='yellow')
ax.add_patch(circ)
ax.imshow(self.img,cmap='gray')
if "roots" in self.selected_row:
ax.imshow(self.msk, alpha=0.4)
if self.mode == 'image_prediction_contour':
self.predict()
ax.imshow(self.img,cmap='gray')
CS = ax.contour(self.msk,[-1,1],colors='cyan',linewidths=3)
if self.prediction_threshold == None:
ax.imshow(self.prediction>0.7, alpha=0.5)
else:
ax.imshow(self.prediction>self.prediction_threshold, alpha=0.4)
if self.mode == 'normalized_gray':
norm = rgb2gray(self.img)
norm = (norm-np.mean(norm))/np.std(norm)
ax.imshow(norm,cmap="gray")
if self.dice_score == None:
self.dice_score = 0.0
ax.set_title('Image: ' + str(self.selected_row.name.values[0]) + " Dice Coeff: " + str(round(self.dice_score,2)), fontsize=15)
ax.axis('off')
self.dice_score = None
def load_data(self):
if len(self.selected_row) == 0:
raise ValueError('Image not found, index not in dataframe')
img= imread(self.selected_row.image_path.values[0]+self.selected_row.name.values[0])
img = self.__adjust_data(img)
if self.input_shape[2] == 1: #grayscale
self.img = resize(img,self.input_shape[:2]).reshape(*self.input_shape[:2])
if self.input_shape[2] == 3: #rgb
self.img = resize(img,self.input_shape[:2]).reshape(*self.input_shape)
self.img = img_as_float(self.img)
if 'mask_path' in self.df:
if self.masktype == 'leaf':
#msk = imread(self.selected_row.mask_path.values[0]+self.selected_row.name.values[0])
msk = imread('../data/00_all/masks_leaf-segmentation/'+self.selected_row['name'].values[0])
elif self.masktype == 'root':
msk = imread('../data/00_all/masks_root-estimation/'+self.selected_row['name'].values[0])
#msk = imread(self.selected_row.mask_path.values[0]+self.selected_row.name.values[0])
self.msk = resize(msk,self.input_shape[:2]).reshape(*self.input_shape[:2])
self.msk = img_as_float(self.msk)
else:
self.msk = np.zeros(self.input_shape[:2])
def __process_input(self):
self.img = (self.img*255).astype("uint8")
self.img = self.seq_norm.augment_image(self.img)
self.img = self.img.astype(float)/255.0
def predict(self):
if self.model.layers[0].input_shape[1:] != self.input_shape:
raise ValueError('Model input and image shape do not match \n ' + 'Model input shape is: ' + str(self.model.layers[0].input_shape[1:]) + '\n' + 'Defined input shape is: ' + str(self.input_shape))
self.img = (self.img*255).astype("uint8")
self.img = self.seq_norm.augment_image(self.img)
self.img = self.img.astype(float)/255.0
tmp_img = self.img.reshape(1,*self.input_shape)
if self.predictiontype == 'leaf':
self.prediction = self.model.predict(tmp_img)[:,:,:,0]
elif self.predictiontype == 'root':
try:
self.prediction = self.model.predict(tmp_img)[:,:,:,1]
except IndexError:
self.prediction = self.model.predict(tmp_img)[:,:,:,0]
self.prediction = self.prediction.reshape(*self.input_shape[:2])
if self.prediction_threshold == None:
self.prediction = img_as_float(self.prediction)
else:
self.prediction = img_as_float(self.prediction>self.prediction_threshold)
if self.msk is not None:
smooth = 0.0
y_true_f = np.ndarray.flatten(self.msk.astype(float))
y_pred_f = np.ndarray.flatten(self.prediction.astype(float))
intersection = np.sum(y_true_f * y_pred_f)
self.dice_score = (2. * intersection + smooth) / (np.sum(y_true_f) + np.sum(y_pred_f) + smooth)
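# Dice sanity check (illustrative, not from the original code): for two
# identical binary 2x2 masks the intersection is 4, so dice = 2*4 / (4 + 4)
# = 1.0; for completely disjoint masks the score is 0.0.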
def __normalize(self,img):
img -= img.mean()
img /= (img.std() +1e-5)
img *= 0.1
img += 0.5
img = np.clip(img,0,1)
return img
# https://www.kaggle.com/gauss256/preprocess-images
def __adjust_data(self,img):
if self.input_shape[2] == 1: #grayscale
img = rgb2gray(img)
img = exposure.equalize_hist(img)
elif self.input_shape[2] == 3: #rgb
pass #TODO
return img
class Evaluate(Visualize):
'''
Evaluate specific Model
Target: tbd
'''
def __init__(self,df,input_shape,model,predictiontype,masktype='leaf'):
#Visualize.__init__(self,df,model,masktype='hand')
self.df = df
self.input_shape = input_shape
self.model = model
self.predictiontype = predictiontype
self.masktype = masktype
self.prediction_threshold = None
self.seq_norm = iaa.Sequential([
iaa.CLAHE(),
iaa.LinearContrast(alpha=1.0)
])
def get_seg_eval_metrics(self,prediction_threshold=0.7,dc_threshold=0.7,print_output=False):
DCs = []
TPs = []
FPs = []
FNs = []
names = []
for i in log_progress(range(len(self.df)),name="Samples to Test"):
self.selected_row = self.df.iloc[[i]]
self.load_data()
self.predict()
pred = self.prediction > prediction_threshold
msk = self.msk > 0.5
DC = self.__dice_score(msk,pred)
pred = pred.flatten()
msk = msk.flatten()
TP = np.sum(pred == msk) / len(msk)
FP = 0
for gt,p in zip(msk,pred):
if p == 1 and gt == 0:
FP += 1
FP /= len(msk)
FN = 0
FN = 0 if DC > dc_threshold else 1
#for gt,p in zip(msk,pred):
# if p == 0 and gt == 1:
# FN += 1
#FN /= len(msk)
name = self.df.iloc[[i]].name
DCs.append(DC)
TPs.append(TP)
FPs.append(FP)
FNs.append(FN)
names.append(name)
if print_output:
print(str(DC) + " | " + str(TP) + " | " + str(FP) + " | " + str(FN) + " | " + str(name))
return DCs, TPs, FPs, FNs, names
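# Example of consuming the per-sample metrics (hypothetical thresholds and
# shapes, shown only as a sketch):
#   ev = Evaluate(df, (512, 768, 3), model, predictiontype='leaf')
#   DCs, TPs, FPs, FNs, names = ev.get_seg_eval_metrics(0.7, 0.7)
#   print('mean dice:', sum(DCs) / len(DCs))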
def get_dice_score(self, index, prediction_threshold=0.8):
"""
Get dice coefficent of a prediction from a single image
:param int "index": index of image to load
:return: dice score
"""
assert index != 'random', "Random not supported here!"
self.selected_row = self.df[self.df['name'].str.contains(str(index))]
self.load_data()
self.predict()
pred = self.prediction > prediction_threshold
msk = self.msk > 0.5
return self.__dice_score(msk, pred)
def get_iou_score(self, index, prediction_threshold=0.8):
"""
Get iou score of a prediction from a single image
:param int "index": index of image to load
:return: iou score
"""
assert index != 'random', "Random not supported here!"
self.selected_row = self.df[self.df['name'].str.contains(str(index))]
self.load_data()
self.predict()
pred = self.prediction > prediction_threshold
msk = self.msk > 0.5
return self.img, msk, pred, self.__dice_score(msk, pred)
def get_dice_coeff_score(self,mode='simple'):
assert mode=='simple' or mode=='raw', 'Mode must be "simple" or "raw"'
dice_coeffs = []
prediction_times = [] # Just for stats
for i in log_progress(range(len(self.df)),name='Samples to Test'):
self.selected_row = self.df.iloc[[i]]
self.load_data()
t = time.time()
self.predict()
prediction_times.append(time.time() - t)
dice_coeffs.append(self.__dice_score(self.prediction.reshape(1,*self.input_shape[:2],1),self.msk.reshape(1,*self.input_shape[:2],1)))
print("Average prediction time: %.2f s" % (sum(prediction_times)/len(prediction_times)))
if mode=='simple':
return min(dice_coeffs), max(dice_coeffs), sum(dice_coeffs)/len(dice_coeffs)
elif mode == 'raw':
return dice_coeffs
def get_iou_score_v0(self,mode='simple'):
iou_scores = []
for i in log_progress(range(len(self.df)),name='Samples to Test'):
self.selected_row = self.df.iloc[[i]]
self.load_data()
self.predict()
iou_scores.append(self.__iou_score(self.prediction.reshape(1,*self.input_shape[:2],1),self.msk.reshape(1,*self.input_shape[:2],1)))
if mode=='simple':
return min(iou_scores), max(iou_scores), sum(iou_scores)/len(iou_scores)
elif mode == 'raw':
return iou_scores
def __dice_score(self,y_true,y_pred,smooth=1e-12):
y_true_f = np.ndarray.flatten(y_true)
y_pred_f = np.ndarray.flatten(y_pred)
intersection = np.sum(y_true_f * y_pred_f)
score = (2. * intersection + smooth) / (np.sum(y_true_f) +
|
np.sum(y_pred_f)
|
numpy.sum
|
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for optimize_media."""
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
import jax.numpy as jnp
import numpy as np
from lightweight_mmm import lightweight_mmm
from lightweight_mmm import optimize_media
from lightweight_mmm import preprocessing
class OptimizeMediaTest(parameterized.TestCase):
@classmethod
def setUpClass(cls):
super(OptimizeMediaTest, cls).setUpClass()
cls.national_mmm = lightweight_mmm.LightweightMMM()
cls.national_mmm.fit(
media=jnp.ones((50, 5)),
target=jnp.ones(50),
total_costs=jnp.ones(5) * 50,
number_warmup=2,
number_samples=2,
number_chains=1)
cls.geo_mmm = lightweight_mmm.LightweightMMM()
cls.geo_mmm.fit(
media=jnp.ones((50, 5, 3)),
target=jnp.ones((50, 3)),
total_costs=jnp.ones(5) * 50,
number_warmup=2,
number_samples=2,
number_chains=1)
def setUp(self):
super().setUp()
self.mock_minimize = self.enter_context(
mock.patch.object(optimize_media.optimize, "minimize", autospec=True))
@parameterized.named_parameters([
dict(
testcase_name="national",
model_name="national_mmm",
geo_ratio=1),
dict(
testcase_name="geo",
model_name="geo_mmm",
geo_ratio=np.tile(0.33, reps=(5, 3)))
])
def test_objective_function_generates_correct_value_type_and_sign(
self, model_name, geo_ratio):
mmm = getattr(self, model_name)
extra_features = mmm._extra_features
time_periods = 10
kpi_predicted = optimize_media._objective_function(
extra_features=extra_features,
media_mix_model=mmm,
media_input_shape=(time_periods, *mmm.media.shape[1:]),
media_gap=None,
target_scaler=None,
media_scaler=preprocessing.CustomScaler(),
media_values=jnp.ones(mmm.n_media_channels) * time_periods,
geo_ratio=geo_ratio,
seed=10)
self.assertIsInstance(kpi_predicted, jnp.DeviceArray)
self.assertLessEqual(kpi_predicted, 0)
self.assertEqual(kpi_predicted.shape, ())
@parameterized.named_parameters([
dict(
testcase_name="zero_output",
media=
|
np.ones(9)
|
numpy.ones
|
from ast import operator
from pickle import NONE
#from asyncio.windows_events import NULL
import librosa
import argparse
import numpy as np
import numpy.random as random
import soundfile as sf
import os
def convert_wv12wav(args):
# input - original path, intended wav path, sampling rate
# output - void
# function converts each WV1 file in our original path to a WAV file in our intended path; the file sampling rate is fs
speakers_dirs = os.listdir(args.original_path)
for speaker in speakers_dirs:
if not os.path.isdir(args.wav_path + speaker):
os.mkdir(args.wav_path + speaker)
files = os.listdir(args.original_path + '/' + speaker)
files = [file for file in files if file.split('.')[-1] == 'wv1']
for signal in files:
wv1_signal, fs = librosa.load(args.original_path + '/' + speaker + '/' + signal, sr=args.fs)
sf.write(args.wav_path + speaker + '/' + signal.split('.')[0] + '.wav', wv1_signal, fs)
def mix_signal(p_silence, p_one,args):
# input - wav files path, first speaker probability, second speaker probability
# output - mixed signal of speakers according to their defined probability
# function uses the given probability for each speaker to speak, mixes them into 5 sec intervals, and returns (meant to be used twice for our data)
five_sec = 5*args.fs
first_speaker =
|
np.random.normal(0,1e-4,five_sec)
|
numpy.random.normal
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow input/output utilities."""
import collections
import json
import math
import os
import numpy as np
import tensorflow.compat.v1 as tf
class Features(object):
"""Feature keys."""
# Waveform(s) of audio observed at receiver(s).
RECEIVER_AUDIO = 'receiver_audio'
# Images of each source at each microphone, including reverberation.
# Images are real valued with shape [sources, microphones, length].
SOURCE_IMAGES = 'source_images'
# Boolean diarization labels of shape (sources, length) which indicates
# whether a source is active or not. For a nonexistent source, it is all zeros.
DIARIZATION_LABELS = 'diarization_labels'
# Speaker indices (global indices which are contiguous over all training data
# starting with 0) that are present in this meeting or meeting chunk with
# shape (sources,). If number of speakers present in the meeting is less
# than sources, for a non-existing speaker/source, the speaker index is
# set to -1. Note that, for a meeting sub-block, we still have all the
# speaker indices in the meeting even if not all the speakers are present
# in that meeting sub-block.
SPEAKER_INDEX = 'speaker_indices'
def get_inference_spec(num_receivers=1,
num_samples=None):
"""Returns a specification of features in tf.Examples in roomsim format."""
spec = {}
spec[Features.RECEIVER_AUDIO] = tf.FixedLenFeature(
[num_receivers, num_samples], tf.float32)
return spec
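# Sketch of how the spec could be consumed (assumes a serialized roomsim
# tf.Example; the variable names here are illustrative only):
#   spec = get_inference_spec(num_receivers=1, num_samples=16000)
#   features = tf.parse_single_example(serialized_example, spec)
#   receiver_audio = features[Features.RECEIVER_AUDIO]   # shape (1, 16000)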
def get_roomsim_spec(num_sources,
num_receivers,
num_samples):
"""Returns a specification of features in tf.Examples in roomsim format.
Args:
num_sources: Expected number of sources.
num_receivers: Number of microphones in array.
num_samples: Expected length of sources in samples. 'None' for variable.
Returns:
Feature specifications suitable to pass to tf.parse_example.
"""
spec = {}
spec[Features.RECEIVER_AUDIO] = tf.FixedLenFeature(
[num_receivers, num_samples], tf.float32)
spec[Features.SOURCE_IMAGES] = tf.FixedLenFeature(
[num_sources, num_receivers, num_samples], tf.float32)
return spec
def placeholders_from_spec(feature_spec):
"""Returns placeholders compatible with a given feature spec."""
placeholders = {}
for key, feature in feature_spec.items():
placeholders[key] = tf.placeholder(dtype=feature.dtype,
shape=[1] + feature.shape,
name=key)
return placeholders
def _read_meeting_list(meeting_list, meeting_length_type):
"""Reads meeting list from json file to get necessary information.
Args:
meeting_list: A meeting list read from a json file.
meeting_length_type: One of 'maximum', 'minimum' or 'average'.
Since typically meeting lengths are not fixed, we can
set the training/eval length to the maximum, minimum or average meeting
length in the json file based on the value of this argument. We
eventually pad or clip individual meetings to attain the desired constant
meeting length in our data reading pipeline.
Returns:
num_meetings: Number of meetings.
max_num_spk_per_meeting: Maximum number of speakers in a meeting.
max_num_utt_per_spk: Maximum number of utterances per speaker.
max_dia_seg_per_utt: Maximum diarization segments per utterance.
max_utt_length: Maximum utterance length.
meeting_length: Meeting length that will be used.
speaker_ids: A list of speaker ids that appear in meetings.
"""
max_num_spk_per_meeting = 0
max_num_utt_per_meeting = 0
meeting_lengths = []
speaker_id_to_count = collections.defaultdict(int)
num_meetings = len(meeting_list)
total_spk = 0
total_utt = 0
max_utt_length = 0
max_num_utt_per_spk = 0
max_dia_seg_per_utt = 0
for one_meeting in meeting_list:
sources_start_end = one_meeting['utterance_start_end']
meeting_length = int(one_meeting['duration'])
num_utt_in_meeting = len(sources_start_end)
max_num_utt_per_meeting = max(max_num_utt_per_meeting, num_utt_in_meeting)
utt2spk = []
spk2wavs = collections.defaultdict(list)
spk_utt_idx = collections.defaultdict(int)
for start, end, spkid, wav_path in sources_start_end:
max_utt_length = max(max_utt_length, end - start)
utt2spk.append(spkid)
spk2wavs[spkid].append(wav_path)
speaker_id_to_count[spkid] += 1
spk_utt_idx[spkid] += 1
diarization_info = \
one_meeting['diarization_label'][spkid][spk_utt_idx[spkid] - 1]
num_seg_in_utt = len(diarization_info)
max_dia_seg_per_utt = max(max_dia_seg_per_utt, num_seg_in_utt)
speakers_in_meeting = list(set(utt2spk))
num_spk = len(speakers_in_meeting)
for spkid in speakers_in_meeting:
max_num_utt_per_spk = max(max_num_utt_per_spk,
len(set(spk2wavs[spkid])))
max_num_spk_per_meeting = max(max_num_spk_per_meeting, num_spk)
total_spk += num_spk
total_utt += num_utt_in_meeting
meeting_lengths.append(meeting_length)
if meeting_length_type == 'maximum':
meeting_length = int(math.ceil(np.max(meeting_lengths)))
elif meeting_length_type == 'minimum':
meeting_length = int(math.floor(np.min(meeting_lengths)))
elif meeting_length_type == 'average':
meeting_length = int(round(np.mean(meeting_lengths)))
elif isinstance(meeting_length_type, int):
meeting_length = meeting_length_type
else:
raise ValueError(f'Unknown meeting_length_type={meeting_length_type}')
speaker_ids = sorted(speaker_id_to_count.keys())
tf.logging.info('Read %s meetings from json file.', num_meetings)
tf.logging.info('Average number of speakers per meeting = %f.',
total_spk / num_meetings)
tf.logging.info('Average number of utterances per speaker = %f.',
total_utt / total_spk)
return (num_meetings, max_num_spk_per_meeting, max_num_utt_per_spk,
max_dia_seg_per_utt, max_utt_length,
meeting_length, speaker_ids)
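# Worked example for meeting_length_type (illustrative numbers): with meeting
# durations [80000, 88000, 96000] samples, 'maximum' -> 96000, 'minimum'
# -> 80000, 'average' -> 88000, and an int such as 48000 is used verbatim.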
def _pad_mics_tf(signal, new_mics):
"""Pads new mic channels to an input tensor and returns the updated tensor.
Args:
signal: A tf.tensor of shape (input_mics, samples)
new_mics: The number of new mic channels to be added (integer scalar tensor)
Returns:
padded_signal: A tf.tensor of shape (input_mics + new_mics, samples)
"""
# Take first new_mics channels and shift them by 1 sample.
new_inputs = tf.roll(signal[:new_mics, :], shift=1, axis=-1)
# Add noise 1e-3 times the RMS value in the signal.
noise_scale = 1e-3 * tf.sqrt(tf.reduce_mean(tf.square(new_inputs)))
new_inputs += noise_scale * tf.random.normal(tf.shape(new_inputs))
return tf.concat((signal, new_inputs), axis=0)
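# Shape sketch (assumed values): a (2, 16000) input with new_mics=1 returns a
# (3, 16000) tensor whose extra channel is a 1-sample-shifted copy of channel 0
# plus low-level (1e-3 * RMS) Gaussian noise.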
def json_to_dataset(json_file,
batch_size,
parallel_readers=tf.data.experimental.AUTOTUNE,
randomize_order=False,
num_examples=-1,
prefetch_buffer_size=tf.data.experimental.AUTOTUNE,
shuffle_buffer_size=5,
repeat=True,
num_mics=1,
sample_rate=16000,
use_relative_path=True,
meeting_length_type='maximum',
num_meeting_subdivisions=1,
sensor_noise_range=(0.0, 0.0)):
r"""Fetches features from a dictionary and source .wav files.
Args:
json_file: A json file containing meeting information.
batch_size: The number of examples to read.
parallel_readers: Number of dataset.map operations that should happen in
parallel.
randomize_order: Whether to randomly shuffle features.
num_examples: Limit number of examples to this value. Unlimited if -1.
prefetch_buffer_size: How many batches to prefetch.
shuffle_buffer_size: The size of the shuffle buffer.
repeat: If True, repeat the dataset.
num_mics: The expected number of mics in source wav files.
sample_rate: Sample rate of wav files read.
use_relative_path: If True, the path for .wav files is relative to the
json file, otherwise, the paths are absolute.
meeting_length_type: 'maximum', 'minimum' or 'average'. Can also specify
an integer value which is the length in samples, which will be used.
num_meeting_subdivisions: If > 1, chop the meeting in time into this
many chunks.
sensor_noise_range: Range of standard deviation for sensor noise. If
sensor_noise_range[1] <= 0.0, then no sensor noise is added. Otherwise,
white Gaussian sensor noise with uniformly random standard deviation
from the provided range is added as the first reference signal.
Returns:
A batch_size number of features constructed from wav files.
Raises:
ValueError if max_sources_override is less than the assumed max number of sources.
"""
tf.logging.info('Reading %s.', json_file)
with open(json_file, 'r') as f:
meeting_list = json.load(f)
(num_meetings, max_num_spk, max_num_utt_per_spk, max_dia_seg_per_utt,
max_utt_length, samples, speaker_id_list) = _read_meeting_list(
meeting_list, meeting_length_type)
tf.logging.info('Maximum number of speakers per meeting = %s', max_num_spk)
tf.logging.info('Maximum number of utterances per speaker = %s',
max_num_utt_per_spk)
tf.logging.info('Maximum diarization segments per utterance = %s',
max_dia_seg_per_utt)
tf.logging.info('Maximum utterance length in seconds = %s',
max_utt_length/sample_rate)
tf.logging.info('Used meeting length in seconds = %s', samples/sample_rate)
tf.logging.info('Number of speakers seen in all meetings = %s',
len(speaker_id_list))
tf.logging.info('Using %s parallel readers.', parallel_readers)
tf.logging.info('shuffle_buffer=%s, prefetch_buffer=%s, num_mics=%s, '
'randomize=%s.', shuffle_buffer_size, prefetch_buffer_size,
num_mics, randomize_order)
if use_relative_path:
base_path = os.path.dirname(json_file)
spkid2idx = {key: idx for idx, key in enumerate(speaker_id_list)}
def utterance_info_generator():
"""Yields utterance informations from each meeting.
Utterance info is in the form of a 6-tuple:
wav_path, diarization, spkidx, meeting_scale, start, gain.
"""
default_diarization =
|
np.zeros((max_dia_seg_per_utt, 2), dtype=np.int32)
|
numpy.zeros
|
import time
import numpy as np
from scipy import sparse
from radon_server.radon_thread import RadonTransformThread
class FastSlantStackTransform(RadonTransformThread):
def get_matrix_ratio(self):
return 2
def get_reconstruct_multiply(self):
return 1
def get_algorithm_name(self):
return "fss"
def run_transform(self, image, n, variant=None):
self.fss(image, n)
def ffft(self, x, alpha):
# y=ffft(x,alpha)
# fast algorithm for the fractional fft
# inputs:
# x - the input vector
# alpha - the scaling coefficient
# <NAME> - Jan 21 2012
x = x[:]
x = np.fft.fftshift(x) # centering x indices around zero
n = len(x)
E = 1j * np.pi * alpha / n
ivec = np.transpose(np.arange(-(n / 2), (n / 2)))
Dd = np.exp(-E * ivec * ivec)
Dx = Dd * x # multiplication with the appropriate diagonal matrix of complex exponentials
z = np.concatenate((Dx, np.zeros(n, dtype='float64'))) # zero padding
ivec2 = (np.arange(0, n, dtype='float64')).transpose()
T1stCol = np.exp(E * ivec2 * ivec2) # 1st column of the Toeplitz matrix
C1stCol = np.concatenate((T1stCol, [0], np.flipud(T1stCol[1:n]))) # 1st column of the circulant matrix
D2d = np.fft.fft(C1stCol) / np.sqrt(n) # applying normalized fft to the first column of C
Fz = np.fft.fft(z)
D2Fz = D2d * Fz #
Cz = np.fft.ifft(D2Fz) # the convolution result of z and the 1st column of C (which is equivalent to C*z)
TDx = Cz[0:n] # omitting the last n entries of Cz
y = Dd * TDx # Left multiplication with the appropriate diagonal matrix
return y
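# Note (added for clarity, based on reading the code above): this is the
# standard chirp-z / Bluestein construction -- the alpha*j*k cross term is
# rewritten via 2jk = j^2 + k^2 - (j-k)^2, so the fractional DFT becomes a
# diagonal chirp scaling (Dd), a circulant convolution evaluated with FFTs
# (C1stCol / D2d), and a final chirp scaling, giving O(n log n) cost.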
# written by <NAME> 18/4/2007
def ffft2(self, x, alpha):
m = len(x)
n = 2 * m
t = np.zeros(n, dtype='complex')
y = np.zeros(n, dtype='complex')
j = np.arange(0, m, dtype='float64')
y[0:m] = x * np.exp(1j * np.pi * alpha * (j - j * j / m))
t[0:m] = np.exp(-1j * np.pi * alpha / m * j * j)
t[m + 1:2 * m] = np.flip(t[1:m], 0)
t = np.fft.fft(np.conj(t))
y = np.fft.fft(y) * t
y = np.fft.ifft(y)
y = y[0:m] * np.exp(1j * np.pi * alpha * (j - m / 2 - j * j / m))
return y
def ppfft(self, image):
n = len(image)
Y = np.zeros((2 * n, 2 * n), dtype='complex')
# Basically Horizontal Lines
PaddedX = np.concatenate((np.zeros((n // 2, n)), image, np.zeros((n // 2, n))))
Z = np.fft.fftshift(PaddedX, 0)
Z = np.fft.fft(Z, axis=0)
Z = np.fft.fftshift(Z, 0)
for r in range(0, n):
alpha = float(n - r) / n
t = self.ffft2(Z[r, :], alpha)
Y[0:n, r] = t.transpose()
alpha = float(-r) / n
t = self.ffft2(Z[r + n, :], alpha)
Y[0:n, r + n] = t.transpose()
self.update_progress(r, n * 3)
PaddedX = np.hstack((np.zeros((n, n // 2)), image, np.zeros((n, n // 2)))).transpose()
Z = np.fft.fftshift(PaddedX, 0)
Z = np.fft.fft(Z, axis=0)
Z =
|
np.fft.fftshift(Z, 0)
|
numpy.fft.fftshift
|
import torch
import torch.nn as nn
import torch.utils.data as data
from torch.autograd import Variable as V
import cv2
import os
import numpy as np
from time import time
from networks.dinknet import DUNet
BATCHSIZE_PER_CARD = 2
class TTAFrame():
def __init__(self, net):
self.net = net().cuda()
self.net = torch.nn.DataParallel(self.net, device_ids=range(torch.cuda.device_count()))
def test_one_img(self, img, evalmode=True):
if evalmode:
self.net.eval()
batchsize = torch.cuda.device_count() * BATCHSIZE_PER_CARD
if batchsize >= 8:
return self.test_one_img_1(img)
elif batchsize >= 4:
return self.test_one_img_2(img)
elif batchsize >= 2:
return self.test_one_img_4(img)
def test_one_img_8(self, img):
img90 = np.array(np.rot90(img))
img1 = np.concatenate([img[None],img90[None]])
img2 = np.array(img1)[:,::-1]
img3 = np.array(img1)[:,:,::-1]
img4 = np.array(img2)[:,:,::-1]
img1 = img1.transpose(0,3,1,2)
img2 = img2.transpose(0,3,1,2)
img3 = img3.transpose(0,3,1,2)
img4 = img4.transpose(0,3,1,2)
img1 = V(torch.Tensor(np.array(img1, np.float32)/255.0 * 3.2 -1.6).cuda())
img2 = V(torch.Tensor(np.array(img2, np.float32)/255.0 * 3.2 -1.6).cuda())
img3 = V(torch.Tensor(
|
np.array(img3, np.float32)
|
numpy.array
|
from __future__ import print_function
import os
import warnings
import numpy as np
import fitsio
from tractor.splinesky import SplineSky
from tractor import PixelizedPsfEx, PixelizedPSF
from astrometry.util.fits import fits_table
from legacypipe.utils import read_primary_header
from legacypipe.bits import DQ_BITS
import logging
logger = logging.getLogger('legacypipe.image')
def info(*args):
from legacypipe.utils import log_info
log_info(logger, args)
def debug(*args):
from legacypipe.utils import log_debug
log_debug(logger, args)
'''
Base class for handling the images we process. These are all
processed by variants of the NOAO Community Pipeline (CP), so this
base class is pretty specific.
'''
def remap_dq_cp_codes(dq, ignore_codes=None):
'''
Some versions of the CP use integer codes, not bit masks.
This converts them.
1 = bad
2 = no value (for remapped and stacked data)
3 = saturated
4 = bleed mask
5 = cosmic ray
6 = low weight
7 = diff detect (multi-exposure difference detection from median)
8 = long streak (e.g. satellite trail)
'''
if ignore_codes is None:
ignore_codes = []
dqbits = np.zeros(dq.shape, np.int16)
# Some images (eg, 90prime//CP20160403/ksb_160404_103333_ood_g_v1-CCD1.fits)
# around saturated stars have the core with value 3 (satur), surrounded by one
# pixel of value 1 (bad), and then more pixels with value 4 (bleed).
# Set the BAD ones to SATUR.
from scipy.ndimage.morphology import binary_dilation
dq[np.logical_and(dq == 1, binary_dilation(dq == 3))] = 3
for code,bitname in [(1, 'badpix'),
(2, 'badpix'),
(3, 'satur'),
(4, 'bleed'),
(5, 'cr'),
(6, 'badpix'),
(7, 'trans'),
(8, 'trans'),
]:
if code in ignore_codes:
continue
dqbits[dq == code] |= DQ_BITS[bitname]
return dqbits
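# Illustrative mapping (hypothetical 2x2 input): remap_dq_cp_codes applied to
# np.array([[3, 0], [5, 1]], np.int16) yields DQ_BITS['satur'], 0,
# DQ_BITS['cr'] and DQ_BITS['badpix'] in the corresponding pixels (subject to
# the bad-next-to-saturated relabelling above).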
def apply_amp_correction_northern(camera, band, expnum, ccdname, mjdobs,
img, invvar, x0, y0):
from pkg_resources import resource_filename
dirname = resource_filename('legacypipe', 'data')
fn = os.path.join(dirname, 'ampcorrections.fits')
A = fits_table(fn)
# Find relevant row -- camera, filter, ccdname, mjd_start, mjd_end,
# And then multiple rows of:
# xlo, xhi, ylo, yhi -> dzp
# that might overlap this image.
I = np.flatnonzero([(cam.strip() == camera) and
(f.strip() == band) and
(ccd.strip() == ccdname) and
(not(np.isfinite(mjdstart)) or (mjdobs >= mjdstart)) and
(not(np.isfinite(mjdend )) or (mjdobs <= mjdend))
for cam,f,ccd,mjdstart,mjdend
in zip(A.camera, A.filter, A.ccdname,
A.mjd_start, A.mjd_end)])
info('Found', len(I), 'relevant rows in amp-corrections file.')
if len(I) == 0:
return
if img is not None:
H,W = img.shape
else:
H,W = invvar.shape
# x0,y0 are integer pixel coords
# x1,y1 are INCLUSIVE integer pixel coords
x1 = x0 + W - 1
y1 = y0 + H - 1
debug_corr = False
if debug_corr:
count_corr = np.zeros((H,W), np.uint8)
corr_map = np.zeros((H,W), np.float32)
fitsio.write('amp-corr-image-before-%s-%s-%s.fits' % (camera, expnum, ccdname), img, clobber=True)
for a in A[I]:
# In the file, xhi,yhi are NON-inclusive.
if a.xlo > x1 or a.xhi <= x0:
continue
if a.ylo > y1 or a.yhi <= y0:
continue
# Overlap!
info('Found overlap: image x', x0, x1, 'and amp range', a.xlo, a.xhi-1,
'and image y', y0, y1, 'and amp range', a.ylo, a.yhi-1)
xstart = max(0, a.xlo - x0)
xend = min(W, a.xhi - x0)
ystart = max(0, a.ylo - y0)
yend = min(H, a.yhi - y0)
info('Range in image: x', xstart, xend, ', y', ystart, yend, '(with image size %i x %i)' % (W,H))
scale = 10.**(0.4 * a.dzp)
info('dzp', a.dzp, '-> scaling image by', scale)
if img is not None:
img [ystart:yend, xstart:xend] *= scale
if invvar is not None:
invvar[ystart:yend, xstart:xend] /= scale**2
if debug_corr:
count_corr[ystart:yend, xstart:xend] += 1
corr_map[ystart:yend, xstart:xend] = scale
if debug_corr:
assert(np.all(count_corr == 1))
fitsio.write('amp-corr-image-after-%s-%s-%s.fits' % (camera, expnum, ccdname), img, clobber=True)
fitsio.write('amp-corr-map-%s-%s-%s.fits' % (camera, expnum, ccdname), corr_map, clobber=True)
class LegacySurveyImage(object):
'''
A base class containing common code for the images we handle.
You probably shouldn't need to directly instantiate this class,
but rather use the recipe described in the __init__ method.
Objects of this class represent the metadata we have on an image,
and are used to handle some of the details of going from an entry
in the CCDs table to a tractor Image object.
'''
# this is defined here for testing purposes (to handle the small
# images used in unit tests): box size for SplineSky model
splinesky_boxsize = 1024
def __init__(self, survey, ccd, image_fn=None, image_hdu=0):
'''
Create a new LegacySurveyImage object, from a LegacySurveyData object,
and one row of a CCDs fits_table object.
You may not need to instantiate this class directly, instead using
survey.get_image_object():
survey = LegacySurveyData()
# targetwcs = ....
# ccds = survey.ccds_touching_wcs(targetwcs, ccdrad=None)
ccds = survey.get_ccds()
im = survey.get_image_object(ccds[0])
# which does the same thing as:
im = DecamImage(survey, ccds[0])
Or, if you have a Community Pipeline-processed input file and
FITS HDU extension number:
survey = LegacySurveyData()
ccds = exposure_metadata([filename], hdus=[hdu])
im = DecamImage(survey, ccds[0])
Perhaps the most important method in this class is
*get_tractor_image*.
'''
super(LegacySurveyImage, self).__init__()
self.survey = survey
if ccd is None and image_fn is None:
raise RuntimeError('Either "ccd" or "image_fn" must be set')
if image_fn is not None:
# Read metadata from image header.
self.image_filename = image_fn
self.imgfn = os.path.join(self.survey.get_image_dir(), image_fn)
print('Survey image dir:', self.survey.get_image_dir())
primhdr = read_primary_header(self.imgfn)
self.band = self.get_band(primhdr)
self.propid = self.get_propid(primhdr)
self.expnum = self.get_expnum(primhdr)
self.camera = self.get_camera(primhdr)
#self.date_obs = self.primhdr['DATE-OBS']
#self.ut = self.get_ut(self.primhdr)
#self.obj = self.primhdr['OBJECT']
namechange = {'date': 'procdate',
'mjd-obs': 'mjdobs'}
for key in ['EXPTIME', 'MJD-OBS', 'HA', 'DATE', 'PLVER', 'PLPROCID']:
val = primhdr.get(key)
if isinstance(val, str):
val = val.strip()
if len(val) == 0:
raise ValueError('Empty header card: %s' % key)
key = namechange.get(key.lower(), key.lower())
key = key.replace('-', '_')
setattr(self, key, val)
# hdu, ccdname, width, height, pixscale
self.hdu = image_hdu
if image_hdu is not None:
hdr = self.read_image_header(ext=image_hdu)
# Parse ZNAXIS[12] / NAXIS[12] ?
info = fitsio.FITS(self.imgfn)[image_hdu].get_info()
#print('Image info:', info)
self.height,self.width = info['dims']
self.hdu = info['hdunum'] - 1
self.ccdname = hdr['EXTNAME'].strip().upper()
self.pixscale = 3600. * np.sqrt(np.abs(hdr['CD1_1'] * hdr['CD2_2'] -
hdr['CD1_2'] * hdr['CD2_1']))
self.fwhm = self.get_fwhm(primhdr, hdr)
else:
self.ccdname = ''
self.dq_hdu = self.hdu
self.wt_hdu = self.hdu
self.sig1 = 0.
self.ccdzpt = 0.
self.dradec = (0., 0.)
else:
# Get metadata from ccd table entry.
# Note here that "image_filename" is the *relative* path (from image_dir),
# while "imgfn" is the full path.
imgfn = ccd.image_filename.strip()
self.image_filename = imgfn
self.imgfn = os.path.join(self.survey.get_image_dir(), imgfn)
self.hdu = ccd.image_hdu
self.dq_hdu = ccd.image_hdu
self.wt_hdu = ccd.image_hdu
self.expnum = ccd.expnum
self.ccdname = ccd.ccdname.strip()
self.band = ccd.filter.strip()
self.exptime = ccd.exptime
self.camera = ccd.camera.strip()
self.fwhm = ccd.fwhm
self.propid = ccd.propid
self.mjdobs = ccd.mjd_obs
self.width = ccd.width
self.height = ccd.height
# In nanomaggies.
self.sig1 = ccd.sig1
# Use dummy values to accommodate old calibs (which will fail later
# unless old-calibs-ok=True)
try:
self.plver = getattr(ccd, 'plver', 'xxx').strip()
except:
print('Failed to read PLVER header card as a string. This probably means your python fitsio package is too old.')
print('Try upgrading to version 1.0.5 or later.')
raise
self.procdate = getattr(ccd, 'procdate', 'xxxxxxx').strip()
self.plprocid = getattr(ccd, 'plprocid', 'xxxxxxx').strip()
# Photometric and astrometric zeropoints
self.ccdzpt = ccd.ccdzpt
self.dradec = (ccd.ccdraoff / 3600., ccd.ccddecoff / 3600.)
# in arcsec/pixel
self.pixscale = 3600. * np.sqrt(np.abs(ccd.cd1_1 * ccd.cd2_2 -
ccd.cd1_2 * ccd.cd2_1))
self.compute_filenames()
# Which Data Quality bits mark saturation?
self.dq_saturation_bits = DQ_BITS['satur'] # | DQ_BITS['bleed']
# Calib filenames
basename = os.path.basename(self.image_filename)
### HACK -- keep only the first dotted component of the base filename.
# This allows, eg, create-testcase.py to use image filenames like BASE.N3.fits
# with only a single HDU.
basename = basename.split('.')[0]
imgdir = os.path.dirname(self.image_filename)
calibdir = self.survey.get_calib_dir()
calname = basename+"-"+self.ccdname
self.name = calname
self.sefn = os.path.join(calibdir, 'se', imgdir, basename, calname + '-se.fits')
self.psffn = os.path.join(calibdir, 'psfex-single', imgdir, basename, calname + '-psfex.fits')
self.skyfn = os.path.join(calibdir, 'sky-single', imgdir, basename, calname + '-splinesky.fits')
self.merged_psffn = os.path.join(calibdir, 'psfex', imgdir, basename + '-psfex.fits')
self.merged_skyfn = os.path.join(calibdir, 'sky', imgdir, basename + '-splinesky.fits')
self.old_merged_skyfns = [os.path.join(calibdir, imgdir, basename + '-splinesky.fits')]
self.old_merged_psffns = [os.path.join(calibdir, imgdir, basename + '-psfex.fits')]
# not used by this code -- here for the sake of legacyzpts/merge_calibs.py
self.old_single_psffn = os.path.join(calibdir, imgdir, basename, calname + '-psfex.fits')
self.old_single_skyfn = os.path.join(calibdir, imgdir, basename, calname + '-splinesky.fits')
# for debugging purposes
self.print_imgpath = '/'.join(self.imgfn.split('/')[-5:])
def override_ccd_table_types(self):
return {}
def validate_version(self, *args, **kwargs):
return validate_version(*args, **kwargs)
def compute_filenames(self):
# Compute data quality and weight-map filenames
self.dqfn = self.imgfn.replace('_ooi_', '_ood_').replace('_oki_','_ood_')
self.wtfn = self.imgfn.replace('_ooi_', '_oow_').replace('_oki_','_oow_')
assert(self.dqfn != self.imgfn)
assert(self.wtfn != self.imgfn)
def get_extension_list(self, debug=False):
F = fitsio.FITS(self.imgfn)
exts = []
for f in F[1:]:
exts.append(f.get_extname())
if debug:
break
return exts
def nominal_zeropoint(self, band):
return self.zp0[band]
def extinction(self, band):
return self.k_ext[band]
def calibration_good(self, primhdr):
'''Did the CP processing succeed for this image? If not, no need to process further.
'''
return primhdr.get('WCSCAL', '').strip().lower().startswith('success')
def get_photometric_calibrator_cuts(self, name, cat):
'''Returns whether to keep sources in the *cat* of photometric calibration
stars from, eg, Pan-STARRS1 or SDSS.
'''
if name == 'ps1':
gicolor= cat.median[:,0] - cat.median[:,2]
return ((cat.nmag_ok[:, 0] > 0) &
(cat.nmag_ok[:, 1] > 0) &
(cat.nmag_ok[:, 2] > 0) &
(gicolor > 0.4) &
(gicolor < 2.7))
if name == 'sdss':
return np.ones(len(cat), bool)
raise RuntimeError('Unknown photometric calibration set: %s' % name)
def photometric_calibrator_to_observed(self, name, cat):
if name == 'ps1':
from legacypipe.ps1cat import ps1cat
colorterm = self.colorterm_ps1_to_observed(cat.median, self.band)
ps1band = ps1cat.ps1band[self.band]
return cat.median[:, ps1band] + np.clip(colorterm, -1., +1.)
elif name == 'sdss':
from legacypipe.ps1cat import sdsscat
colorterm = self.colorterm_sdss_to_observed(cat.psfmag, self.band)
band = sdsscat.sdssband[self.band]
return cat.psfmag[:, band] + np.clip(colorterm, -1., +1.)
else:
raise RuntimeError('No photometric conversion from %s to DECam' % name)
def colorterm_ps1_to_observed(self, cat, band):
raise RuntimeError('Not implemented: generic colorterm_ps1_to_observed')
def colorterm_sdss_to_observed(self, cat, band):
raise RuntimeError('Not implemented: generic colorterm_sdss_to_observed')
def get_psfex_merged_filename(self):
return self.merged_psffn
def get_splinesky_merged_filename(self):
return self.merged_skyfn
def get_psfex_unmerged_filename(self):
return self.psffn
def get_splinesky_unmerged_filename(self):
return self.skyfn
def get_radec_bore(self, primhdr):
from astrometry.util.starutil_numpy import hmsstring2ra, dmsstring2dec
# In some DECam exposures, RA,DEC are floating-point, but RA is in *decimal hours*.
# In others, RA does not exist (eg CP/V4.8.2a/CP20160824/c4d_160825_062109_ooi_g_ls9.fits.fz)
# Fall back to TELRA in that case.
ra_bore = dec_bore = None
if 'RA' in primhdr.keys():
try:
ra_bore = hmsstring2ra(primhdr['RA'])
dec_bore = dmsstring2dec(primhdr['DEC'])
except:
pass
if dec_bore is None and 'TELRA' in primhdr.keys():
ra_bore = hmsstring2ra(primhdr['TELRA'])
dec_bore = dmsstring2dec(primhdr['TELDEC'])
if dec_bore is None:
raise ValueError('Failed to parse RA or TELRA in primary header to get telescope boresight')
return ra_bore, dec_bore
def get_camera(self, primhdr):
cam = primhdr['INSTRUME']
cam = cam.lower()
return cam
def get_gain(self, primhdr, hdr):
return primhdr['GAIN']
def get_band(self, primhdr):
band = primhdr['FILTER']
band = band.split()[0]
return band
def get_propid(self, primhdr):
return primhdr['PROPID']
def get_airmass(self, primhdr, imghdr, ra, dec):
airmass = primhdr['AIRMASS']
if airmass is None:
airmass = self.recompute_airmass(primhdr, ra, dec)
return airmass
def recompute_airmass(self, primhdr, ra, dec):
site = self.get_site()
if site is None:
print('AIRMASS missing and site not defined.')
return None
print('Recomputing AIRMASS')
from astropy.time import Time as apyTime
from astropy.coordinates import SkyCoord, AltAz
time = apyTime(self.mjdobs + 0.5*self.exptime/3600./24., format='mjd')
coords = SkyCoord(ra, dec, unit='deg')
altaz = coords.transform_to(AltAz(obstime=time, location=site))
airmass = altaz.secz
return airmass
def get_site(self):
return None
def get_expnum(self, primhdr):
return primhdr['EXPNUM']
def get_fwhm(self, primhdr, imghdr):
return imghdr.get('FWHM', np.nan)
# Used during zeropointing
def scale_image(self, img):
return img
def scale_weight(self, img):
return img
def __str__(self):
return self.name
def __repr__(self):
return str(self)
def check_for_cached_files(self, survey):
for key in self.get_cacheable_filename_variables():
fn = getattr(self, key, None)
#debug('Image: checking cache for variable', key, '->', fn)
if fn is None:
continue
cfn = survey.check_cache(fn)
#debug('Checking for cached', key, ':', fn, '->', cfn)
if cfn != fn:
print('Using cached', cfn)
setattr(self, key, cfn)
def get_cacheable_filename_variables(self):
'''
These are names of self.X variables that are filenames that
could be cached.
'''
return ['imgfn', 'dqfn', 'wtfn', 'psffn', 'merged_psffn',
'merged_skyfn', 'skyfn']
def get_good_image_slice(self, extent, get_extent=False):
'''
extent = None or extent = [x0,x1,y0,y1]
If *get_extent* = True, returns the new [x0,x1,y0,y1] extent.
Returns a new pair of slices, or *extent* if the whole image is good.
'''
gx0,gx1,gy0,gy1 = self.get_good_image_subregion()
if gx0 is None and gx1 is None and gy0 is None and gy1 is None:
return extent
if extent is None:
imh,imw = self.get_image_shape()
extent = (0, imw, 0, imh)
x0,x1,y0,y1 = extent
if gx0 is not None:
x0 = max(x0, gx0)
if gy0 is not None:
y0 = max(y0, gy0)
if gx1 is not None:
x1 = min(x1, gx1)
if gy1 is not None:
y1 = min(y1, gy1)
if get_extent:
return (x0,x1,y0,y1)
return slice(y0,y1), slice(x0,x1)
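# Example (illustrative numbers): if get_good_image_subregion() returns
# (0, 2000, None, None) and the requested extent is (1500, 2500, 0, 100),
# x is clipped to [1500, 2000) and y is untouched, so the method returns
# (slice(0, 100), slice(1500, 2000)).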
def get_good_image_subregion(self):
'''
Returns x0,x1,y0,y1 of the good region of this chip,
or None if no cut should be applied to that edge; returns
(None,None,None,None) if the whole chip is good.
This cut is applied in addition to any masking in the mask or
invvar map.
'''
return None,None,None,None
def get_tractor_image(self, slc=None, radecpoly=None,
gaussPsf=False, pixPsf=True, hybridPsf=True,
normalizePsf=True,
apodize=False,
readsky=True,
nanomaggies=True, subsky=True, tiny=10,
dq=True, invvar=True, pixels=True,
no_remap_invvar=False,
constant_invvar=False,
old_calibs_ok=False,
trim_edges=True):
'''
Returns a tractor.Image ("tim") object for this image.
Options describing a subimage to return:
- *slc*: y,x slice objects
- *radecpoly*: numpy array, shape (N,2), RA,Dec polygon describing
bounding box to select.
- *trim_edges*: if True, drop fully masked rows and columns at the
edge of the image.
Options determining the PSF model to use:
- *gaussPsf*: single circular Gaussian PSF based on header FWHM value.
- *pixPsf*: pixelized PsfEx model.
- *hybridPsf*: combo pixelized PsfEx + Gaussian approx.
Options determining the units of the image:
- *nanomaggies*: convert the image to be in units of NanoMaggies;
*tim.zpscale* contains the scale value the image was divided by.
- *subsky*: instantiate and subtract the initial sky model,
leaving a constant zero sky model?
'''
import astropy.time
from tractor.tractortime import TAITime
from tractor.image import Image
from tractor.basics import NanoMaggies, LinearPhotoCal
get_dq = dq
get_invvar = invvar
primhdr = self.read_image_primary_header()
for fn,kw in [(self.imgfn, dict(data=primhdr)), (self.wtfn, {}), (self.dqfn, {})]:
if not self.validate_version(fn, 'primaryheader',
self.expnum, self.plver, self.plprocid,
cpheader=True, old_calibs_ok=old_calibs_ok, **kw):
raise RuntimeError('Version validation failed for filename %s (PLVER/PLPROCID)' % fn)
band = self.band
wcs = self.get_wcs()
orig_slc = slc
x0,x1,y0,y1,slc = self.get_image_extent(wcs=wcs, slc=slc, radecpoly=radecpoly)
if y1 - y0 < tiny or x1 - x0 < tiny:
debug('Skipping tiny subimage (y %i to %i, x %i to %i)' % (y0, y1, x0, x1))
debug('slice:', orig_slc, '->', slc, 'radecpoly', radecpoly)
return None
# Read image pixels
if pixels:
debug('Reading image slice:', slc)
img,imghdr = self.read_image(header=True, slc=slc)
self.check_image_header(imghdr)
else:
img = np.zeros((y1-y0, x1-x0), np.float32)
imghdr = self.read_image_header()
assert(np.all(np.isfinite(img)))
# Read data-quality (flags) map and zero out the invvars of masked pixels
dq = None
if get_invvar:
get_dq = True
if get_dq:
dq,dqhdr = self.read_dq(slc=slc, header=True)
if dq is not None:
dq = self.remap_dq(dq, dqhdr)
# Read inverse-variance (weight) map
if get_invvar:
invvar = self.read_invvar(slc=slc, dq=dq)
else:
invvar = np.ones_like(img) * 1./self.sig1**2
if np.all(invvar == 0.):
debug('Skipping zero-invvar image')
return None
self.fix_saturation(img, dq, invvar, primhdr, imghdr, slc)
template_meta = None
if pixels:
template = self.get_sky_template(slc=slc, old_calibs_ok=old_calibs_ok)
if template is not None:
debug('Subtracting sky template')
# unpack
template,template_meta = template
img -= template
# for create_testcase: omit remappings.
if not no_remap_invvar:
invvar = self.remap_invvar(invvar, primhdr, img, dq)
# header 'FWHM' is in pixels
psf_fwhm = self.get_fwhm(primhdr, imghdr)
assert(psf_fwhm > 0)
psf_sigma = psf_fwhm / 2.35
# Ugly: occasionally the CP marks edge pixels with SATUR (and
# nearby pixels with BLEED). Convert connected blobs of
# SATUR|BLEED pixels that are touching the left or right (not
# top/bottom) to EDGE. An example of this is
# mosaic-121450-CCD3-z at RA,Dec (261.4182, 58.8528). Note
# that here we're not demanding it be the full CCD edge; we're
# checking our x0,x1 subregion, which is not ideal.
# Here we're assuming the bleed direction is vertical.
# This step is not redundant with the following trimming of
# masked edge pixels because the SATUR|BLEED pixels in these
# cases do not fill full columns, so they still cause issues
# with source detection.
if get_dq:
from scipy.ndimage.measurements import label
bits = DQ_BITS['satur'] | DQ_BITS['bleed']
if np.any(dq[:,0] & bits) or np.any(dq[:,-1] & bits):
blobmap,_ = label(dq & bits)
badblobs = np.unique(np.append(blobmap[:,0], blobmap[:,-1]))
badblobs = badblobs[badblobs != 0]
#debug('Bad blobs:', badblobs)
for bad in badblobs:
n = np.sum(blobmap == bad)
debug('Setting', n, 'edge SATUR|BLEED pixels to EDGE')
dq[blobmap == bad] = DQ_BITS['edge']
if trim_edges:
# Drop rows and columns at the image edges that are all masked.
for y0_new in range(y0, y1):
if not np.all(invvar[y0_new-y0,:] == 0):
break
for y1_new in reversed(range(y0, y1)):
if not np.all(invvar[y1_new-y0,:] == 0):
break
for x0_new in range(x0, x1):
if not np.all(invvar[:,x0_new-x0] == 0):
break
for x1_new in reversed(range(x0, x1)):
if not np.all(invvar[:,x1_new-x0] == 0):
break
y1_new += 1
x1_new += 1
if x0_new != x0 or x1_new != x1 or y0_new != y0 or y1_new != y1:
#debug('Old x0,x1', x0,x1, 'y0,y1', y0,y1)
#debug('New x0,x1', x0_new,x1_new, 'y0,y1', y0_new,y1_new)
if y1_new - y0_new < tiny or x1_new - x0_new < tiny:
debug('Skipping tiny subimage (after clipping masked edges)')
return None
img = img [y0_new-y0 : y1_new-y0, x0_new-x0 : x1_new-x0]
invvar = invvar[y0_new-y0 : y1_new-y0, x0_new-x0 : x1_new-x0]
if get_dq:
dq = dq[y0_new-y0 : y1_new-y0, x0_new-x0 : x1_new-x0]
x0,x1,y0,y1 = x0_new,x1_new,y0_new,y1_new
slc = slice(y0,y1), slice(x0,x1)
if readsky:
sky = self.read_sky_model(slc=slc, primhdr=primhdr, imghdr=imghdr,
old_calibs_ok=old_calibs_ok,
template_meta=template_meta)
else:
from tractor.sky import ConstantSky
sky = ConstantSky(0.)
skymod = np.zeros_like(img)
sky.addTo(skymod)
midsky = np.median(skymod)
orig_sky = sky
if subsky:
from tractor.sky import ConstantSky
debug('Instantiating and subtracting sky model')
if pixels:
img -= skymod
zsky = ConstantSky(0.)
zsky.version = getattr(sky, 'version', '')
zsky.plver = getattr(sky, 'plver', '')
del skymod
sky = zsky
del zsky
orig_zpscale = zpscale = NanoMaggies.zeropointToScale(self.ccdzpt)
if nanomaggies:
# Scale images to Nanomaggies
img /= zpscale
invvar = invvar * zpscale**2
if not subsky:
sky.scale(1./zpscale)
zpscale = 1.
if constant_invvar:
assert(nanomaggies)
invvar[invvar > 0] = 1./self.sig1**2
if apodize and slc is not None:
sy,sx = slc
y0,y1 = sy.start, sy.stop
x0,x1 = sx.start, sx.stop
H,W = invvar.shape
# Compute apodization ramps -- separately for x and y to
# handle narrow images
xx = np.linspace(-np.pi, np.pi, min(W,100))
rampx = np.arctan(xx)
rampx = (rampx - rampx.min()) / (rampx.max() - rampx.min())
xx = np.linspace(-np.pi, np.pi, min(H,100))
rampy = np.arctan(xx)
rampy = (rampy - rampy.min()) / (rampy.max() - rampy.min())
# bottom
invvar[:len(rampy),:] *= rampy[:,np.newaxis]
# left
invvar[:,:len(rampx)] *= rampx[np.newaxis,:]
# top
invvar[-len(rampy):,:] *= rampy[::-1][:,np.newaxis]
# right
invvar[:,-len(rampx):] *= rampx[::-1][np.newaxis,:]
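# (Added note: each arctan ramp rises from ~0 at the image edge to ~1 over at
# most 100 pixels, so the inverse variance -- and hence the weight of edge
# pixels -- is tapered smoothly rather than cut off abruptly.)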
if False:
import pylab as plt
plt.clf()
plt.imshow(invvar, interpolation='nearest', origin='lower')
plt.savefig('apodized-%i-%s.png' % (self.expnum, self.ccdname))
if subsky:
# Warn if the subtracted sky doesn't seem to work well
# (can happen, eg, if sky calibration product is inconsistent with
# the data)
imgmed = np.median(img[invvar>0])
if np.abs(imgmed) > self.sig1:
warnings.warn('image median is %.2f sigma away from zero!' % (imgmed / self.sig1))
if subsky:
self.apply_amp_correction(img, invvar, x0, y0)
# Convert MJD-OBS, in UTC, into TAI
mjd_tai = astropy.time.Time(self.mjdobs, format='mjd', scale='utc').tai.mjd
tai = TAITime(None, mjd=mjd_tai)
# tractor WCS object
twcs = self.get_tractor_wcs(wcs, x0, y0, primhdr=primhdr, imghdr=imghdr,
tai=tai)
psf = self.read_psf_model(x0, y0, gaussPsf=gaussPsf, pixPsf=pixPsf,
hybridPsf=hybridPsf, normalizePsf=normalizePsf,
psf_sigma=psf_sigma,
w=x1 - x0, h=y1 - y0,
old_calibs_ok=old_calibs_ok)
tim = Image(img, invvar=invvar, wcs=twcs, psf=psf,
photocal=LinearPhotoCal(zpscale, band=band),
sky=sky, name=self.name + ' ' + band)
assert(np.all(np.isfinite(tim.getInvError())))
tim.band = band
# HACK -- create a local PSF model to instantiate the PsfEx
# model, which handles non-unit pixel scaling.
fullpsf = tim.psf
th,tw = tim.shape
tim.psf = fullpsf.constantPsfAt(tw//2, th//2)
tim.psfnorm = self.psf_norm(tim)
# Galaxy-detection norm
tim.galnorm = self.galaxy_norm(tim)
#print('Galnorm:', tim.galnorm)
if not (np.isfinite(tim.psfnorm) and
|
np.isfinite(tim.galnorm)
|
numpy.isfinite
|
# ---------------------------------------------------------- #
# ---------------- makeClassificationStats.py -------------- #
# --------- https://github.com/jhoormann/RMCodeDump -------- #
# ---------------------------------------------------------- #
# Here I will create a table with various stats describing #
# an AGN, variation, magnitudes, SNR ratio for various bands #
# and emission lines. Spectral statistics are calculated #
# for each exposure, before calibration. #
# ---------------------------------------------------------- #
import numpy as np
import pandas as pd
import OzDES_Calculation as ozcalc
import matplotlib.cm as cm
import matplotlib.pyplot as plt
title_font = {'size':'12', 'color':'black', 'weight':'normal', 'verticalalignment':'bottom'}
axis_font = {'size':'12'}
# Main direction where to find data
dataPath = "../OzDES_Data/"
# File listing the AGN ID's
sources = pd.read_table(dataPath + "RM_IDs.txt", delim_whitespace=True)
nsources = len(sources)
randAGN = nsources*
|
np.random.rand(100)
|
numpy.random.rand
|
import numpy as np
import sys
import random
import os
import time
import argparse
import glob
import matplotlib.pyplot as plt
try:
from mayavi import mlab as mayalab
except:
pass
np.random.seed(2)
# from contact_point_dataset_torch_multi_label import MyDataset
from hang_dataset import MyDataset
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
UTILS_DIR = os.path.abspath(os.path.join(BASE_DIR, '..', 'utils'))
sys.path.append(UTILS_DIR)
from data_helper import *
from coord_helper import *
from rotation_lib import *
from bullet_helper import *
from s2_utils import *
import pybullet as p
K = 128
def plot_corr(pc_o, pc_h, pose_transl, pose_quat, cp_top_k_idx_o, cp_top_k_idx_h, corr, aa=False):
corr = np.reshape(corr, (K, K))
pc_o_transformed = transform_pc(pc_o, pose_transl, pose_quat, aa=aa)
# plot_pc(pc_h)
# plot_pc(pc_o_transformed)
top_k_corr, top_k_corr_idx = top_k_np(corr, 512, sort=True)
top_k_corr_idx_o = top_k_corr_idx[:, 0]
top_k_corr_idx_h = top_k_corr_idx[:, 1]
# print('top k corr mean', np.mean(top_k_corr), np.max(top_k_corr), np.min(top_k_corr))
# plot_pc_s(pc_o_transformed[cp_top_k_idx_o][top_k_corr_idx_o], top_k_corr)
# plot_pc_s(pc_h[cp_top_k_idx_h][top_k_corr_idx_h], top_k_corr)
# mayalab.show()
plot_pc(pc_h)
plot_pc(pc_o_transformed)
partial_pc_o = pc_o_transformed[cp_top_k_idx_o][top_k_corr_idx_o[:3]]
partial_pc_h = pc_h[cp_top_k_idx_h][top_k_corr_idx_h[:3]]
plot_pc(partial_pc_o, color=(0, 1, 0), scale=0.002)
plot_pc(partial_pc_h, color=(0, 0, 1), scale=0.002)
mayalab.show()
rotation_center = np.mean(partial_pc_h - partial_pc_o, axis=0)
# plot_pc(pc_h)
# plot_pc(pc_o_transformed + rotation_center[np.newaxis, :])
# plot_pc(partial_pc_o + rotation_center[np.newaxis, :], color=(0, 1, 0), scale=0.002)
# plot_pc(partial_pc_h, color=(0, 0, 1), scale=0.002)
# mayalab.show()
# plot_pc(pc_o_transformed[cp_top_k_idx_o][top_k_np(corr[:, 0], 5)[1]], scale=0.002, color=(1, 0, 0))
return rotation_center[:3]
def print_helper(a):
return '{} {} {} {}'.format(np.mean(a), np.max(a), np.min(a), np.std(a))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--home_dir_data", default="../data")
parser.add_argument("--exp_name", default="")
parser.add_argument("--eval_epoch", type=int, default=-1)
parser.add_argument("--eval_ct", type=int, default=-1)
parser.add_argument('--test_list', default='test_list')
parser.add_argument('--n_gt_sample', type=int, default=128)
parser.add_argument('--restrict_object_cat', default='')
args = parser.parse_args()
assert (args.eval_ct != -1) or (args.eval_epoch != -1)
data_dir = os.path.join(args.home_dir_data, 'geo_data')
hook_dict, object_dict = load_all_hooks_objects(data_dir, ret_dict=True)
runs_dir = 'runs/exp_s2b'
p_env = p_Env(args.home_dir_data, gui=True, physics=False)
for i, run_folder_dir in enumerate(glob.glob('{}/*{}'.format(runs_dir, args.exp_name))):
# assert i == 0, run_folder_dir
result_folder = run_folder_dir
if args.eval_ct != -1:
eval_file_dir_arr = glob.glob('{}/eval/*_ct_{}.json'.format(run_folder_dir, args.eval_ct))
elif args.eval_epoch != -1:
eval_file_dir_arr = glob.glob('{}/eval/*eval_epoch_{}_ct_*.json'.format(run_folder_dir, args.eval_epoch))
assert len(eval_file_dir_arr) == 1, eval_file_dir_arr
eval_file_dir = eval_file_dir_arr[0]
eval_result_dict = load_json(eval_file_dir)
for result_file_name in eval_result_dict:
for i, one_result in enumerate(eval_result_dict[result_file_name]):
print(result_file_name, i)
pc_o = np.array(one_result['pc_o'])
pc_h = np.array(one_result['pc_h'])
gt_cp_score_o = np.array(one_result['gt_cp_score_o'])
gt_cp_score_h = np.array(one_result['gt_cp_score_h'])
pred_cp_score_o = np.array(one_result['pred_cp_score_o'])
pred_cp_score_h = np.array(one_result['pred_cp_score_h'])
pred_cp_top_k_idx_o = np.array(one_result['pred_cp_top_k_idx_o'])
pred_cp_top_k_idx_h = np.array(one_result['pred_cp_top_k_idx_h'])
loss_ce = one_result['loss_ce']
loss_listnet = one_result['loss_listnet']
if 'gt_cp_map_per_o' in one_result:
gt_cp_map_per_o = np.array(one_result['gt_cp_map_per_o'])
gt_cp_map_per_h = np.array(one_result['gt_cp_map_per_h'])
_, gt_cp_top_k_idx_o = top_k_np(gt_cp_score_o, k=128)
_, gt_cp_top_k_idx_h = top_k_np(gt_cp_score_h, k=128)
gt_gt_cp_corr = create_gt_cp_corr_preload_discretize(gt_cp_map_per_o[np.newaxis, :], gt_cp_map_per_h[np.newaxis, :], gt_cp_top_k_idx_o[np.newaxis, :], gt_cp_top_k_idx_h[np.newaxis, :], n_gt_sample=args.n_gt_sample)
gt_gt_cp_corr = gt_gt_cp_corr[0]
gt_cp_corr = np.array(one_result['gt_cp_corr'])
pred_cp_corr = np.array(one_result['pred_cp_corr'])
pred_cp_corr_top_k_idx = np.array(one_result['pred_cp_corr_top_k_idx'])
gt_pose = np.array(one_result['gt_pose'])
pred_transl = np.array(one_result['pred_transl'])
pred_aa = np.array(one_result['pred_aa'])
# p_env.load_pair_w_pose(result_file_name, gt_pose[:3], gt_pose[3:], aa=True)
p_env.load_pair_w_pose(result_file_name, pred_transl, pred_aa, aa=True)
flag = input('in')
if flag == 's':
continue
print('gt cp o {}'.format(print_helper(gt_cp_score_o)))
print('gt cp h {}'.format(print_helper(gt_cp_score_h)))
print('pred cp o {}'.format(print_helper(pred_cp_score_o)))
print('pred cp h {}'.format(print_helper(pred_cp_score_h)))
print('loss o', one_result['loss_o'], 'loss h', one_result['loss_h'])
# print('calc o', np.mean(np.abs(pred_cp_score_o - gt_cp_score_o)**2), np.mean(np.abs(pred_cp_score_o/ np.max(pred_cp_score_o) - gt_cp_score_o)**2))
# print('calc h', np.mean(np.abs(pred_cp_score_h - gt_cp_score_h)**2), np.mean(np.abs(pred_cp_score_h/ np.max(pred_cp_score_h) - gt_cp_score_h)**2))
if flag != 'corr':
# plot_pc_s(pc_o, gt_cp_score_o)
# mayalab.show()
plot_pc_s(pc_o, pred_cp_score_o, abs=False)
mayalab.show()
plot_pc(pc_o)
plot_pc(pc_o[pred_cp_top_k_idx_o], color=[1, 0, 0])
# plot_pc_s(pc_o, pred_cp_score_o, abs=False)
mayalab.show()
# plot_pc_s(pc_h, gt_cp_score_h)
# mayalab.show()
plot_pc_s(pc_h, pred_cp_score_h, abs=False)
mayalab.show()
#plot top k on hook
plot_pc(pc_h)
plot_pc(pc_h[pred_cp_top_k_idx_h], color=[1, 0, 0])
# plot_pc_s(pc_h, pred_cp_score_h, abs=False)
mayalab.show()
#plot correspondence
print('gt cp corr',
|
np.mean(gt_cp_corr)
|
numpy.mean
|
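For reference, a minimal numpy.mean sketch matching the reduction used in the correspondence printout above; the array is synthetic.

import numpy as np

# mean reduces over all elements by default, or along one axis when requested.
corr = np.arange(12, dtype=float).reshape(3, 4)
print(np.mean(corr))           # 5.5, grand mean
print(np.mean(corr, axis=0))   # per-column means, shape (4,)
print(np.mean(corr, axis=1))   # per-row means, shape (3,)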
import warnings
import numpy as np
import tensorflow.compat.v1 as tf
from cascor.units.unit import CCNNUnit
from cascor._settings import dtype
class BayesianLinear(CCNNUnit):
def __init__(self, alpha=0.01, beta=100.0):
self.alpha = alpha
self.beta = beta
def build(self, num_inputs, num_outputs, num_targets):
if num_outputs != num_targets:
raise Exception('num. outputs != num. targets: cannot use bayesian linear regression as candidate!')
# initialize Gaussian prior on the weights
self.mean, self.cov, self.cov_inv = [], [], []
for each_num_input in num_inputs:
self.mean.append(np.zeros((each_num_input + 1, num_outputs)))
self.cov.append(np.eye(each_num_input + 1) / self.alpha)
self.cov_inv.append(np.linalg.inv(self.cov[-1]))
def evaluate_losses(self, X, y):
losses = []
for mean, x in zip(self.mean, X):
Phi = np.column_stack([x, np.ones((x.shape[0], 1))])
loss =
|
np.mean((Phi @ mean - y) ** 2)
|
numpy.mean
|
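A hedged sketch of the mean-squared-error reduction that evaluate_losses computes above; the design matrix, weights, and targets are all synthetic stand-ins.

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(50, 3))
Phi = np.column_stack([x, np.ones((x.shape[0], 1))])  # features plus bias column
w = rng.normal(size=(4, 1))                           # stand-in posterior mean
y = Phi @ w + 0.1 * rng.normal(size=(50, 1))
mse = np.mean((Phi @ w - y) ** 2)                     # same reduction as the loss above
print(mse)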
import numpy as np
import pytest
import autogalaxy as ag
from autogalaxy import exc
from autogalaxy.mock import mock
def test_via_signal_to_noise_map(dataset_quantity_7x7_array_2d, sub_mask_2d_7x7):
data = ag.Array2D.manual_native(array=[[1.0, 2.0], [3.0, 4.0]], pixel_scales=1.0)
signal_to_noise_map = ag.Array2D.manual_native(
array=[[1.0, 5.0], [15.0, 40.0]], pixel_scales=1.0
)
dataset_quantity = ag.DatasetQuantity.via_signal_to_noise_map(
data=data, signal_to_noise_map=signal_to_noise_map
)
assert dataset_quantity.signal_to_noise_map == pytest.approx(
signal_to_noise_map, 1.0e-4
)
assert dataset_quantity.noise_map.native == pytest.approx(
np.array([[1.0, 0.4], [0.2, 0.1]]), 1.0e-4
)
data = ag.VectorYX2D.manual_native(
vectors=[[[1.0, 1.0], [2.0, 2.0]], [[3.0, 3.0], [4.0, 4.0]]], pixel_scales=1.0
)
signal_to_noise_map = ag.Array2D.manual_native(
array=[[1.0, 5.0], [15.0, 40.0]], pixel_scales=1.0
)
dataset_quantity = ag.DatasetQuantity.via_signal_to_noise_map(
data=data, signal_to_noise_map=signal_to_noise_map
)
assert dataset_quantity.signal_to_noise_map == pytest.approx(
np.array([[1.0, 1.0], [5.0, 5.0], [15.0, 15.0], [40.0, 40.0]]), 1.0e-4
)
assert dataset_quantity.noise_map.native == pytest.approx(
np.array([[[1.0, 1.0], [0.4, 0.4]], [[0.2, 0.2], [0.1, 0.1]]]), 1.0e-4
)
def test__apply_mask__masks_dataset(
dataset_quantity_7x7_array_2d, dataset_quantity_7x7_vector_yx_2d, sub_mask_2d_7x7
):
dataset_quantity_7x7 = dataset_quantity_7x7_array_2d.apply_mask(
mask=sub_mask_2d_7x7
)
assert (dataset_quantity_7x7.data.slim == np.ones(9)).all()
assert (
dataset_quantity_7x7.data.native == np.ones((7, 7)) * np.invert(sub_mask_2d_7x7)
).all()
assert (dataset_quantity_7x7.noise_map.slim == 2.0 * np.ones(9)).all()
assert (
dataset_quantity_7x7.noise_map.native
== 2.0 * np.ones((7, 7)) * np.invert(sub_mask_2d_7x7)
).all()
dataset_quantity_7x7 = dataset_quantity_7x7_vector_yx_2d.apply_mask(
mask=sub_mask_2d_7x7
)
assert (dataset_quantity_7x7.data.slim == np.ones((9, 2))).all()
assert (dataset_quantity_7x7.noise_map.slim == 2.0 * np.ones((9, 2))).all()
def test__grid(
dataset_quantity_7x7_array_2d,
sub_mask_2d_7x7,
grid_2d_7x7,
sub_grid_2d_7x7,
blurring_grid_2d_7x7,
grid_2d_iterate_7x7,
):
masked_imaging_7x7 = dataset_quantity_7x7_array_2d.apply_mask(mask=sub_mask_2d_7x7)
masked_imaging_7x7 = masked_imaging_7x7.apply_settings(
settings=ag.SettingsImaging(grid_class=ag.Grid2D, sub_size=2)
)
assert isinstance(masked_imaging_7x7.grid, ag.Grid2D)
assert (masked_imaging_7x7.grid.binned == grid_2d_7x7).all()
assert (masked_imaging_7x7.grid.slim == sub_grid_2d_7x7).all()
masked_imaging_7x7 = dataset_quantity_7x7_array_2d.apply_mask(mask=sub_mask_2d_7x7)
masked_imaging_7x7 = masked_imaging_7x7.apply_settings(
settings=ag.SettingsImaging(grid_class=ag.Grid2DIterate)
)
assert isinstance(masked_imaging_7x7.grid, ag.Grid2DIterate)
assert (masked_imaging_7x7.grid.binned == grid_2d_iterate_7x7).all()
def test__vector_data__y_x():
data = ag.VectorYX2D.manual_native(
vectors=[[[1.0, 5.0], [2.0, 6.0]], [[3.0, 7.0], [4.0, 8.0]]],
pixel_scales=1.0,
sub_size=1,
)
noise_map = ag.VectorYX2D.manual_native(
vectors=[[[1.1, 5.1], [2.1, 6.1]], [[3.1, 7.1], [4.1, 8.1]]],
pixel_scales=1.0,
sub_size=1,
)
dataset_quantity = ag.DatasetQuantity(data=data, noise_map=noise_map)
assert isinstance(dataset_quantity.y, ag.DatasetQuantity)
assert (dataset_quantity.y.data.slim == np.array([1.0, 2.0, 3.0, 4.0])).all()
assert (dataset_quantity.y.noise_map.slim == np.array([1.1, 2.1, 3.1, 4.1])).all()
assert isinstance(dataset_quantity.y, ag.DatasetQuantity)
assert (dataset_quantity.x.data.slim ==
|
np.array([5.0, 6.0, 7.0, 8.0])
|
numpy.array
|
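An illustrative numpy.array sketch in the spirit of the y/x vector assertion above; the arrays are plain ndarrays, not autogalaxy structures.

import numpy as np

yx = np.array([[1.0, 5.0], [2.0, 6.0], [3.0, 7.0], [4.0, 8.0]])  # (y, x) pairs
y, x = yx[:, 0], yx[:, 1]
assert np.allclose(y, [1.0, 2.0, 3.0, 4.0])
assert np.allclose(x, [5.0, 6.0, 7.0, 8.0])   # matches the slim x-component check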
from __future__ import print_function, division
import skimage as sk
import skimage.io as skio
from scipy.signal import *
import cv2
import scipy
import torch.nn as nn
import imgaug
import imageio
import imgaug as ia
from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
import os
import torch
import pandas as pd
from skimage import io, transform
import matplotlib.pyplot as plt
import torchvision
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import numpy as np
from skimage.color import rgb2gray
import pandas as pd
from PIL import Image
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
# GLOBAL PARAMETERS
meta = torch.load('mean-std.pt')
res = 300
class SkinData(Dataset):
def __init__(self, root_dir, data, transform=None, mode='train'):
self.root_dir = root_dir
self.data = pd.read_csv(data)
self.transform = transform
self.mode = mode
def __len__(self):
self.length = len(self.data)
return self.length
def __getitem__(self, idx):
"""
Obtains an 'image' and 'target' as a tuple
##INPUT##
idx: (int) item id
##OUTPUT##
image: (tensor) image after transformations
target: (dictionary) contains the targets
- 'bbox': (list) [xmin, ymin, xmax, ymax]
- 'labels': (tensor) (7,) One Hot Encoded Vector for the Disease Diagnoses
- 'area': (float/int) area of bounding box
- 'id': idx
"""
datapoint = self.data.iloc[idx]
image = Image.open(datapoint['path'])
target = {}
minx = datapoint['x']
miny = datapoint['y']
w = datapoint['w']
h = datapoint['h']
maxx = minx + w
maxy = miny + h
target['area'] = torch.tensor([w * h])
target['labels'] = torch.tensor(datapoint[-7:])
target['boxes'] = torch.tensor([minx, miny, maxx, maxy])
target["image_id"] = torch.tensor([idx])
if self.transform is not None:
image, target = self.transform((
|
np.array(image)
|
numpy.array
|
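A small sketch of converting a PIL image to an ndarray, as the truncated transform call above does; the image is generated in-place so the snippet stands alone.

import numpy as np
from PIL import Image

img = Image.fromarray(np.zeros((4, 6, 3), dtype=np.uint8))  # tiny synthetic RGB image
arr = np.array(img)                                         # PIL -> ndarray copy
assert arr.shape == (4, 6, 3) and arr.dtype == np.uint8     # (H, W, C) layout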
""" Unit test for the SqliteRecorder. """
import errno
import os
from shutil import rmtree
from tempfile import mkdtemp
import time
import numpy as np
from sqlitedict import SqliteDict
from openmdao.core.problem import Problem
from openmdao.core.group import Group
from openmdao.core.parallel_group import ParallelGroup
from openmdao.core.component import Component
from openmdao.core.mpi_wrap import MPI
from openmdao.components.indep_var_comp import IndepVarComp
from openmdao.recorders.sqlite_recorder import SqliteRecorder
from openmdao.recorders.test.test_sqlite import _assertMetadataRecorded, _assertIterationDataRecorded
from openmdao.test.mpi_util import MPITestCase
if MPI:
from openmdao.core.petsc_impl import PetscImpl as impl
coordinate = [MPI.COMM_WORLD.rank, 'Driver', (1, )]
else:
from openmdao.core.basic_impl import BasicImpl as impl
coordinate = [0, 'Driver', (1, )]
class ABCDArrayComp(Component):
def __init__(self, arr_size=9, delay=0.01):
super(ABCDArrayComp, self).__init__()
self.add_param('a', np.ones(arr_size, float))
self.add_param('b', np.ones(arr_size, float))
self.add_param('in_string', '')
self.add_param('in_list', [])
self.add_output('c', np.ones(arr_size, float))
self.add_output('d', np.ones(arr_size, float))
self.add_output('out_string', '')
self.add_output('out_list', [])
self.delay = delay
def solve_nonlinear(self, params, unknowns, resids):
time.sleep(self.delay)
unknowns['c'] = params['a'] + params['b']
unknowns['d'] = params['a'] - params['b']
unknowns['out_string'] = params['in_string'] + '_' + self.name
unknowns['out_list'] = params['in_list'] + [1.5]
def run(problem):
t0 = time.time()
problem.run()
t1 = time.time()
return t0, t1
class TestSqliteRecorder(MPITestCase):
filename = ""
dir = ""
N_PROCS = 2
def setUp(self):
self.dir = mkdtemp()
self.filename = os.path.join(self.dir, "sqlite_test")
self.tablename_metadata = 'metadata'
self.tablename_iterations = 'iterations'
self.recorder = SqliteRecorder(self.filename)
self.recorder.options['record_metadata'] = False
self.eps = 1e-5
def tearDown(self):
try:
rmtree(self.dir)
except OSError as e:
# If directory already deleted, keep going
if e.errno not in (errno.ENOENT, errno.EACCES, errno.EPERM):
raise e
def assertMetadataRecorded(self, expected):
if self.comm.rank != 0:
return
db = SqliteDict(self.filename, self.tablename_metadata)
_assertMetadataRecorded(self, db, expected)
db.close()
def assertIterationDataRecorded(self, expected, tolerance, root):
if self.comm.rank != 0:
return
db = SqliteDict(self.filename, self.tablename_iterations)
_assertIterationDataRecorded(self, db, expected, tolerance)
db.close()
def test_basic(self):
size = 3
prob = Problem(Group(), impl=impl)
G1 = prob.root.add('G1', ParallelGroup())
G1.add('P1', IndepVarComp('x', np.ones(size, float) * 1.0))
G1.add('P2', IndepVarComp('x', np.ones(size, float) * 2.0))
prob.root.add('C1', ABCDArrayComp(size))
prob.root.connect('G1.P1.x', 'C1.a')
prob.root.connect('G1.P2.x', 'C1.b')
prob.driver.add_recorder(self.recorder)
self.recorder.options['record_params'] = True
self.recorder.options['record_resids'] = True
prob.setup(check=False)
t0, t1 = run(prob)
prob.cleanup()
expected_params = [
("C1.a", [1.0, 1.0, 1.0]),
("C1.b", [2.0, 2.0, 2.0]),
]
expected_unknowns = [
("G1.P1.x", np.array([1.0, 1.0, 1.0])),
("G1.P2.x", np.array([2.0, 2.0, 2.0])),
("C1.c", np.array([3.0, 3.0, 3.0])),
("C1.d", np.array([-1.0, -1.0, -1.0])),
("C1.out_string", "_C1"),
("C1.out_list", [1.5]),
]
expected_resids = [
("G1.P1.x", np.array([0.0, 0.0, 0.0])),
("G1.P2.x", np.array([0.0, 0.0, 0.0])),
("C1.c", np.array([0.0, 0.0, 0.0])),
("C1.d", np.array([0.0, 0.0, 0.0])),
("C1.out_string", ""),
("C1.out_list", []),
]
self.assertIterationDataRecorded(((coordinate, (t0, t1),
expected_params, expected_unknowns,
expected_resids),),
self.eps, prob.root)
def test_includes(self):
size = 3
prob = Problem(Group(), impl=impl)
G1 = prob.root.add('G1', ParallelGroup())
G1.add('P1', IndepVarComp('x', np.ones(size, float) * 1.0))
G1.add('P2', IndepVarComp('x', np.ones(size, float) * 2.0))
prob.root.add('C1', ABCDArrayComp(size))
prob.root.connect('G1.P1.x', 'C1.a')
prob.root.connect('G1.P2.x', 'C1.b')
prob.driver.add_recorder(self.recorder)
self.recorder.options['record_params'] = True
self.recorder.options['record_resids'] = True
self.recorder.options['includes'] = ['C1.*']
prob.setup(check=False)
t0, t1 = run(prob)
prob.cleanup()
expected_params = [
("C1.a", [1.0, 1.0, 1.0]),
("C1.b", [2.0, 2.0, 2.0]),
]
expected_unknowns = [
("C1.c", np.array([3.0, 3.0, 3.0])),
("C1.d", np.array([-1.0, -1.0, -1.0])),
("C1.out_string", "_C1"),
("C1.out_list", [1.5]),
]
expected_resids = [
("C1.c", np.array([0.0, 0.0, 0.0])),
("C1.d", np.array([0.0, 0.0, 0.0])),
("C1.out_string", ""),
("C1.out_list", []),
]
self.assertIterationDataRecorded(((coordinate, (t0, t1), expected_params, expected_unknowns, expected_resids),), self.eps, prob.root)
def test_includes_and_excludes(self):
size = 3
prob = Problem(Group(), impl=impl)
G1 = prob.root.add('G1', ParallelGroup())
G1.add('P1', IndepVarComp('x', np.ones(size, float) * 1.0))
G1.add('P2', IndepVarComp('x', np.ones(size, float) * 2.0))
prob.root.add('C1', ABCDArrayComp(size))
prob.root.connect('G1.P1.x', 'C1.a')
prob.root.connect('G1.P2.x', 'C1.b')
prob.driver.add_recorder(self.recorder)
self.recorder.options['includes'] = ['C1.*']
self.recorder.options['excludes'] = ['*.out*']
self.recorder.options['record_params'] = True
self.recorder.options['record_resids'] = True
prob.setup(check=False)
t0, t1 = run(prob)
prob.cleanup()
expected_params = [
("C1.a", [1.0, 1.0, 1.0]),
("C1.b", [2.0, 2.0, 2.0]),
]
expected_unknowns = [
("C1.c", np.array([3.0, 3.0, 3.0])),
("C1.d", np.array([-1.0, -1.0, -1.0])),
]
expected_resids = [
("C1.c", np.array([0.0, 0.0, 0.0])),
("C1.d", np.array([0.0, 0.0, 0.0])),
]
self.assertIterationDataRecorded(((coordinate, (t0, t1), expected_params, expected_unknowns, expected_resids),), self.eps, prob.root)
def test_solver_record(self):
size = 3
prob = Problem(Group(), impl=impl)
G1 = prob.root.add('G1', ParallelGroup())
G1.add('P1', IndepVarComp('x', np.ones(size, float) * 1.0))
G1.add('P2', IndepVarComp('x', np.ones(size, float) * 2.0))
prob.root.add('C1', ABCDArrayComp(size))
prob.root.connect('G1.P1.x', 'C1.a')
prob.root.connect('G1.P2.x', 'C1.b')
prob.root.nl_solver.add_recorder(self.recorder)
self.recorder.options['record_params'] = True
self.recorder.options['record_resids'] = True
prob.setup(check=False)
t0, t1 = run(prob)
prob.cleanup()
if MPI:
coord = [MPI.COMM_WORLD.rank, 'Driver', (1, ), "root", (1,)]
else:
coord = [0, 'Driver', (1, ), "root", (1,)]
expected_params = [
("C1.a", [1.0, 1.0, 1.0]),
("C1.b", [2.0, 2.0, 2.0]),
]
expected_unknowns = [
("G1.P1.x", np.array([1.0, 1.0, 1.0])),
("G1.P2.x", np.array([2.0, 2.0, 2.0])),
("C1.c", np.array([3.0, 3.0, 3.0])),
("C1.d", np.array([-1.0, -1.0, -1.0])),
("C1.out_string", "_C1"),
("C1.out_list", [1.5]),
]
expected_resids = [
("G1.P1.x", np.array([0.0, 0.0, 0.0])),
("G1.P2.x", np.array([0.0, 0.0, 0.0])),
("C1.c", np.array([0.0, 0.0, 0.0])),
("C1.d", np.array([0.0, 0.0, 0.0])),
("C1.out_string", ""),
("C1.out_list", []),
]
self.assertIterationDataRecorded(((coord, (t0, t1), expected_params, expected_unknowns, expected_resids),), self.eps, prob.root)
def test_driver_records_metadata(self):
size = 3
prob = Problem(Group(), impl=impl)
G1 = prob.root.add('G1', ParallelGroup())
G1.add('P1', IndepVarComp('x',
|
np.ones(size, float)
|
numpy.ones
|
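For reference, a minimal numpy.ones sketch matching the IndepVarComp initial values above; only plain arrays are involved.

import numpy as np

size = 3
a = np.ones(size, float) * 1.0
b = np.ones(size, float) * 2.0
assert a.dtype == np.float64
assert np.array_equal(a + b, np.array([3.0, 3.0, 3.0]))     # same values as C1.c above
assert np.array_equal(a - b, np.array([-1.0, -1.0, -1.0]))  # same values as C1.d above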
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import pandas as pd
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
import mars.tensor as mt
import mars.dataframe as md
from mars.executor import register, Executor
from mars.tensor.core import TensorOrder
from mars.tensor.datasource import ArrayDataSource
from mars.tiles import get_tiled
from mars.session import new_session, Session
class Test(unittest.TestCase):
def setUp(self):
new_session().as_default()
def testSessionExecute(self):
a = mt.random.rand(10, 20)
res = a.sum().to_numpy()
self.assertTrue(np.isscalar(res))
self.assertLess(res, 200)
def testSessionAsyncExecute(self):
raw_a = np.random.RandomState(0).rand(10, 20)
a = mt.tensor(raw_a)
expected = raw_a.sum()
res = a.sum().to_numpy(wait=False).result()
self.assertEqual(expected, res)
res = a.sum().execute(wait=False)
res = res.result().fetch()
self.assertEqual(expected, res)
raw_df = pd.DataFrame(raw_a)
expected = raw_df.sum()
df = md.DataFrame(a)
res = df.sum().to_pandas(wait=False).result()
pd.testing.assert_series_equal(expected, res)
res = df.sum().execute(wait=False)
res = res.result().fetch()
pd.testing.assert_series_equal(expected, res)
t = [df.sum(), a.sum()]
res = mt.ExecutableTuple(t).to_object(wait=False).result()
pd.testing.assert_series_equal(raw_df.sum(), res[0])
self.assertEqual(raw_a.sum(), res[1])
res = mt.ExecutableTuple(t).execute(wait=False)
res = res.result().fetch()
pd.testing.assert_series_equal(raw_df.sum(), res[0])
self.assertEqual(raw_a.sum(), res[1])
def testMultipleOutputExecute(self):
data = np.random.random((5, 9))
# test multiple outputs
arr1 = mt.tensor(data.copy(), chunk_size=3)
result = mt.modf(arr1).execute().fetch()
expected = np.modf(data)
np.testing.assert_array_equal(result[0], expected[0])
np.testing.assert_array_equal(result[1], expected[1])
# test 1 output
arr2 = mt.tensor(data.copy(), chunk_size=3)
result = ((arr2 + 1) * 2).to_numpy()
expected = (data + 1) * 2
np.testing.assert_array_equal(result, expected)
# test multiple outputs, but only execute 1
arr3 = mt.tensor(data.copy(), chunk_size=3)
arrs = mt.split(arr3, 3, axis=1)
result = arrs[0].to_numpy()
expected = np.split(data, 3, axis=1)[0]
np.testing.assert_array_equal(result, expected)
# test multiple outputs, but only execute 1
data = np.random.randint(0, 10, (5, 5))
arr3 = (mt.tensor(data) + 1) * 2
arrs = mt.linalg.qr(arr3)
result = (arrs[0] + 1).to_numpy()
expected = np.linalg.qr((data + 1) * 2)[0] + 1
np.testing.assert_array_almost_equal(result, expected)
result = (arrs[0] + 2).to_numpy()
expected = np.linalg.qr((data + 1) * 2)[0] + 2
np.testing.assert_array_almost_equal(result, expected)
s = mt.shape(0)
result = s.execute().fetch()
expected = np.shape(0)
self.assertEqual(result, expected)
def testReExecuteSame(self):
data = np.random.random((5, 9))
# test run the same tensor
arr4 = mt.tensor(data.copy(), chunk_size=3) + 1
result1 = arr4.to_numpy()
expected = data + 1
np.testing.assert_array_equal(result1, expected)
result2 = arr4.to_numpy()
np.testing.assert_array_equal(result1, result2)
# test run the same tensor with single chunk
arr4 = mt.tensor(data.copy())
result1 = arr4.to_numpy()
expected = data
np.testing.assert_array_equal(result1, expected)
result2 = arr4.to_numpy()
np.testing.assert_array_equal(result1, result2)
# modify result
sess = Session.default_or_local()
executor = sess._sess._executor
executor.chunk_result[get_tiled(arr4).chunks[0].key] = data + 2
result3 = arr4.to_numpy()
np.testing.assert_array_equal(result3, data + 2)
# test run same key tensor
arr5 = mt.ones((10, 10), chunk_size=3)
result1 = arr5.to_numpy()
del arr5
arr6 = mt.ones((10, 10), chunk_size=3)
result2 = arr6.to_numpy()
np.testing.assert_array_equal(result1, result2)
# test copy, make sure it will not let the execution cache missed
df = md.DataFrame(mt.ones((10, 3), chunk_size=5))
executed = [False]
def add_one(x):
if executed[0]: # pragma: no cover
raise ValueError('executed before')
return x + 1
df2 = df.apply(add_one)
pd.testing.assert_frame_equal(df2.to_pandas(), pd.DataFrame(np.ones((10, 3)) + 1))
executed[0] = True
df3 = df2.copy()
df4 = df3 * 2
pd.testing.assert_frame_equal(df4.to_pandas(), pd.DataFrame(np.ones((10, 3)) * 4))
def testExecuteBothExecutedAndNot(self):
data = np.random.random((5, 9))
arr1 = mt.tensor(data, chunk_size=4) * 2
arr2 = mt.tensor(data) + 1
np.testing.assert_array_equal(arr2.to_numpy(), data + 1)
# modify result
sess = Session.default_or_local()
executor = sess._sess._executor
executor.chunk_result[get_tiled(arr2).chunks[0].key] = data + 2
results = sess.run(arr1, arr2)
np.testing.assert_array_equal(results[0], data * 2)
np.testing.assert_array_equal(results[1], data + 2)
def testTensorExecuteNotFetch(self):
data = np.random.random((5, 9))
sess = Session.default_or_local()
arr1 = mt.tensor(data, chunk_size=2) * 2
with self.assertRaises(ValueError):
sess.fetch(arr1)
self.assertIs(arr1.execute(), arr1)
# modify result
executor = sess._sess._executor
executor.chunk_result[get_tiled(arr1).chunks[0].key] = data[:2, :2] * 3
expected = data * 2
expected[:2, :2] = data[:2, :2] * 3
np.testing.assert_array_equal(arr1.to_numpy(), expected)
def testDataFrameExecuteNotFetch(self):
data1 = pd.DataFrame(np.random.random((5, 4)), columns=list('abcd'))
sess = Session.default_or_local()
df1 = md.DataFrame(data1, chunk_size=2)
with self.assertRaises(ValueError):
sess.fetch(df1)
self.assertIs(df1.execute(), df1)
self.assertEqual(len(df1[df1['a'] > 1].to_pandas(fetch_kwargs={'batch_size': 2})), 0)
self.assertEqual(len(df1[df1['a'] > 1]['a'].to_pandas(fetch_kwargs={'batch_size': 2})), 0)
# modify result
executor = sess._sess._executor
executor.chunk_result[get_tiled(df1).chunks[0].key] = data1.iloc[:2, :2] * 3
expected = data1
expected.iloc[:2, :2] = data1.iloc[:2, :2] * 3
pd.testing.assert_frame_equal(df1.to_pandas(), expected)
pd.testing.assert_frame_equal(df1.to_pandas(fetch_kwargs={'batch_size': 2}), expected)
def testClosedSession(self):
session = new_session()
arr = mt.ones((10, 10))
result = session.run(arr)
np.testing.assert_array_equal(result, np.ones((10, 10)))
session.close()
with self.assertRaises(RuntimeError):
session.run(arr)
with self.assertRaises(RuntimeError):
session.run(arr + 1)
def testBoolIndexing(self):
arr = mt.random.rand(10, 10, chunk_size=5)
arr[3:8, 3:8] = mt.ones((5, 5))
arr2 = arr[arr == 1]
self.assertEqual(arr2.shape, (np.nan,))
arr2.execute()
self.assertEqual(arr2.shape, (25,))
arr3 = arr2.reshape((5, 5))
expected = np.ones((5, 5))
np.testing.assert_array_equal(arr3.to_numpy(), expected)
def testArrayProtocol(self):
arr = mt.ones((10, 20))
result = np.asarray(arr)
np.testing.assert_array_equal(result, np.ones((10, 20)))
arr2 = mt.ones((10, 20))
result = np.asarray(arr2, mt.bool_)
np.testing.assert_array_equal(result, np.ones((10, 20), dtype=np.bool_))
arr3 = mt.ones((10, 20)).sum()
result = np.asarray(arr3)
np.testing.assert_array_equal(result, np.asarray(200))
arr4 = mt.ones((10, 20)).sum()
result = np.asarray(arr4, dtype=np.float_)
np.testing.assert_array_equal(result, np.asarray(200, dtype=np.float_))
def testRandomExecuteInSessions(self):
arr = mt.random.rand(20, 20)
sess1 = new_session()
res1 = sess1.run(arr)
sess2 = new_session()
res2 = sess2.run(arr)
np.testing.assert_array_equal(res1, res2)
def testFetch(self):
sess = new_session()
arr1 = mt.ones((10, 5), chunk_size=3)
r1 = sess.run(arr1)
r2 = sess.run(arr1)
np.testing.assert_array_equal(r1, r2)
executor = sess._sess._executor
executor.chunk_result[get_tiled(arr1).chunks[0].key] = np.ones((3, 3)) * 2
r3 = sess.run(arr1 + 1)
np.testing.assert_array_equal(r3[:3, :3], np.ones((3, 3)) * 3)
# rerun to ensure arr1's chunk results still exist
r4 = sess.run(arr1 + 1)
np.testing.assert_array_equal(r4[:3, :3], np.ones((3, 3)) * 3)
arr2 = mt.ones((10, 5), chunk_size=3)
r5 = sess.run(arr2)
np.testing.assert_array_equal(r5[:3, :3], np.ones((3, 3)) * 2)
r6 = sess.run(arr2 + 1)
np.testing.assert_array_equal(r6[:3, :3], np.ones((3, 3)) * 3)
df = md.DataFrame(np.random.rand(10, 2), columns=list('ab'))
s = df['a'].map(lambda x: np.ones((3, 3)), dtype='object').sum()
np.testing.assert_array_equal(s.execute().fetch(), np.ones((3, 3)) * 10)
# test fetch multiple tensors
raw = np.random.rand(5, 10)
arr1 = mt.ones((5, 10), chunk_size=5)
arr2 = mt.tensor(raw, chunk_size=3)
arr3 = mt.sum(arr2)
sess.run(arr1, arr2, arr3)
fetch1, fetch2, fetch3 = sess.fetch(arr1, arr2, arr3)
np.testing.assert_array_equal(fetch1, np.ones((5, 10)))
np.testing.assert_array_equal(fetch2, raw)
np.testing.assert_almost_equal(fetch3, raw.sum())
fetch1, fetch2, fetch3 = sess.fetch([arr1, arr2, arr3])
np.testing.assert_array_equal(fetch1, np.ones((5, 10)))
|
np.testing.assert_array_equal(fetch2, raw)
|
numpy.testing.assert_array_equal
|
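A hedged sketch of numpy.testing.assert_array_equal, the call completed above; the arrays here are unrelated to the mars session test.

import numpy as np

# assert_array_equal raises AssertionError on any elementwise mismatch and
# treats NaNs in matching positions as equal, so it is stricter than (a == b).all().
np.testing.assert_array_equal(np.ones((2, 2)), np.full((2, 2), 1.0))
try:
    np.testing.assert_array_equal(np.arange(3), np.array([0, 1, 3]))
except AssertionError:
    print('mismatch reported, as expected')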
from __future__ import division, absolute_import, print_function
try:
# Accessing collections abstract classes from collections
# has been deprecated since Python 3.3
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc
import tempfile
import sys
import shutil
import warnings
import operator
import io
import itertools
import functools
import ctypes
import os
import gc
import weakref
import pytest
from contextlib import contextmanager
from numpy.core.numeric import pickle
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from numpy.compat import strchar, unicode
import numpy.core._multiarray_tests as _multiarray_tests
from numpy.testing import (
assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal,
assert_array_equal, assert_raises_regex, assert_array_almost_equal,
assert_allclose, IS_PYPY, HAS_REFCOUNT, assert_array_less, runstring,
temppath, suppress_warnings
)
from numpy.core.tests._locales import CommaDecimalPointLocale
# Need to test an object that does not fully implement math interface
from datetime import timedelta, datetime
if sys.version_info[:2] > (3, 2):
# In Python 3.3 the representation of empty shape, strides and sub-offsets
# is an empty tuple instead of None.
# https://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
else:
EMPTY = None
def _aligned_zeros(shape, dtype=float, order="C", align=None):
"""
Allocate a new ndarray with aligned memory.
The ndarray is guaranteed *not* aligned to twice the requested alignment.
Eg, if align=4, guarantees it is not aligned to 8. If align=None uses
dtype.alignment."""
dtype = np.dtype(dtype)
if dtype == np.dtype(object):
# Can't do this, fall back to standard allocation (which
# should always be sufficiently aligned)
if align is not None:
raise ValueError("object array alignment not supported")
return np.zeros(shape, dtype=dtype, order=order)
if align is None:
align = dtype.alignment
if not hasattr(shape, '__len__'):
shape = (shape,)
size = functools.reduce(operator.mul, shape) * dtype.itemsize
buf = np.empty(size + 2*align + 1, np.uint8)
ptr = buf.__array_interface__['data'][0]
offset = ptr % align
if offset != 0:
offset = align - offset
if (ptr % (2*align)) == 0:
offset += align
# Note: slices producing 0-size arrays do not necessarily change
# data pointer --- so we use and allocate size+1
buf = buf[offset:offset+size+1][:-1]
data = np.ndarray(shape, dtype, buf, order=order)
data.fill(0)
return data
class TestFlags(object):
def setup(self):
self.a = np.arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict)
assert_raises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
def test_writeable_from_readonly(self):
# gh-9440 - make sure fromstring, from buffer on readonly buffers
# set writeable False
data = b'\x00' * 100
vals = np.frombuffer(data, 'B')
assert_raises(ValueError, vals.setflags, write=True)
types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] )
values = np.core.records.fromstring(data, types)
vals = values['vals']
assert_raises(ValueError, vals.setflags, write=True)
def test_writeable_from_buffer(self):
data = bytearray(b'\x00' * 100)
vals = np.frombuffer(data, 'B')
assert_(vals.flags.writeable)
vals.setflags(write=False)
assert_(vals.flags.writeable is False)
vals.setflags(write=True)
assert_(vals.flags.writeable)
types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] )
values = np.core.records.fromstring(data, types)
vals = values['vals']
assert_(vals.flags.writeable)
vals.setflags(write=False)
assert_(vals.flags.writeable is False)
vals.setflags(write=True)
assert_(vals.flags.writeable)
@pytest.mark.skipif(sys.version_info[0] < 3, reason="Python 2 always copies")
def test_writeable_pickle(self):
import pickle
# Small arrays will be copied without setting base.
# See condition for using PyArray_SetBaseObject in
# array_setstate.
a = np.arange(1000)
for v in range(pickle.HIGHEST_PROTOCOL):
vals = pickle.loads(pickle.dumps(a, v))
assert_(vals.flags.writeable)
assert_(isinstance(vals.base, bytes))
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags['C'], True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
assert_equal(self.a.flags.forc, True)
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
with assert_warns(DeprecationWarning):
assert_equal(self.a.flags.updateifcopy, False)
with assert_warns(DeprecationWarning):
assert_equal(self.a.flags['U'], False)
assert_equal(self.a.flags['UPDATEIFCOPY'], False)
assert_equal(self.a.flags.writebackifcopy, False)
assert_equal(self.a.flags['X'], False)
assert_equal(self.a.flags['WRITEBACKIFCOPY'], False)
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
# not power of two are accessed byte-wise and thus considered aligned
a = np.zeros(5, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
def test_void_align(self):
a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
assert_(a.flags.aligned)
class TestHash(object):
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64)]:
for i in range(1, s):
assert_equal(hash(st(-2**i)), hash(-2**i),
err_msg="%r: -2**%d" % (st, i))
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (st, i - 1))
assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (st, i))
i = max(i - 1, 1)
assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (ut, i - 1))
assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (ut, i))
class TestAttributes(object):
def setup(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
def test_attributes(self):
assert_equal(self.one.shape, (10,))
assert_equal(self.two.shape, (4, 5))
assert_equal(self.three.shape, (2, 5, 6))
self.three.shape = (10, 3, 2)
assert_equal(self.three.shape, (10, 3, 2))
self.three.shape = (2, 5, 6)
assert_equal(self.one.strides, (self.one.itemsize,))
num = self.two.itemsize
assert_equal(self.two.strides, (5*num, num))
num = self.three.itemsize
assert_equal(self.three.strides, (30*num, 6*num, num))
assert_equal(self.one.ndim, 1)
assert_equal(self.two.ndim, 2)
assert_equal(self.three.ndim, 3)
num = self.two.itemsize
assert_equal(self.two.size, 20)
assert_equal(self.two.nbytes, 20*num)
assert_equal(self.two.itemsize, self.two.dtype.itemsize)
assert_equal(self.two.base, np.arange(20))
def test_dtypeattr(self):
assert_equal(self.one.dtype, np.dtype(np.int_))
assert_equal(self.three.dtype, np.dtype(np.float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
assert_(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
def test_int_subclassing(self):
# Regression test for https://github.com/numpy/numpy/pull/3526
numpy_int = np.int_(0)
if sys.version_info[0] >= 3:
# On Py3k int_ should not inherit from int, because it's not
# fixed-width anymore
assert_equal(isinstance(numpy_int, int), False)
else:
# Otherwise, it should inherit from int...
assert_equal(isinstance(numpy_int, int), True)
# ... and fast-path checks on C-API level should also work
from numpy.core._multiarray_tests import test_int_subclass
assert_equal(test_int_subclass(numpy_int), True)
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
return np.ndarray(size, buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_raises(ValueError, make_array, 4, 4, -2)
assert_raises(ValueError, make_array, 4, 2, -1)
assert_raises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
assert_raises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
try:
r = np.ndarray([size], dtype=int, buffer=x,
offset=offset*x.itemsize)
except Exception as e:
raise RuntimeError(e)
r.strides = strides = strides*x.itemsize
return r
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
assert_raises(ValueError, make_array, 4, 4, -2)
assert_raises(ValueError, make_array, 4, 2, -1)
assert_raises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
def set_strides(arr, strides):
arr.strides = strides
assert_raises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
assert_raises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
def test_fill(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = np.empty((3, 2, 1), t)
y = np.empty((3, 2, 1), t)
x.fill(1)
y[...] = 1
assert_equal(x, y)
def test_fill_max_uint64(self):
x = np.empty((3, 2, 1), dtype=np.uint64)
y = np.empty((3, 2, 1), dtype=np.uint64)
value = 2**64 - 1
y[...] = value
x.fill(value)
assert_array_equal(x, y)
def test_fill_struct_array(self):
# Filling from a scalar
x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
x.fill(x[0])
assert_equal(x['f1'][1], x['f1'][0])
# Filling from a tuple that can be converted
# to a scalar
x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
x.fill((3.5, -2))
assert_array_equal(x['a'], [3.5, 3.5])
assert_array_equal(x['b'], [-2, -2])
class TestArrayConstruction(object):
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
assert_equal(r, np.ones((2, 6)))
d = np.ones(6)
tgt = np.ones((2, 6))
r = np.array([d, d])
assert_equal(r, tgt)
tgt[1] = 2
r = np.array([d, d + 1])
assert_equal(r, tgt)
d = np.ones(6)
r = np.array([[d, d]])
assert_equal(r, np.ones((1, 2, 6)))
d = np.ones(6)
r = np.array([[d, d], [d, d]])
assert_equal(r, np.ones((2, 2, 6)))
d = np.ones((6, 6))
r = np.array([d, d])
assert_equal(r, np.ones((2, 6, 6)))
d = np.ones((6, ))
r = np.array([[d, d + 1], d + 2])
assert_equal(len(r), 2)
assert_equal(r[0], [d, d + 1])
assert_equal(r[1], d + 2)
tgt = np.ones((2, 3), dtype=bool)
tgt[0, 2] = False
tgt[1, 0:2] = False
r = np.array([[True, True, False], [False, False, True]])
assert_equal(r, tgt)
r = np.array([[True, False], [True, False], [False, True]])
assert_equal(r, tgt.T)
def test_array_empty(self):
assert_raises(TypeError, np.array)
def test_array_copy_false(self):
d = np.array([1, 2, 3])
e = np.array(d, copy=False)
d[1] = 3
assert_array_equal(e, [1, 3, 3])
e = np.array(d, copy=False, order='F')
d[1] = 4
assert_array_equal(e, [1, 4, 3])
e[2] = 7
assert_array_equal(d, [1, 4, 7])
def test_array_copy_true(self):
d = np.array([[1,2,3], [1, 2, 3]])
e = np.array(d, copy=True)
d[0, 1] = 3
e[0, 2] = -7
assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
e = np.array(d, copy=True, order='F')
d[0, 1] = 5
e[0, 2] = 7
assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
assert_array_equal(d, [[1, 5, 3], [1,2,3]])
def test_array_cont(self):
d = np.ones(10)[::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.ascontiguousarray(d).flags.f_contiguous)
assert_(np.asfortranarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
d = np.ones((10, 10))[::2,::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
class TestAssignment(object):
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
# Broadcasting the input to the output
a[...] = np.arange(3)
assert_equal(a, [[0, 1, 2], [0, 1, 2]])
a[...] = np.arange(2).reshape(2, 1)
assert_equal(a, [[0, 0, 0], [1, 1, 1]])
# For compatibility with <= 1.5, a limited version of broadcasting
# the output to the input.
#
# This behavior is inconsistent with NumPy broadcasting
# in general, because it only uses one of the two broadcasting
# rules (adding a new "1" dimension to the left of the shape),
# applied to the output instead of an input. In NumPy 2.0, this kind
# of broadcasting assignment will likely be disallowed.
a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
# The other type of broadcasting would require a reduction operation.
def assign(a, b):
a[...] = b
assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
def test_assignment_errors(self):
# Address issue #2276
class C:
pass
a = np.zeros(1)
def assign(v):
a[0] = v
assert_raises((AttributeError, TypeError), assign, C())
assert_raises(ValueError, assign, [1])
def test_unicode_assignment(self):
# gh-5049
from numpy.core.numeric import set_string_function
@contextmanager
def inject_str(s):
""" replace ndarray.__str__ temporarily """
set_string_function(lambda x: s, repr=False)
try:
yield
finally:
set_string_function(None, repr=False)
a1d = np.array([u'test'])
a0d = np.array(u'done')
with inject_str(u'bad'):
a1d[0] = a0d # previously this would invoke __str__
assert_equal(a1d[0], u'done')
# this would crash for the same reason
np.array([np.array(u'\xe5\xe4\xf6')])
def test_stringlike_empty_list(self):
# gh-8902
u = np.array([u'done'])
b = np.array([b'done'])
class bad_sequence(object):
def __getitem__(self): pass
def __len__(self): raise RuntimeError
assert_raises(ValueError, operator.setitem, u, 0, [])
assert_raises(ValueError, operator.setitem, b, 0, [])
assert_raises(ValueError, operator.setitem, u, 0, bad_sequence())
assert_raises(ValueError, operator.setitem, b, 0, bad_sequence())
def test_longdouble_assignment(self):
# only relevant if longdouble is larger than float
# we're looking for loss of precision
for dtype in (np.longdouble, np.longcomplex):
# gh-8902
tinyb = np.nextafter(np.longdouble(0), 1).astype(dtype)
tinya = np.nextafter(np.longdouble(0), -1).astype(dtype)
# construction
tiny1d = np.array([tinya])
assert_equal(tiny1d[0], tinya)
# scalar = scalar
tiny1d[0] = tinyb
assert_equal(tiny1d[0], tinyb)
# 0d = scalar
tiny1d[0, ...] = tinya
assert_equal(tiny1d[0], tinya)
# 0d = 0d
tiny1d[0, ...] = tinyb[...]
assert_equal(tiny1d[0], tinyb)
# scalar = 0d
tiny1d[0] = tinyb[...]
assert_equal(tiny1d[0], tinyb)
arr = np.array([np.array(tinya)])
assert_equal(arr[0], tinya)
def test_cast_to_string(self):
# cast to str should do "str(scalar)", not "str(scalar.item())"
# Example: In python2, str(float) is truncated, so we want to avoid
# str(np.float64(...).item()) as this would incorrectly truncate.
a = np.zeros(1, dtype='S20')
a[:] = np.array(['1.12345678901234567890'], dtype='f8')
assert_equal(a[0], b"1.1234567890123457")
class TestDtypedescr(object):
def test_construction(self):
d1 = np.dtype('i4')
assert_equal(d1, np.dtype(np.int32))
d2 = np.dtype('f8')
assert_equal(d2, np.dtype(np.float64))
def test_byteorders(self):
assert_(np.dtype('<i4') != np.dtype('>i4'))
assert_(np.dtype([('a', '<i4')]) != np.dtype([('a', '>i4')]))
def test_structured_non_void(self):
fields = [('a', '<i2'), ('b', '<i2')]
dt_int = np.dtype(('i4', fields))
assert_equal(str(dt_int), "(numpy.int32, [('a', '<i2'), ('b', '<i2')])")
# gh-9821
arr_int = np.zeros(4, dt_int)
assert_equal(repr(arr_int),
"array([0, 0, 0, 0], dtype=(numpy.int32, [('a', '<i2'), ('b', '<i2')]))")
class TestZeroRank(object):
def setup(self):
self.d = np.array(0), np.array('x', object)
def test_ellipsis_subscript(self):
a, b = self.d
assert_equal(a[...], 0)
assert_equal(b[...], 'x')
assert_(a[...].base is a) # `a[...] is a` in numpy <1.9.
assert_(b[...].base is b) # `b[...] is b` in numpy <1.9.
def test_empty_subscript(self):
a, b = self.d
assert_equal(a[()], 0)
assert_equal(b[()], 'x')
assert_(type(a[()]) is a.dtype.type)
assert_(type(b[()]) is str)
def test_invalid_subscript(self):
a, b = self.d
assert_raises(IndexError, lambda x: x[0], a)
assert_raises(IndexError, lambda x: x[0], b)
assert_raises(IndexError, lambda x: x[np.array([], int)], a)
assert_raises(IndexError, lambda x: x[np.array([], int)], b)
def test_ellipsis_subscript_assignment(self):
a, b = self.d
a[...] = 42
assert_equal(a, 42)
b[...] = ''
assert_equal(b.item(), '')
def test_empty_subscript_assignment(self):
a, b = self.d
a[()] = 42
assert_equal(a, 42)
b[()] = ''
assert_equal(b.item(), '')
def test_invalid_subscript_assignment(self):
a, b = self.d
def assign(x, i, v):
x[i] = v
assert_raises(IndexError, assign, a, 0, 42)
assert_raises(IndexError, assign, b, 0, '')
assert_raises(ValueError, assign, a, (), '')
def test_newaxis(self):
a, b = self.d
assert_equal(a[np.newaxis].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ...].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1))
assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
assert_equal(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a, b = self.d
def subscript(x, i):
x[i]
assert_raises(IndexError, subscript, a, (np.newaxis, 0))
assert_raises(IndexError, subscript, a, (np.newaxis,)*50)
def test_constructor(self):
x = np.ndarray(())
x[()] = 5
assert_equal(x[()], 5)
y = np.ndarray((), buffer=x)
y[()] = 6
assert_equal(x[()], 6)
def test_output(self):
x = np.array(2)
assert_raises(ValueError, np.add, x, [1], x)
def test_real_imag(self):
# contiguity checks are for gh-11245
x = np.array(1j)
xr = x.real
xi = x.imag
assert_equal(xr, np.array(0))
assert_(type(xr) is np.ndarray)
assert_equal(xr.flags.contiguous, True)
assert_equal(xr.flags.f_contiguous, True)
assert_equal(xi, np.array(1))
assert_(type(xi) is np.ndarray)
assert_equal(xi.flags.contiguous, True)
assert_equal(xi.flags.f_contiguous, True)
class TestScalarIndexing(object):
def setup(self):
self.d = np.array([0, 1])[0]
def test_ellipsis_subscript(self):
a = self.d
assert_equal(a[...], 0)
assert_equal(a[...].shape, ())
def test_empty_subscript(self):
a = self.d
assert_equal(a[()], 0)
assert_equal(a[()].shape, ())
def test_invalid_subscript(self):
a = self.d
assert_raises(IndexError, lambda x: x[0], a)
assert_raises(IndexError, lambda x: x[np.array([], int)], a)
def test_invalid_subscript_assignment(self):
a = self.d
def assign(x, i, v):
x[i] = v
assert_raises(TypeError, assign, a, 0, 42)
def test_newaxis(self):
a = self.d
assert_equal(a[np.newaxis].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ...].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1))
assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
assert_equal(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a = self.d
def subscript(x, i):
x[i]
assert_raises(IndexError, subscript, a, (np.newaxis, 0))
assert_raises(IndexError, subscript, a, (np.newaxis,)*50)
def test_overlapping_assignment(self):
# With positive strides
a = np.arange(4)
a[:-1] = a[1:]
assert_equal(a, [1, 2, 3, 3])
a = np.arange(4)
a[1:] = a[:-1]
assert_equal(a, [0, 0, 1, 2])
# With positive and negative strides
a = np.arange(4)
a[:] = a[::-1]
assert_equal(a, [3, 2, 1, 0])
a = np.arange(6).reshape(2, 3)
a[::-1,:] = a[:, ::-1]
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
a = np.arange(6).reshape(2, 3)
a[::-1, ::-1] = a[:, ::-1]
assert_equal(a, [[3, 4, 5], [0, 1, 2]])
# With just one element overlapping
a = np.arange(5)
a[:3] = a[2:]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[2:] = a[:3]
assert_equal(a, [0, 1, 0, 1, 2])
a = np.arange(5)
a[2::-1] = a[2:]
assert_equal(a, [4, 3, 2, 3, 4])
a = np.arange(5)
a[2:] = a[2::-1]
assert_equal(a, [0, 1, 2, 1, 0])
a = np.arange(5)
a[2::-1] = a[:1:-1]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[:1:-1] = a[2::-1]
assert_equal(a, [0, 1, 0, 1, 2])
class TestCreation(object):
"""
Test the np.array constructor
"""
def test_from_attribute(self):
class x(object):
def __array__(self, dtype=None):
pass
assert_raises(ValueError, np.array, x())
def test_from_string(self):
types = np.typecodes['AllInteger'] + np.typecodes['Float']
nstr = ['123', '123']
result = np.array([123, 123], dtype=int)
for type in types:
msg = 'String conversion for %s' % type
assert_equal(np.array(nstr, dtype=type), result, err_msg=msg)
def test_void(self):
arr = np.array([], dtype='V')
assert_equal(arr.dtype.kind, 'V')
def test_too_big_error(self):
# 45341 is the smallest integer greater than sqrt(2**31 - 1).
# 3037000500 is the smallest integer greater than sqrt(2**63 - 1).
# We want to make sure that the square byte array with those dimensions
# is too big on 32 or 64 bit systems respectively.
if np.iinfo('intp').max == 2**31 - 1:
shape = (46341, 46341)
elif np.iinfo('intp').max == 2**63 - 1:
shape = (3037000500, 3037000500)
else:
return
assert_raises(ValueError, np.empty, shape, dtype=np.int8)
assert_raises(ValueError, np.zeros, shape, dtype=np.int8)
assert_raises(ValueError, np.ones, shape, dtype=np.int8)
def test_zeros(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((13,), dtype=dt)
assert_equal(np.count_nonzero(d), 0)
# true for ieee floats
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='4i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
assert_equal(np.count_nonzero(d), 0)
@pytest.mark.slow
def test_zeros_big(self):
# test big array as they might be allocated different by the system
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((30 * 1024**2,), dtype=dt)
assert_(not d.any())
# This test can fail on 32-bit systems due to insufficient
# contiguous memory. Deallocating the previous array increases the
# chance of success.
del(d)
def test_zeros_obj(self):
# test initialization from PyLong(0)
d = np.zeros((13,), dtype=object)
assert_array_equal(d, [0] * 13)
assert_equal(np.count_nonzero(d), 0)
def test_zeros_obj_obj(self):
d = np.zeros(10, dtype=[('k', object, 2)])
assert_array_equal(d['k'], 0)
def test_zeros_like_like_zeros(self):
# test zeros_like returns the same as zeros
for c in np.typecodes['All']:
if c == 'V':
continue
d = np.zeros((3,3), dtype=c)
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
# explicitly check some special cases
d = np.zeros((3,3), dtype='S5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='U5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='f4,f4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
def test_empty_unicode(self):
# don't throw decode errors on garbage memory
for i in range(5, 100, 5):
d = np.empty(i, dtype='U')
str(d)
def test_sequence_non_homogenous(self):
assert_equal(np.array([4, 2**80]).dtype, object)
assert_equal(np.array([4, 2**80, 4]).dtype, object)
assert_equal(np.array([2**80, 4]).dtype, object)
assert_equal(np.array([2**80] * 3).dtype, object)
assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, complex)
assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, complex)
assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, complex)
@pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
def test_sequence_long(self):
assert_equal(np.array([long(4), long(4)]).dtype, np.long)
assert_equal(np.array([long(4), 2**80]).dtype, object)
assert_equal(np.array([long(4), 2**80, long(4)]).dtype, object)
assert_equal(np.array([2**80, long(4)]).dtype, object)
def test_non_sequence_sequence(self):
"""Should not segfault.
Class Fail breaks the sequence protocol for new style classes, i.e.,
those derived from object. Class Map is a mapping type indicated by
raising a ValueError. At some point we may raise a warning instead
of an error in the Fail case.
"""
class Fail(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise ValueError()
class Map(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise KeyError()
a = np.array([Map()])
assert_(a.shape == (1,))
assert_(a.dtype == np.dtype(object))
assert_raises(ValueError, np.array, [Fail()])
def test_no_len_object_type(self):
# gh-5100, want object array from iterable object without len()
class Point2:
def __init__(self):
pass
def __getitem__(self, ind):
if ind in [0, 1]:
return ind
else:
raise IndexError()
d = np.array([Point2(), Point2(), Point2()])
assert_equal(d.dtype, np.dtype(object))
def test_false_len_sequence(self):
# gh-7264, segfault for this example
class C:
def __getitem__(self, i):
raise IndexError
def __len__(self):
return 42
assert_raises(ValueError, np.array, C()) # segfault?
def test_failed_len_sequence(self):
# gh-7393
class A(object):
def __init__(self, data):
self._data = data
def __getitem__(self, item):
return type(self)(self._data[item])
def __len__(self):
return len(self._data)
# len(d) should give 3, but len(d[0]) will fail
d = A([1,2,3])
assert_equal(len(np.array(d)), 3)
def test_array_too_big(self):
# Test that array creation succeeds for arrays addressable by intp
# on the byte level and fails for too large arrays.
buf = np.zeros(100)
max_bytes = np.iinfo(np.intp).max
for dtype in ["intp", "S20", "b"]:
dtype = np.dtype(dtype)
itemsize = dtype.itemsize
np.ndarray(buffer=buf, strides=(0,),
shape=(max_bytes//itemsize,), dtype=dtype)
assert_raises(ValueError, np.ndarray, buffer=buf, strides=(0,),
shape=(max_bytes//itemsize + 1,), dtype=dtype)
def test_jagged_ndim_object(self):
# Lists of mismatching depths are treated as object arrays
a = np.array([[1], 2, 3])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = np.array([1, [2], 3])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = np.array([1, 2, [3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
def test_jagged_shape_object(self):
# The jagged dimension of a list is turned into an object array
a = np.array([[1, 1], [2], [3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = np.array([[1], [2, 2], [3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = np.array([[1], [2], [3, 3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
class TestStructured(object):
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
# Since the subarray is always in C-order, a transpose
# does not swap the subarray:
assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
# In Fortran order, the subarray gets appended
# like in all other cases, not prepended as a special case
b = a.copy(order='F')
assert_equal(a['a'].shape, b['a'].shape)
assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
def test_subarray_comparison(self):
# Check that comparisons between record arrays with
# multi-dimensional field types work properly
a = np.rec.fromrecords(
[([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
dtype=[('a', ('f4', 3)), ('b', object), ('c', ('i4', (2, 2)))])
b = a.copy()
assert_equal(a == b, [True, True])
assert_equal(a != b, [False, False])
b[1].b = 'c'
assert_equal(a == b, [True, False])
assert_equal(a != b, [False, True])
for i in range(3):
b[0].a = a[0].a
b[0].a[i] = 5
assert_equal(a == b, [False, False])
assert_equal(a != b, [True, True])
for i in range(2):
for j in range(2):
b = a.copy()
b[0].c[i, j] = 10
assert_equal(a == b, [False, True])
assert_equal(a != b, [True, False])
# Check that broadcasting with a subarray works
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
|
assert_equal(a == b, [[True, True, False], [False, False, True]])
|
numpy.testing.assert_equal
|
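An illustrative sketch of numpy.testing.assert_equal, the helper used throughout the test above; the structured-array detail is simplified to plain arrays.

import numpy as np
from numpy.testing import assert_equal

# assert_equal compares scalars, sequences, dicts and arrays recursively; for
# ndarrays it falls back to elementwise comparison like assert_array_equal.
assert_equal(np.array([1, 2]) == np.array([1, 3]), [True, False])
assert_equal({'a': np.arange(3)}, {'a': np.array([0, 1, 2])})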
import datetime as dt
import gc
import json
import logging
import os
import pickle
from glob import glob
from typing import Dict, List, Optional, Tuple, Union
import h5py
import matplotlib as mpl
import matplotlib.dates as mdates
import matplotlib.gridspec as gs
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pyproj
import rasterio as rio
import simplekml
from cataloging.vi import gliImage, ngrdiImage, osaviImage
from fluidml.common import Task
#from PIL import Image
from pycpd import RigidRegistration
from pykml import parser
from rasterio.enums import Resampling
from rasterio.transform import rowcol, xy
from rasterio.windows import Window
from scipy.ndimage import distance_transform_edt
from scipy.optimize import curve_fit
from scipy.signal import find_peaks
#from skimage.exposure import equalize_adapthist
from skimage.feature import peak_local_max
from skimage.filters import gaussian, threshold_otsu
from skimage.measure import label, regionprops
from skimage.segmentation import watershed
from skimage.transform import hough_line, hough_line_peaks, resize
from sklearn.neighbors import NearestNeighbors
logger = logging.getLogger(__name__)
# suppress pickle 'error' from rasterio
logging.Logger.manager.loggerDict['rasterio'].setLevel(logging.CRITICAL)
logging.Logger.manager.loggerDict['matplotlib'].setLevel(logging.CRITICAL)
import warnings
warnings.filterwarnings("ignore")
mpl.use('Agg')
def read_raster(
image_path: str,
all_channels: np.array,
channels: List[str]
):
ch = [np.argmax(all_channels == c)+1 for c in channels]
raster = rio.open(image_path)
if raster.dtypes[0] == "float32":
data = raster.read(ch, fill_value=np.nan)
data /= np.nanmax(data)
elif raster.dtypes[0] == "uint8":
if "alpha" in all_channels:
data = raster.read(ch).astype(np.float32)
alpha_ch = raster.read(int(np.argmax(all_channels == "alpha")+1))
            for d in data:  # iterate over the selected channel planes
d[alpha_ch == 0] = np.nan
else:
data = raster.read(ch, fill_value=0).astype(np.float32)
else:
raise NotImplementedError()
return np.transpose(data, axes=(1,2,0))
def write_onechannel_raster(
image_path: str,
image: np.array,
meta: Dict, dtype: str
):
if dtype == 'float32':
meta.update({
'dtype': 'float32',
'height': image.shape[0],'count': 1,'nodata': -32767,
'width': image.shape[1]})
elif dtype == 'uint8':
meta.update({
'dtype': 'uint8',
'height': image.shape[0],'count': 1,'nodata': 0,
'width': image.shape[1]})
else:
raise NotImplementedError()
with rio.open(image_path, "w", **meta) as dest:
dest.write(image,1)
def calc_m_per_px(
raster_meta: Dict
) -> float:
# read CRS of rasterio data
proj_crs = pyproj.crs.CRS.from_user_input(raster_meta["crs"])
# GPS coordinates of anchor point
lon0, lat0 = xy(raster_meta["transform"],0,0)
# calculate UTM zone
utm_zone = int(np.floor((lon0/360)*60+31))
utm = pyproj.Proj(proj='utm', zone=utm_zone, ellps='WGS84')
UTM0_x, UTM0_y = utm(*xy(raster_meta["transform"],0,0))
UTM1_x, UTM1_y = utm(*xy(raster_meta["transform"],0,1))
UTM2_x, UTM2_y = utm(*xy(raster_meta["transform"],1,0))
# calculate unit pixel distances
pxx = abs(UTM1_x - UTM0_x)
pxy = abs(UTM2_y - UTM0_y)
# take mean (assume quadratic pixels)
m_per_px = np.mean([pxx, pxy])
return m_per_px
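# Quick sanity check of the UTM-zone formula used above (illustrative sketch; the
# longitude value is made up). Longitude 9.5 deg E lies in UTM zone 32 (6-12 deg E).
def _example_utm_zone(lon0: float = 9.5) -> int:
    return int(np.floor((lon0 / 360) * 60 + 31))   # -> 32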
def px_to_utm(
point_cloud: np.ndarray,
raster_meta: Dict
) -> Tuple[np.ndarray, pyproj.proj.Proj]:
# read CRS of rasterio data
proj_crs = pyproj.crs.CRS.from_user_input(raster_meta["crs"])
# GPS coordinates of point cloud
lon, lat = np.asarray(xy(raster_meta["transform"],*point_cloud.T))
# calculate UTM zone
utm_zone = int(np.floor((lon.mean()/360)*60+31))
utm_transform = pyproj.Proj(proj='utm', zone=utm_zone, ellps='WGS84')
utm = np.asarray(utm_transform(lon, lat)).T
return utm, utm_transform
def readCoordsFromKml(
filename: str
) -> np.ndarray:
with open(filename, "r") as kmlfile:
root = parser.parse(kmlfile).getroot()
lonlat = []
for c in root.Document.iterchildren():
lonlat.append([float(x) for x in c.Point.coordinates.text.split(",")[:2]])
lonlat = np.asarray(lonlat)
return lonlat
def growFunction(
x: float,
g: float,
lg: float,
xg: float,
d: float,
ld: float,
xd: float
) -> float:
if d > 0:
return (g/(1+np.exp(-lg*(x-xg)))) - d/(1+np.exp(-ld*(x-xd)))
else:
return (g/(1+np.exp(-lg*(x-xg))))
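# Illustrative sketch (not part of the original pipeline): growFunction is a logistic
# "growth" term minus an optional logistic "decay" term. All parameter values below
# are made up purely for demonstration.
def _example_grow_function():
    days = np.linspace(0, 100, 5)   # cumulative days since the first observation
    # g: growth amplitude, lg/xg: growth rate and midpoint; d/ld/xd: decay analogues
    return growFunction(days, g=0.8, lg=0.15, xg=30., d=0.3, ld=0.1, xd=80.)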
def cumDays(
observation_dates: Union[List[float],np.array]
) -> np.array:
cum_days = np.cumsum([d.days for d in np.diff(np.sort(observation_dates))]).astype(float)
cum_days = np.hstack((0, cum_days))
return cum_days
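# Minimal usage sketch (dates are made up): cumDays turns sorted observation dates
# into cumulative day offsets starting at 0.
def _example_cum_days():
    dates = [dt.date(2021, 4, 1), dt.date(2021, 4, 15), dt.date(2021, 5, 3)]
    return cumDays(dates)   # -> array([ 0., 14., 32.])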
def growScaling(
cum_days: np.array,
bounds: Tuple,
grow_func_params: np.array
) -> np.array:
earliest, latest = bounds
grow_func = growFunction(cum_days, *grow_func_params)
maxgrow_val = np.max(grow_func)
grow_func = (grow_func - grow_func[0]) / (maxgrow_val - grow_func[0])
scaled = grow_func * (latest - earliest) + earliest
return scaled
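# Toy sketch (made-up numbers): growScaling rescales the fitted grow curve so that its
# first value maps onto `earliest` and its maximum onto `latest`.
def _example_grow_scaling():
    cum_days = np.array([0., 20., 40., 60.])
    params = np.array([0.8, 0.15, 30., 0., 0.1, 80.])   # d == 0 -> pure logistic growth
    return growScaling(cum_days, bounds=(2.0, 10.0), grow_func_params=params)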
def makeDirectory(
directory: str
) -> None:
if not os.path.exists(directory):
os.makedirs(directory)
def group_points(
points: np.array,
layers: np.array,
max_dist: float
) -> Tuple[np.array, np.array]:
nn = NearestNeighbors(n_neighbors=1, n_jobs=-1)
# initialization
# -> all labels to -1
labels = -np.ones_like(layers)
# all given layers
uni_layers = np.unique(layers)
# -> give points of first layer individual group labels
labels[layers == uni_layers[0]] = np.arange(np.sum(layers == uni_layers[0]))
# -> first evaluation point cloud: first layer
centroids = points[layers == uni_layers[0]]
ind = np.arange(len(points))
for i in range(1, len(uni_layers)):
# fit nearest neighbor model
nn.fit(centroids)
# evaluate on next layer
dist, ass_group = nn.kneighbors(points[layers == uni_layers[i]])
dist = dist.flatten()
ass_group = ass_group.flatten()
# exclude points that have more than max_dist distance to a neighbor
# new_member array:
# 1 = valid member candidate for existing group
# 0 = valid member candidate for new group
# -1 = excluded due to multiple candidates for a single group
new_member = (dist <= max_dist).astype(int)
# if multiple (valid!) points are assigned to the same group, take the nearest
valid = np.copy(new_member).astype(bool)
valid_ind = np.arange(len(valid))[valid]
for j, counts in enumerate(np.bincount(ass_group[valid])):
if counts > 1:
ass_group_ind = valid_ind[ass_group[valid] == j]
best_ind = ass_group_ind[np.argsort(dist[ass_group_ind])]
new_member[best_ind[1:]] = -1
# assign the group labels to the new members
layer_ind = ind[layers == uni_layers[i]]
old_layer_ind = layer_ind[new_member == 1]
labels[old_layer_ind] = ass_group[new_member == 1]
# give new group labels to points not registered so far
new_layer_ind = layer_ind[new_member == 0]
labels[new_layer_ind] = np.arange(labels.max()+1, labels.max()+1+len(new_layer_ind))
# new reference cloud are the centroids of the so far accumulated clusters
centroids = np.stack([np.mean(points[labels == label], axis=0) for label in range(labels.max()+1)])
return labels, centroids
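# Usage sketch with toy data (values are made up): points from two "layers"
# (observation dates) are merged into groups; a layer-1 point within max_dist of a
# layer-0 point inherits its group label, the rest start new groups.
def _example_group_points():
    pts = np.array([[0., 0.], [10., 0.],     # layer 0
                    [0.5, 0.], [30., 0.]])   # layer 1
    lyr = np.array([0, 0, 1, 1])
    labels, centroids = group_points(pts, lyr, max_dist=2.0)
    return labels, centroids                 # labels -> [0, 1, 0, 2]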
def inverse_transform(
xy_centered_aligned,
xy_center,
transform_coeffs
):
s = transform_coeffs[0]
rot = np.deg2rad(transform_coeffs[1])
t = transform_coeffs[2:]
rot_inv = np.array([[np.cos(rot), np.sin(rot)], [-np.sin(rot), np.cos(rot)]])
return rot_inv@(xy_centered_aligned-t).T/s + xy_center
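# Round-trip sketch (assumed coefficient layout: transform_coeffs = [scale, angle_deg,
# tx, ty], matching the rigid alignment used further below; all values are illustrative).
def _example_inverse_transform():
    xy_center = np.array([5., 5.])
    s, angle, t = 1.2, np.deg2rad(30.), np.array([1., -2.])
    R = np.array([[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]])
    xy = np.array([7., 6.])
    aligned = s * R @ (xy - xy_center) + t   # forward: center, rotate, scale, translate
    coeffs = np.array([s, 30., *t])
    return inverse_transform(aligned, xy_center, coeffs)   # -> approximately [7., 6.]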
def add_non_detected(
df_less: pd.DataFrame,
df_meta: pd.DataFrame
) -> pd.DataFrame:
dates = np.unique(df_meta["date"])
xy_center = df_meta["xy_center"].iloc[0]
df_add = pd.DataFrame()
for g_id in np.unique(df_less["group_id"]):
df_group = df_less[df_less["group_id"] == g_id]
missing_dates = dates[np.isin(dates, df_group["date"], invert=True)]
for d in missing_dates:
xy_centered_aligned = df_group["xy_centered_aligned_cm"].mean(axis=0) # group centroid [cm (UTM)]
cropline_y = df_group["y_cropline_rotated_cm"].iloc[0]
align_transform = df_meta[df_meta["date"] == d]["align_transform"].iloc[0]
gps_transform = df_meta[df_meta["date"] == d]["gps_transform"].iloc[0]
utm_transform = df_meta[df_meta["date"] == d]["utm_transform"].iloc[0]
#cr = df_meta[df_meta["date"] == d]["cover_ratio"].values
#mc = df_meta[df_meta["date"] == d]["align_median_confidence"].values
xy_backtrans = inverse_transform(xy_centered_aligned, xy_center, align_transform)
lonlat_backtrans = utm_transform(*xy_backtrans/100., inverse=True)
df_add = df_add.append(
dict([("field_id" , df_group["field_id"].iloc[0]),
("date" , d),
("group_id" , g_id),
("group_size" , df_group["group_size"].iloc[0]),
("group_cropline_id" , df_group["group_cropline_id"].iloc[0]),
("xy_cm" , xy_backtrans),
("xy_px" , list(rowcol(gps_transform, *lonlat_backtrans))),
("lonlat" , lonlat_backtrans),
("xy_centered_aligned_cm" , xy_centered_aligned),
("xy_centroid_centered_aligned_cm" , xy_centered_aligned),
("y_cropline_rotated_cm" , cropline_y),
("centroid_dist_cm" , 0.),
("detected" , False)]), ignore_index=True)
return df_add
def filterGoodPlantsByPercDet(
plants_df: pd.DataFrame,
meta_df: pd.DataFrame,
filter_coverratio: float,
perc_min_det: float
) -> pd.DataFrame:
plants_meta_df = plants_df.merge(meta_df, on=["date", "field_id"], how="left")
n_dates = len(np.unique(meta_df["date"]))
# good plant group := at least perc_min_det direct detection ratio up to certain given cover ratio
good_idx = []
for f_id in np.unique(meta_df["field_id"]):
n_counts_below_cr_thres = np.sum(np.unique(plants_meta_df[plants_meta_df["field_id"]==f_id]["cover_ratio"]) <= filter_coverratio)
groups, counts = np.unique(plants_meta_df[(plants_meta_df["field_id"]==f_id) & (plants_meta_df["cover_ratio"] <= filter_coverratio) & (plants_meta_df["detected"] == True)]["group_id"], return_counts=True)
interest_groups = groups[counts/float(n_counts_below_cr_thres) >= perc_min_det]
candidates = plants_meta_df[(plants_meta_df["field_id"]==f_id) & (np.isin(plants_meta_df["group_id"], interest_groups))]
for g_id in interest_groups:
cand_group = candidates[candidates["group_id"]==g_id]
if len(cand_group)==n_dates:
good_idx.extend(cand_group.index)
good_df = plants_meta_df.loc[good_idx].sort_values(["field_id", "group_id", "date"])
return good_df
class SegmentSoilPlants(Task):
def __init__(
self,
image_path: str,
image_channels: List[str],
veg_index: str,
use_watershed: bool,
max_coverratio: float,
make_orthoimage: bool,
orthoimage_dir: str,
plot_result: bool,
plot_dir: str,
plot_format: str,
plot_dpi: int,
plot_cmap: str
):
super().__init__()
self.image_path = image_path
self.image_channels = np.asarray(image_channels)
self.veg_index = veg_index
self.use_watershed = use_watershed
self.max_coverratio = max_coverratio
self.make_orthoimage = make_orthoimage
self.orthoimage_dir = orthoimage_dir
self.plot_result = plot_result
self.plot_dir = plot_dir
self.plot_format = plot_format
self.plot_dpi = plot_dpi
self.plot_cmap = plot_cmap
def plot_raw(
self
):
logger.info(f"{self.name}-{self.date.date()} -> Plot raw image.")
if len(self.image_channels) < 4:
n_rows, n_cols = 1, len(self.image_channels)
else:
n_rows, n_cols = 2, len(self.image_channels)//2
fig, ax = plt.subplots(n_rows, n_cols, sharex=True, sharey=True, figsize=(self.width/500*n_cols, self.height/800*n_rows))
data = read_raster(self.image_path, self.image_channels, self.image_channels)
for (i, (a, c)) in enumerate(zip(ax.ravel(), self.image_channels)):
im = a.imshow(data[:,:,i], cmap=self.plot_cmap)
try:
fig.colorbar(im, ax=a)
except:
pass
a.set(xlabel='x', ylabel='y', title = c, aspect='equal')
fig.suptitle("raw image data")
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_{self.date.date()}_01_channels"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
del data, fig, ax, im
plt.close("all")
gc.collect()
def plot_segmentation(
self
):
logger.info(f"{self.name}-{self.date.date()} -> Plot segmentation image.")
fig = plt.figure(figsize=(3*self.width/500, self.height/500), tight_layout=True)
gridspec = gs.GridSpec(1,3,width_ratios=[2,1,2], figure=fig)
ax1 = fig.add_subplot(gridspec[0])
ax2 = fig.add_subplot(gridspec[1])
ax3 = fig.add_subplot(gridspec[2])
m = ax1.imshow(self.vi_image.astype(float), cmap=self.plot_cmap, vmin=-1, vmax=1)
cb = fig.colorbar(m, ax=ax1)
cb.set_label("VI")
ax1.set(title=f"{self.veg_index} image", xlabel="px", ylabel="px")
ax2.hist(self.vi_image[np.isfinite(self.vi_image)], bins=256, orientation="horizontal", color="C0")
ax2.set(title=f"{self.veg_index} value distribution", ylim=(-1,1), xlabel="counts", xscale="log")
if self.cover_ratio_est < 0.01:
ax2.axhline(self.thres, c='r', label=f"Threshold (99-percentile): {self.thres:.2f}")
else:
ax2.axhline(self.thres, c='r', label=f"Threshold (Otsu): {self.thres:.2f}")
ax2.legend()
ax3.imshow(self.seg_mask, cmap=self.plot_cmap)
ax3.set(title=f"Segmented plant area (cover ratio: {100.*self.cover_ratio:.2f} %)", xlabel="px", ylabel="px")
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_{self.date.date()}_02_segmentation"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
plt.close("all")
del fig, ax1, ax2, ax3
gc.collect()
def run(
self
):
try:
self.field_id, d = os.path.basename(self.image_path).replace(".tif", "").split("_")[:2]
year = int(d[:4])
month = int(d[4:6])
day = int(d[6:8])
self.date = dt.datetime(year, month, day)
        except Exception:
            logger.error(f"Wrong image path or no files found: {self.image_path}")
            raise
logger.info(f"{self.name}-{self.date.date()} -> Load image.")
raster = rio.open(self.image_path)
raster_meta = raster.meta
self.height, self.width = raster.shape
px_res = calc_m_per_px(raster_meta)*100. # cm/px
logger.info(f"{self.name}-{self.date.date()} -> Calculated resolution: {px_res:.4f} cm/px.")
del raster
gc.collect()
# calculate Vegetation Index which has values in [-1,1]
if self.veg_index == "NGRDI":
channels = read_raster(self.image_path, self.image_channels, ["R", "G"])
self.vi_image = ngrdiImage(R = channels[:,:,0], G = channels[:,:,1])
est_thres = 0
elif self.veg_index == "GLI":
channels = read_raster(self.image_path, self.image_channels, ["R", "G", "B"])
self.vi_image = gliImage(R = channels[:,:,0], G = channels[:,:,1], B = channels[:,:,2])
est_thres = 0.2
elif self.veg_index == "OSAVI":
channels = read_raster(self.image_path, self.image_channels, ["R", "NIR"])
self.vi_image = osaviImage(R = channels[:,:,0], NIR = channels[:,:,1], y_osavi = 0.6)
            est_thres = 0.25
        else:
            raise NotImplementedError(f"Unknown vegetation index: {self.veg_index}")
del channels
gc.collect()
# cover ratio estimation
self.cover_ratio_est = np.nansum(self.vi_image >= est_thres)/np.sum(np.isfinite(self.vi_image))
logger.info(f"{self.name}-{self.date.date()} -> Use {self.veg_index} Vegetation Index. Cover ratio estimation: {self.cover_ratio_est*100.:.2f} %")
if self.cover_ratio_est <= self.max_coverratio:
# calculate threshold with Otsu's method
if self.cover_ratio_est < 0.01:
self.thres = np.percentile(self.vi_image[np.isfinite(self.vi_image)], 99)
                logger.warning(f"{self.name}-{self.date.date()} -> Estimated cover ratio below 1 % -> Take 99-percentile as threshold: {self.thres:.2f}")
else:
self.thres = threshold_otsu(self.vi_image[np.isfinite(self.vi_image)])
logger.info(f"{self.name}-{self.date.date()} -> Otsu threshold: {self.thres:.2f}")
# segmentation
if self.use_watershed:
logger.info(f"{self.name}-{self.date.date()} -> Segment soil and plants with watershed method.")
markers = np.zeros_like(self.vi_image, dtype=np.uint8)
markers[self.vi_image <= self.thres] = 1 # soil
markers[self.vi_image > self.thres] = 2 # plant
self.seg_mask = (watershed(self.vi_image, markers) - 1).astype(bool) # True -> plant, False -> soil
del markers
else:
logger.info(f"{self.name}-{self.date.date()} -> Segment soil and plants without watershed method.")
self.seg_mask = np.zeros_like(self.vi_image, dtype=bool) # True -> plant, False -> soil
self.seg_mask[self.vi_image > self.thres] = True # plant
self.cover_ratio = np.sum(self.seg_mask)/np.sum(np.isfinite(self.vi_image))
logger.info(f"{self.name}-{self.date.date()} -> Cover ratio recalculated: {self.cover_ratio*100.:.2f} %")
if self.plot_result:
makeDirectory(self.plot_dir)
self.plot_segmentation()
gc.collect()
else:
            logger.warning(f"{self.name}-{self.date.date()} -> Estimated cover ratio ({self.cover_ratio_est*100.:.2f} %) is too high to extract plants -> Skip plot.")
self.seg_mask = []
self.cover_ratio = self.cover_ratio_est
self.save(obj=self.seg_mask, name="segmentation_mask", type_='pickle')
self.save(obj=self.cover_ratio, name="cover_ratio", type_='json')
self.save(obj=self.field_id, name="field_id", type_='json')
self.save(obj=self.date, name="date", type_='pickle')
self.save(obj=raster_meta, name="raster_meta", type_='pickle')
self.save(obj=px_res, name="px_resolution", type_='json')
        if (self.make_orthoimage) and (len(self.seg_mask) > 0):
makeDirectory(self.orthoimage_dir)
logger.info(f"{self.name}-{self.date.date()} -> Save segmentation mask as orthoimage.")
write_onechannel_raster(os.path.join(self.orthoimage_dir, f"{self.field_id}_{self.date.date()}_segmentation.tif"),
np.uint8(self.seg_mask*255),
raster_meta,
"uint8")
# plot raw channel information
if self.plot_result:
makeDirectory(self.plot_dir)
self.plot_raw()
gc.collect()
class FitGrowFunction(Task):
def __init__(
self,
plot_result: bool,
plot_dir: str,
plot_format: str,
plot_dpi: int
):
super().__init__()
self.plot_result = plot_result
self.plot_dir = plot_dir
self.plot_format = plot_format
self.plot_dpi = plot_dpi
def plot(
self
):
logger.info(f"{self.name} -> Plot Grow function.")
g, lg, xg, d, ld, xd = self.fit
cd = np.linspace(0, self.cum_days[-1], 1000)
cal_days = [self.observation_dates[0] + dt.timedelta(days=x) for x in self.cum_days]
fig, ax = plt.subplots()
ax.scatter(self.cum_days, self.cover_ratios, label="observations")
if d > 0:
label = r"grow function fit: $f(x)=\frac{g}{1+e^{-\lambda_g(x-x_g)}}-\frac{d}{1+e^{-\lambda_d(x-x_d)}}$"+f"\n$g$={g:.4g}, $\\lambda_g$={lg:.4g}, $x_g$={xg:.4g}\n$d$={d:.4g}, $\\lambda_d$={ld:.4g}, $x_d$={xd:.4g}"
else:
label = r"grow function fit: $f(x)=\frac{g}{1+e^{-\lambda_g(x-x_g)}}$"+f"\n$g$={g:.4g}, $\\lambda_g$={lg:.4g}, $x_g$={xg:.4g}"
ax.plot(cd, growFunction(cd, *self.fit), c="r", label=label)
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.set(xlabel="days", ylabel="cover ratio")
ax.legend()
ax.grid()
ax_dt = ax.twiny()
ax_dt.set_xlim(map(lambda cd: self.observation_dates[0] + dt.timedelta(days=cd), ax.get_xlim()))
ax_dt.set_xlabel("calendar date")
ax_dt.set_xticks(cal_days)
ax_dt.tick_params(axis='x', labelrotation=90)
ax.set(title=f"{self.field_id}: grow function fit")
savename = os.path.join(self.plot_dir, f"{self.field_id}_grow_function"+self.plot_format)
fig.savefig(savename, dpi=self.plot_dpi, bbox_inches='tight')
plt.close("all")
del fig, ax, ax_dt
def run(
self,
reduced_results: List[Dict[str, Dict]]
):
cover_ratios = []
observation_dates = []
for r in reduced_results:
cover_ratios.append(r["result"]["cover_ratio"])
observation_dates.append(r["result"]["date"])
observation_dates = np.asarray(observation_dates)
cover_ratios = np.asarray(cover_ratios)
sort = np.argsort(observation_dates)
self.observation_dates = observation_dates[sort]
self.cover_ratios = cover_ratios[sort]
self.cum_days = cumDays(self.observation_dates)
self.field_id = reduced_results[0]["result"]["field_id"]
try:
self.fit, self.cov = curve_fit(growFunction, self.cum_days, self.cover_ratios,
p0=[0.8, 0.1, self.cum_days[-1]/3, 0.3, 0.1, 2*self.cum_days[-1]/3],
maxfev=1000000)
# calculate corrected cover ratios with grow function
#gf_cover_ratio = growFunction(self.cum_days, *self.fit)
#self.save(obj=gf_cover_ratio, name="grow_function_cover_ratios", type_='pickle')
#self.save(obj=self.observation_dates, name="dates", type_='pickle')
logger.info(f"{self.name} -> Grow function fitted")
if self.plot_result:
makeDirectory(self.plot_dir)
self.plot()
gc.collect()
except Exception as e:
self.fit = np.nan
self.cov = np.nan
logger.warning(f"{self.name} -> Grow function could not be fitted. Error: {e}")
self.save(obj=self.fit, name="grow_function_fit_params", type_='pickle')
self.save(obj=self.cov, name="grow_function_cov_matrix", type_='pickle')
class ExtractPlantPositions(Task):
def __init__(
self,
min_peak_distance: float,
peak_threshold: float,
gauss_sigma_bounds: Tuple[float, float],
use_growfunction: bool,
make_orthoimage: bool,
orthoimage_dir: str,
plot_result: bool,
plot_dir: str,
plot_format: str,
plot_dpi: int,
plot_cmap: str
):
super().__init__()
self.min_peak_distance = min_peak_distance
self.peak_threshold = peak_threshold
self.gauss_sigma_bounds = gauss_sigma_bounds
self.use_growfunction = use_growfunction
self.make_orthoimage = make_orthoimage
self.orthoimage_dir = orthoimage_dir
self.plot_result = plot_result
self.plot_dir = plot_dir
self.plot_format = plot_format
self.plot_dpi = plot_dpi
self.plot_cmap = plot_cmap
def plot_gauss_blur(
self
):
logger.info(f"{self.name}-{self.date.date()} -> Plot Gaussian blur image.")
fig, ax = plt.subplots(figsize=(self.width/500, self.height/500))
im = ax.imshow(self.blurred, cmap='gray')
ax.set(title=f"Gaussian blur ($\sigma$ = {self.sigma:.2f} px)", aspect='equal', xlabel='x [cm]', ylabel='y [cm]')
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_{self.date.date()}_03_gauss_blur"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
plt.close("all")
del fig, ax
def plot_peaks(
self
):
logger.info(f"{self.name}-{self.date.date()} -> Plot peak position image.")
fig, ax = plt.subplots(figsize=(self.width/500, self.height/500))
ax.scatter(*self.peaks.T[::-1], color='red', s=2, label=f"{len(self.peaks)} peaks")
ax.imshow(self.blurred, cmap=self.plot_cmap)
ax.set(title=f"Peaks (min. distance = {self.min_peak_distance} cm = {self.min_peak_distance/self.px_res:.2f} px)", aspect='equal', xlabel='x [px]', ylabel='y [px]')
ax.legend()
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_{self.date.date()}_04_peaks"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
plt.close("all")
del fig, ax
def run(
self,
segmentation_mask: np.ndarray,
#grow_function_cover_ratios: np.array,
#dates: np.array,
px_resolution: float,
cover_ratio: float,
date: dt.datetime,
field_id: str,
raster_meta: Dict
):
self.date = date
self.field_id = field_id
self.px_res = px_resolution
if len(segmentation_mask) > 0:
# apply gaussian filter with scaled sigma
if self.use_growfunction:
raise NotImplementedError()
#cover_ratio = grow_function_cover_ratios[dates == date]
#logger.info(f"{self.name}-{self.date.date()} -> Use cover ratio from grow function fit. ({100.*cover_ratio:.2f} %)")
else:
logger.info(f"{self.name}-{self.date.date()} -> Use standard cover ratio. ({100.*cover_ratio:.2f} %)")
self.sigma = (self.gauss_sigma_bounds[0] + cover_ratio*np.diff(self.gauss_sigma_bounds)[0]) / self.px_res
logger.info(f"{self.name}-{self.date.date()} -> Blurring with sigma = {self.sigma*px_resolution:.2f} cm = {self.sigma:.2f} px.")
self.blurred = gaussian(segmentation_mask.astype(np.float32), sigma=self.sigma)
# detect peaks
logger.info(f"{self.name}-{self.date.date()} -> Detect peaks with threshold {self.peak_threshold} and min. distance = {self.min_peak_distance} cm = {self.min_peak_distance/self.px_res:.2f} px.")
self.peaks = peak_local_max(self.blurred, min_distance=int(np.round(self.min_peak_distance/self.px_res)), threshold_abs=self.peak_threshold, exclude_border=False)
# convert peak position from pixel to cm coordinates with UTM coordinate transformation
utm_peaks, utm_transform = px_to_utm(point_cloud=self.peaks, raster_meta=raster_meta)
utm_peaks *= 100 # m * 100 = cm
n_peaks = len(self.peaks)
self.height, self.width = self.blurred.shape
logger.info(f"{self.name}-{self.date.date()} -> {n_peaks} peaks detected.")
if (self.make_orthoimage):
makeDirectory(self.orthoimage_dir)
logger.info(f"{self.name}-{self.date.date()} -> Save Gauss blurred orthoimage.")
write_onechannel_raster(os.path.join(self.orthoimage_dir, f"{self.field_id}_{self.date.date()}_blurred.tif"),
self.blurred,
raster_meta,
"float32")
logger.info(f"{self.name}-{self.date.date()} -> Export found peak positions as KML file.")
kml = simplekml.Kml()
for (lon, lat) in np.asarray(xy(raster_meta["transform"], *self.peaks.T)).T:
kml.newpoint(coords=[(lon, lat)])
kml.save(os.path.join(self.orthoimage_dir, f"{self.field_id}_{self.date.date()}_peaks.kml"))
else:
            logger.warning(f"{self.name}-{self.date.date()} -> No segmentation mask due to large cover ratio -> Skip plot.")
utm_peaks = np.array([])
# calculate UTM zone
lon, lat = np.asarray(xy(raster_meta["transform"], raster_meta["height"]//2, raster_meta["width"]//2))
utm_zone = int(np.floor((lon/360)*60+31))
utm_transform = pyproj.Proj(proj='utm', zone=utm_zone, ellps='WGS84')
self.save(obj=utm_peaks, name="plant_positions", type_="pickle")
self.save(obj=utm_transform, name="utm_transform", type_="pickle")
# plot blurred image and contrast image with peak positions
if (len(segmentation_mask) > 0) and self.plot_result:
makeDirectory(self.plot_dir)
self.plot_gauss_blur()
self.plot_peaks()
gc.collect()
class LoadPeaks(Task):
def __init__(
self,
field_id: str,
plot_result: bool,
plot_dir: str,
plot_format: str,
plot_dpi: int,
plot_cmap: str
):
super().__init__()
self.field_id = field_id
self.plot_result = plot_result
self.plot_dir = plot_dir
self.plot_format = plot_format
self.plot_dpi = plot_dpi
self.plot_cmap = plot_cmap
def plot(
self
):
logger.info(f"{self.name} -> Plot raw peaks image.")
fig, ax = plt.subplots()
ax.scatter(*self.C.T, s=2, alpha=0.8, c=self.layers, cmap=self.plot_cmap)
ax.set(title=f"{self.field_id}\nraw points", xlabel='x [cm]', ylabel='y [cm]', aspect='equal')
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_01_raw"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
plt.close("all")
del fig, ax
def run(
self,
reduced_results: List[Dict[str, Dict]]
):
cover_ratios, dates, gps_transforms, px_resolutions, field_ids, peaks, utm_transforms, segmentation_masks = [], [], [], [], [], [], [], []
for r in reduced_results:
try:
if len(r["config"].keys()) == 1:
cover_ratios.append(r["result"]["cover_ratio"])
dates.append(r["result"]["date"])
gps_transforms.append(r["result"]["raster_meta"]["transform"])
px_resolutions.append(r["result"]["px_resolution"])
field_ids.append(r["result"]["field_id"])
segmentation_masks.append(r["result"]["segmentation_mask"])
else:
peaks.append(r["result"]["plant_positions"])
utm_transforms.append(r["result"]["utm_transform"])
except:
logger.error(r)
        # note: passing logger.error(...) as an assert message would log it unconditionally, so use plain message strings
        assert len(np.unique(field_ids)) == 1, f"{self.name} -> Multiple field IDs!"
        assert np.unique(field_ids)[0] == self.field_id, f"{self.name} -> Wrong field ID!"
cover_ratios = np.asarray(cover_ratios)
px_resolutions = np.asarray(px_resolutions)
dates = pd.DatetimeIndex(dates)
        P = np.asarray(peaks, dtype=object)  # ragged: one array of peak positions per date
logger.info(f"{self.name} -> Load data for {len(dates)} dates.")
# sort dates and layers by cover ratio
cr_sort = np.argsort(cover_ratios)
P = P[cr_sort]
dates = dates[cr_sort]
segmentation_masks = [segmentation_masks[c] for c in cr_sort]
gps_transforms = [gps_transforms[c] for c in cr_sort]
px_resolutions = px_resolutions[cr_sort]
cover_ratios = np.sort(cover_ratios)
n_layers = len(dates)
logger.info(f"{self.name} -> Sorted dates and layers by cover ratio. Layers: {cr_sort}, dates: {dates}, cover ratios: {cover_ratios}")
# dates for printing (e.g. in plots)
printdates = dates.format(formatter=lambda x: x.strftime('%m-%d'))
emptymask = [len(p)>0 for p in P]
logger.info(f"{self.name} -> Peaks for {np.sum(emptymask)} dates available.")
# stack point clouds and save layers
self.C = np.vstack(P[emptymask])
self.layers = np.repeat(np.arange(len(P)), np.array([len(p) for p in P]))
self.save(obj=self.C, name="point_cloud", type_="pickle")
self.save(obj=self.layers, name="layers", type_="pickle")
self.save(obj=cover_ratios, name="cover_ratios", type_="pickle")
self.save(obj=self.field_id, name="field_id", type_="json")
self.save(obj=printdates, name="printdates", type_="pickle")
self.save(obj=dates, name="dates", type_="pickle")
self.save(obj=gps_transforms, name="gps_transforms", type_="pickle")
self.save(obj=px_resolutions, name="px_resolutions", type_="pickle")
self.save(obj=utm_transforms, name="utm_transforms", type_="pickle")
self.save(obj=segmentation_masks, name="segmentation_masks", type_="pickle")
# plot raw point information
if self.plot_result:
makeDirectory(self.plot_dir)
self.plot()
gc.collect()
class AlignPoints(Task):
def __init__(
self,
max_centroid_distance_cpd: float,
max_centroid_distance_group: float,
make_orthoimage: bool,
orthoimage_dir: str,
plot_result: bool,
plot_dir: str,
plot_format: str,
plot_dpi: int,
plot_cmap: str
):
super().__init__()
self.max_centroid_distance_cpd = max_centroid_distance_cpd
self.max_centroid_distance_group = max_centroid_distance_group
self.make_orthoimage = make_orthoimage
self.orthoimage_dir = orthoimage_dir
self.plot_result = plot_result
self.plot_dir = plot_dir
self.plot_format = plot_format
self.plot_dpi = plot_dpi
self.plot_cmap = plot_cmap
@staticmethod
def transform(
coords: np.array,
T: np.array
) -> np.array:
return T[0]*coords@T[1] + T[2]
def plot_aligned(
self
):
logger.info(f"{self.name} -> Plot aligned peak position image.")
fig, ax = plt.subplots()
ax.scatter(*self.P_aligned.T, s=2, alpha=0.8, c=self.layers, cmap=self.plot_cmap)
ax.set(title=f"{self.field_id}\naligned points\naligned dates: {self.aligned_dates}", xlabel='x - mean [cm]', ylabel='y - mean [cm]', aspect='equal')
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_02_aligned"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
plt.close("all")
del fig, ax
def plot_confidence(
self
):
logger.info(f"{self.name} -> Plot alignment mean confidence.")
fig, ax = plt.subplots()
ax.scatter(100*self.cover_ratios, 100*self.median_conf)
ax.set(xlim=(0,100), ylim=(0,100), title=f"{self.field_id}\n", xlabel='cover ratio [%]', ylabel='median alignment confidence [%]', aspect='equal')
ax.grid()
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_03_cr_vs_conf"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
plt.close("all")
del fig, ax
def run(
self,
point_cloud: np.ndarray,
layers: np.array,
cover_ratios: np.array,
printdates: np.array,
field_id: str,
utm_transforms: List
):
self.field_id = field_id
self.layers = layers
self.printdates = printdates
self.cover_ratios = cover_ratios
uni_layers = np.sort(np.unique(layers))
n_layers = len(self.cover_ratios)
# centralize point clouds
# calculate centroid of all points in UTM coordinates
P_mean = point_cloud.mean(axis=0)
# apply on point cloud
P_c = point_cloud - P_mean
scaF = np.ones(n_layers)
rotA = np.zeros(n_layers)
traV = np.zeros((n_layers, 2))
self.median_conf = np.nan*np.ones(n_layers)
self.P_aligned = P_c.copy()
P_centroid = P_c[layers == uni_layers[0]]
self.P_aligned[layers == uni_layers[0]] = P_centroid
aligned_layers = []
for l in uni_layers:
if l != 0:
X = P_centroid
Y = P_c[layers == l]
# filter points with no neighbours inside max_dist radius
nnX = NearestNeighbors(n_neighbors=1, n_jobs=-1)
nnY = NearestNeighbors(n_neighbors=1, n_jobs=-1)
nnX.fit(X)
nnY.fit(Y)
distXY, _ = nnY.kneighbors(X)
distYX, _ = nnX.kneighbors(Y)
X_filt = X[(distXY <= self.max_centroid_distance_cpd).flatten()]
Y_filt = Y[(distYX <= self.max_centroid_distance_cpd).flatten()]
# Rigid Transformation: T(X) = s*R@X + t
# s: scaling factor
# R: rotation matrix
# t: translation vector
# <NAME>, <NAME>: "Point Set Registration: Coherent Point Drift"
# https://arxiv.org/pdf/0905.2635.pdf
# registration with filtered points
logger.info(f"{self.name} -> Layer {l} of {len(uni_layers)} -> Try to align {len(Y_filt)} of {len(Y)} points to {len(X_filt)} of {len(X)} centroids. Maximum centroid distance: {self.max_centroid_distance_cpd} cm.")
reg = RigidRegistration(X=X_filt, Y=Y_filt) # X = target, Y = source
_, T = reg.register()
self.median_conf[l] = np.median(
|
np.max(reg.P, axis=1)
|
numpy.max
|
"""
Defines the ExplicitOpModel class and supporting functionality.
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import collections as _collections
import itertools as _itertools
import uuid as _uuid
import warnings as _warnings
import numpy as _np
import scipy as _scipy
from pygsti.models import explicitcalc as _explicitcalc
from pygsti.models import model as _mdl, gaugegroup as _gg
from pygsti.models.memberdict import OrderedMemberDict as _OrderedMemberDict
from pygsti.models.layerrules import LayerRules as _LayerRules
from pygsti.models.modelparaminterposer import LinearInterposer as _LinearInterposer
from pygsti.models.fogistore import FirstOrderGaugeInvariantStore as _FOGIStore
from pygsti.models.gaugegroup import GaugeGroup as _GaugeGroup
from pygsti.forwardsims.forwardsim import ForwardSimulator as _FSim
from pygsti.forwardsims import matrixforwardsim as _matrixfwdsim
from pygsti.modelmembers import instruments as _instrument
from pygsti.modelmembers import operations as _op
from pygsti.modelmembers import povms as _povm
from pygsti.modelmembers import states as _state
from pygsti.modelmembers.modelmembergraph import ModelMemberGraph as _MMGraph
from pygsti.modelmembers.operations import opfactory as _opfactory
from pygsti.baseobjs.basis import Basis as _Basis
from pygsti.baseobjs.basis import BuiltinBasis as _BuiltinBasis, DirectSumBasis as _DirectSumBasis
from pygsti.baseobjs.label import Label as _Label, CircuitLabel as _CircuitLabel
from pygsti.baseobjs import statespace as _statespace
from pygsti.tools import basistools as _bt
from pygsti.tools import jamiolkowski as _jt
from pygsti.tools import matrixtools as _mt
from pygsti.tools import optools as _ot
from pygsti.tools import fogitools as _fogit
from pygsti.tools import slicetools as _slct
from pygsti.tools.legacytools import deprecate as _deprecated_fn
class ExplicitOpModel(_mdl.OpModel):
"""
Encapsulates a set of gate, state preparation, and POVM effect operations.
    An ExplicitOpModel stores a set of labeled LinearOperator objects and
provides dictionary-like access to their matrices. State preparation
and POVM effect operations are represented as column vectors.
Parameters
----------
state_space : StateSpace
The state space for this model.
basis : {"pp","gm","qt","std","sv"} or Basis, optional
The basis used for the state space by dense superoperator representations.
default_param : {"full", "TP", "CPTP", etc.}, optional
Specifies the default gate and SPAM vector parameterization type.
Can be any value allowed by :method:`set_all_parameterizations`,
which also gives a description of each parameterization type.
    prep_prefix : string, optional
        Key prefix for state preparations, allowing the model to determine what
        type of object a key corresponds to.
    effect_prefix : string, optional
        Key prefix for POVM effects, allowing the model to determine what
        type of object a key corresponds to.
    gate_prefix : string, optional
        Key prefix for gates, allowing the model to determine what
        type of object a key corresponds to.
    povm_prefix : string, optional
        Key prefix for POVMs, allowing the model to determine what
        type of object a key corresponds to.
    instrument_prefix : string, optional
        Key prefix for instruments, allowing the model to determine what
        type of object a key corresponds to.
simulator : ForwardSimulator or {"auto", "matrix", "map"}
The circuit simulator used to compute any
requested probabilities, e.g. from :method:`probs` or
:method:`bulk_probs`. The default value of `"auto"` automatically
selects the simulation type, and is usually what you want. Other
special allowed values are:
- "matrix" : op_matrix-op_matrix products are computed and
cached to get composite gates which can then quickly simulate
a circuit for any preparation and outcome. High memory demand;
best for a small number of (1 or 2) qubits.
- "map" : op_matrix-state_vector products are repeatedly computed
to simulate circuits. Slower for a small number of qubits, but
faster and more memory efficient for higher numbers of qubits (3+).
evotype : Evotype or str, optional
The evolution type of this model, describing how states are
represented. The special value `"default"` is equivalent
to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.
"""
#Whether access to gates & spam vecs via Model indexing is allowed
_strict = False
def __init__(self, state_space, basis="pp", default_gate_type="full",
default_prep_type="auto", default_povm_type="auto",
default_instrument_type="auto", prep_prefix="rho", effect_prefix="E",
gate_prefix="G", povm_prefix="M", instrument_prefix="I",
simulator="auto", evotype="default"):
#More options now (TODO enumerate?)
#assert(default_param in ('full','TP','CPTP','H+S','S','static',
# 'H+S terms','clifford','H+S clifford terms'))
def flagfn(typ): return {'auto_embed': True, 'match_parent_statespace': True,
'match_parent_evotype': True, 'cast_to_type': typ}
if default_prep_type == "auto":
default_prep_type = _state.state_type_from_op_type(default_gate_type)
if default_povm_type == "auto":
default_povm_type = _povm.povm_type_from_op_type(default_gate_type)
if default_instrument_type == "auto":
default_instrument_type = _instrument.instrument_type_from_op_type(default_gate_type)
self.preps = _OrderedMemberDict(self, default_prep_type, prep_prefix, flagfn("state"))
self.povms = _OrderedMemberDict(self, default_povm_type, povm_prefix, flagfn("povm"))
self.operations = _OrderedMemberDict(self, default_gate_type, gate_prefix, flagfn("operation"))
self.instruments = _OrderedMemberDict(self, default_instrument_type, instrument_prefix, flagfn("instrument"))
self.factories = _OrderedMemberDict(self, default_gate_type, gate_prefix, flagfn("factory"))
self.effects_prefix = effect_prefix
self._default_gauge_group = None
self.fogi_info = None
super(ExplicitOpModel, self).__init__(state_space, basis, evotype, ExplicitLayerRules(), simulator)
@property
def _primitive_prep_label_dict(self):
return self.preps
@property
def _primitive_povm_label_dict(self):
return self.povms
@property
def _primitive_op_label_dict(self):
# don't include 'implied' ops as primitive ops -- FUTURE - maybe should include empty layer ([])?
return _collections.OrderedDict([(k, None) for k in self.operations
if not (k.name.startswith('{') and k.name.endswith('}'))])
@property
def _primitive_instrument_label_dict(self):
return self.instruments
#Functions required for base class functionality
def _iter_parameterized_objs(self):
for lbl, obj in _itertools.chain(self.preps.items(),
self.povms.items(),
self.operations.items(),
self.instruments.items(),
self.factories.items()):
yield (lbl, obj)
def _excalc(self):
""" Create & return a special explicit-model calculator for this model """
        self._clean_paramvec()  # ensures paramvec is rebuilt if needed
simplified_effects = _collections.OrderedDict()
for povm_lbl, povm in self.povms.items():
for k, e in povm.simplify_effects(povm_lbl).items():
simplified_effects[k] = e
simplified_ops = _collections.OrderedDict()
for k, g in self.operations.items(): simplified_ops[k] = g
for inst_lbl, inst in self.instruments.items():
for k, g in inst.simplify_operations(inst_lbl).items():
simplified_ops[k] = g
simplified_preps = self.preps
return _explicitcalc.ExplicitOpModelCalc(self.state_space.dim, simplified_preps, simplified_ops,
simplified_effects, self.num_params, self._param_interposer)
#Unneeded - just use string processing & rely on effect labels *not* having underscores in them
#def simplify_spamtuple_to_outcome_label(self, simplified_spamTuple):
# #TODO: make this more efficient (prep lbl isn't even used!)
# for prep_lbl in self.preps:
# for povm_lbl in self.povms:
# for elbl in self.povms[povm_lbl]:
# if simplified_spamTuple == (prep_lbl, povm_lbl + "_" + elbl):
# return (elbl,) # outcome "label" (a tuple)
# raise ValueError("No outcome label found for simplified spam_tuple: ", simplified_spamTuple)
def _embed_operation(self, op_target_labels, op_val, force=False):
"""
Called by OrderedMemberDict._auto_embed to create an embedded-gate
object that embeds `op_val` into the sub-space of
`self.state_space` given by `op_target_labels`.
Parameters
----------
op_target_labels : list
A list of `op_val`'s target state space labels.
op_val : LinearOperator
The gate object to embed. Note this should be a legitimate
LinearOperator-derived object and not just a numpy array.
force : bool, optional
Always wrap with an embedded LinearOperator, even if the
dimension of `op_val` is the full model dimension.
Returns
-------
LinearOperator
A gate of the full model dimension.
"""
if self.state_space is None:
raise ValueError("Must set model state space before adding auto-embedded gates.")
if op_val.state_space == self.state_space and not force:
return op_val # if gate operates on full dimension, no need to embed.
return _op.EmbeddedOp(self.state_space, op_target_labels, op_val)
@property
def default_gauge_group(self):
"""
Gets the default gauge group for performing gauge transformations on this Model.
Returns
-------
GaugeGroup
"""
return self._default_gauge_group
@default_gauge_group.setter
def default_gauge_group(self, value):
"""
The default gauge group.
"""
self._default_gauge_group = value
@property
def prep(self):
"""
The unique state preparation in this model, if one exists.
If not, a ValueError is raised.
Returns
-------
State
"""
if len(self.preps) != 1:
raise ValueError("'.prep' can only be used on models"
" with a *single* state prep. This Model has"
" %d state preps!" % len(self.preps))
return list(self.preps.values())[0]
@property
def effects(self):
"""
The effect vectors from the unique POVM in this model, if one exists.
If not, a ValueError is raised.
Returns
-------
list of POVMEffects
"""
if len(self.povms) != 1:
raise ValueError("'.effects' can only be used on models"
" with a *single* POVM. This Model has"
" %d POVMS!" % len(self.povms))
return list(self.povms.values())[0]
def __setitem__(self, label, value):
"""
Set an operator, state, or POVM associated with a given label.
Parameters
----------
label : string
the gate or SPAM vector label.
value : numpy array or LinearOperator or State or POVM
a operation matrix, state vector, or POVM, which must have the
appropriate state space for the Model and appropriate type
given the prefix of the label.
"""
if ExplicitOpModel._strict:
raise KeyError("Strict-mode: invalid key %s" % repr(label))
if not isinstance(label, _Label): label = _Label(label)
if label == _Label(()): # special case
self.operations[label] = value
elif label.has_prefix(self.preps._prefix):
self.preps[label] = value
elif label.has_prefix(self.povms._prefix):
self.povms[label] = value
elif label.has_prefix(self.operations._prefix):
self.operations[label] = value
elif label.has_prefix(self.instruments._prefix, typ="any"):
self.instruments[label] = value
else:
raise KeyError("Key %s has an invalid prefix" % label)
def __getitem__(self, label):
"""
Get an operation, state, or POVM associated with a given label.
Parameters
----------
label : string
the gate, state vector, or POVM label.
"""
if ExplicitOpModel._strict:
raise KeyError("Strict-mode: invalid key %s" % label)
if not isinstance(label, _Label): label = _Label(label)
if label == _Label(()): # special case
return self.operations[label]
elif label.has_prefix(self.preps._prefix):
return self.preps[label]
elif label.has_prefix(self.povms._prefix):
return self.povms[label]
elif label.has_prefix(self.operations._prefix):
return self.operations[label]
elif label.has_prefix(self.instruments._prefix, typ="any"):
return self.instruments[label]
else:
raise KeyError("Key %s has an invalid prefix" % label)
def convert_members_inplace(self, to_type, categories_to_convert='all', labels_to_convert='all',
ideal_model=None, flatten_structure=False, set_default_gauge_group=False):
"""
TODO: docstring -- like set_all_parameterizations but doesn't set default gauge group by default
"""
if isinstance(categories_to_convert, str): categories_to_convert = (categories_to_convert,)
if any([c in categories_to_convert for c in ('all', 'ops', 'operations')]):
for lbl, gate in self.operations.items():
if labels_to_convert == 'all' or lbl in labels_to_convert:
ideal = ideal_model.operations.get(lbl, None) if (ideal_model is not None) else None
self.operations[lbl] = _op.convert(gate, to_type, self.basis, ideal, flatten_structure)
if any([c in categories_to_convert for c in ('all', 'instruments')]):
for lbl, inst in self.instruments.items():
if labels_to_convert == 'all' or lbl in labels_to_convert:
ideal = ideal_model.instruments.get(lbl, None) if (ideal_model is not None) else None
self.instruments[lbl] = _instrument.convert(inst, to_type, self.basis, ideal, flatten_structure)
if any([c in categories_to_convert for c in ('all', 'preps')]):
for lbl, prep in self.preps.items():
if labels_to_convert == 'all' or lbl in labels_to_convert:
ideal = ideal_model.preps.get(lbl, None) if (ideal_model is not None) else None
self.preps[lbl] = _state.convert(prep, to_type, self.basis, ideal, flatten_structure)
if any([c in categories_to_convert for c in ('all', 'povms')]):
for lbl, povm in self.povms.items():
if labels_to_convert == 'all' or lbl in labels_to_convert:
ideal = ideal_model.povms.get(lbl, None) if (ideal_model is not None) else None
self.povms[lbl] = _povm.convert(povm, to_type, self.basis, ideal, flatten_structure)
        self._clean_paramvec()  # param indices were probably updated
if set_default_gauge_group:
self.set_default_gauge_group_for_member_type(to_type)
def set_default_gauge_group_for_member_type(self, member_type):
""" TODO: docstring """
if member_type == 'full':
self.default_gauge_group = _gg.FullGaugeGroup(self.state_space, self.evotype)
elif member_type in ['full TP', 'TP']: # TODO: get from verbose_conversion dictionary of modelmembers?
self.default_gauge_group = _gg.TPGaugeGroup(self.state_space, self.evotype)
elif member_type == 'CPTP':
self.default_gauge_group = _gg.UnitaryGaugeGroup(self.state_space, self.basis, self.evotype)
else: # typ in ('static','H+S','S', 'H+S terms', ...)
self.default_gauge_group = _gg.TrivialGaugeGroup(self.state_space)
def set_all_parameterizations(self, gate_type, prep_type="auto", povm_type="auto",
instrument_type="auto", extra=None):
"""
Convert all gates, states, and POVMs to a specific parameterization type.
Parameters
----------
        gate_type : string
The gate, state, and POVM parameterization type. Allowed
values are (where '*' means " terms" and " clifford terms"
evolution-type suffixes are allowed):
- "full" : each gate / state / POVM effect element is an independent parameter
- "TP" : Trace-Preserving gates and state preps
- "static" : no parameters
- "static unitary" : no parameters; convert superops to unitaries
- "clifford" : no parameters; convert unitaries to Clifford symplecitics.
- "GLND*" : General unconstrained Lindbladian
- "CPTP*" : Completely-Positive-Trace-Preserving
- "H+S+A*" : Hamiltoian, Pauli-Stochastic, and Affine errors
- "H+S*" : Hamiltonian and Pauli-Stochastic errors
- "S+A*" : Pauli-Stochastic and Affine errors
- "S*" : Pauli-Stochastic errors
- "H+D+A*" : Hamiltoian, Depolarization, and Affine errors
- "H+D*" : Hamiltonian and Depolarization errors
- "D+A*" : Depolarization and Affine errors
- "D*" : Depolarization errors
- Any of the above with "S" replaced with "s" or "D" replaced with
"d". This removes the CPTP constraint on the gates and SPAM
operations (and as such is seldom used).
extra : dict, optional
For `"H+S terms"` type, this may specify a dictionary
of unitary gates and pure state vectors to be used
as the *ideal* operation of each gate/SPAM operation.
Returns
-------
None
"""
typ = gate_type
assert(extra is None), "`extra` argument is unused and should be left as `None`"
if extra is None: extra = {}
rtyp = _state.state_type_from_op_type(gate_type) if prep_type == "auto" else prep_type
povmtyp = _povm.povm_type_from_op_type(gate_type) if povm_type == "auto" else povm_type
ityp = _instrument.instrument_type_from_op_type(gate_type) if instrument_type == "auto" else instrument_type
self.convert_members_inplace(typ, 'operations', 'all', flatten_structure=True)
self.convert_members_inplace(ityp, 'instruments', 'all', flatten_structure=True)
self.convert_members_inplace(rtyp, 'preps', 'all', flatten_structure=True)
self.convert_members_inplace(povmtyp, 'povms', 'all', flatten_structure=True)
self.set_default_gauge_group_for_member_type(typ)
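    # Usage sketch (hedged; `mdl` is an ExplicitOpModel assumed to be built elsewhere --
    # the call below is illustrative, not part of this file):
    #
    #     mdl.set_all_parameterizations("full TP")
    #
    # A single gate_type drives the prep/POVM/instrument types through the "auto"
    # mappings above, and the default gauge group is switched to the matching
    # TPGaugeGroup via set_default_gauge_group_for_member_type.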
def __setstate__(self, state_dict):
if "gates" in state_dict:
#Unpickling an OLD-version Model (or GateSet)
_warnings.warn("Unpickling deprecated-format ExplicitOpModel (GateSet). Please re-save/pickle asap.")
self.operations = state_dict['gates']
self.state_space = state_dict['stateSpaceLabels']
self._paramlbls = None
del state_dict['gates']
del state_dict['_autogator']
del state_dict['auto_idle_gatename']
del state_dict['stateSpaceLabels']
if "effects" in state_dict:
raise ValueError(("This model (GateSet) object is too old to unpickle - "
"try using pyGSTi v0.9.6 to upgrade it to a version "
"that this version can upgrade to the current version."))
#Backward compatibility:
if 'basis' in state_dict:
state_dict['_basis'] = state_dict['basis']; del state_dict['basis']
if 'state_space_labels' in state_dict:
state_dict['state_space'] = state_dict['state_space_labels']; del state_dict['state_space_labels']
if 'factories' not in state_dict:
ops = state_dict['operations']
state_dict['factories'] = _OrderedMemberDict(self, ops.default_param, ops._prefix, ops.flags)
super().__setstate__(state_dict) # ~ self.__dict__.update(state_dict)
if 'uuid' not in state_dict:
self.uuid = _uuid.uuid4() # create a new uuid
#Additionally, must re-connect this model as the parent
# of relevant OrderedDict-derived classes, which *don't*
# preserve this information upon pickling so as to avoid
# circular pickling...
self.preps.parent = self
self.povms.parent = self
#self.effects.parent = self
self.operations.parent = self
self.instruments.parent = self
self.factories.parent = self
for o in self.preps.values(): o.relink_parent(self)
for o in self.povms.values(): o.relink_parent(self)
#for o in self.effects.values(): o.relink_parent(self)
for o in self.operations.values(): o.relink_parent(self)
for o in self.instruments.values(): o.relink_parent(self)
for o in self.factories.values(): o.relink_parent(self)
@property
def num_elements(self):
"""
Return the number of total operation matrix and spam vector elements in this model.
This is in general different from the number of *parameters* in the
model, which are the number of free variables used to generate all of
the matrix and vector *elements*.
Returns
-------
int
the number of model elements.
"""
rhoSize = [rho.hilbert_schmidt_size for rho in self.preps.values()]
povmSize = [povm.num_elements for povm in self.povms.values()]
opSize = [gate.hilbert_schmidt_size for gate in self.operations.values()]
instSize = [i.num_elements for i in self.instruments.values()]
#Don't count self.factories?
return sum(rhoSize) + sum(povmSize) + sum(opSize) + sum(instSize)
@property
def num_nongauge_params(self):
"""
Return the number of non-gauge parameters in this model.
Returns
-------
int
the number of non-gauge model parameters.
"""
return self.num_params - self.num_gauge_params
@property
def num_gauge_params(self):
"""
Return the number of gauge parameters in this model.
Returns
-------
int
the number of gauge model parameters.
"""
#Note maybe we need some way for some evotypes to punt here? (and just return 0?)
if self.num_params == 0:
return 0 # save the trouble of getting gauge params when there are no params to begin with
dPG = self._excalc()._buildup_dpg()
gaugeDirs = _mt.nullspace_qr(dPG) # cols are gauge directions
if gaugeDirs.size == 0: # if there are *no* gauge directions
return 0 # calling matrix_rank on a length-0 array => error
return _np.linalg.matrix_rank(gaugeDirs[0:self.num_params, :])
def deriv_wrt_params(self):
"""
The element-wise derivative of all this models' operations.
Constructs a matrix whose columns are the vectorized derivatives of all
the model's raw matrix and vector *elements* (placed in a vector)
with respect to each single model parameter.
Thus, each column has length equal to the number of elements in the
model, and there are num_params() columns. In the case of a "fully
parameterized model" (i.e. all operation matrices and SPAM vectors are
fully parameterized) then the resulting matrix will be the (square)
identity matrix.
Returns
-------
numpy array
2D array of derivatives.
"""
return self._excalc().deriv_wrt_params()
def compute_nongauge_and_gauge_spaces(self, item_weights=None, non_gauge_mix_mx=None):
"""
TODO: docstring
"""
return self._excalc().nongauge_and_gauge_spaces(item_weights, non_gauge_mix_mx)
def compute_nongauge_projector(self, item_weights=None, non_gauge_mix_mx=None):
"""
Construct a projector onto the non-gauge parameter space.
Useful for isolating the gauge degrees of freedom from the non-gauge
degrees of freedom.
Parameters
----------
item_weights : dict, optional
Dictionary of weighting factors for individual gates and spam operators.
Keys can be gate, state preparation, POVM effect, spam labels, or the
            special strings "gates" or "spam" which represent the entire set of gate
            or SPAM operators, respectively. Values are floating point numbers.
            These weights define the metric used to compute the non-gauge space,
            *orthogonal* to the gauge space, that is projected onto.
non_gauge_mix_mx : numpy array, optional
An array of shape (n_non_gauge_params,n_gauge_params) specifying how to
mix the non-gauge degrees of freedom into the gauge degrees of
freedom that are projected out by the returned object. This argument
essentially sets the off-diagonal block of the metric used for
orthogonality in the "gauge + non-gauge" space. It is for advanced
usage and typically left as None (the default).
Returns
-------
numpy array
The projection operator as a N x N matrix, where N is the number
of parameters (obtained via num_params()). This projector acts on
parameter-space, and has rank equal to the number of non-gauge
degrees of freedom.
"""
return self._excalc().nongauge_projector(item_weights, non_gauge_mix_mx)
def transform_inplace(self, s):
"""
Gauge transform this model.
Update each of the operation matrices G in this model with inv(s) * G * s,
each rhoVec with inv(s) * rhoVec, and each EVec with EVec * s
Parameters
----------
s : GaugeGroupElement
A gauge group element which specifies the "s" matrix
            (and its inverse) used in the above similarity transform.
Returns
-------
None
"""
for rhoVec in self.preps.values():
rhoVec.transform_inplace(s)
for povm in self.povms.values():
povm.transform_inplace(s)
for opObj in self.operations.values():
opObj.transform_inplace(s)
for instrument in self.instruments.values():
instrument.transform_inplace(s)
for factory in self.factories.values():
factory.transform_inplace(s)
self._clean_paramvec() # transform may leave dirty members
def frobeniusdist(self, other_model, transform_mx=None,
item_weights=None, normalize=True):
"""
Compute the weighted frobenius norm of the difference between this model and other_model.
Differences in each corresponding gate matrix and spam vector element
are squared, weighted (using `item_weights` as applicable), then summed.
The value returned is the square root of this sum, or the square root of
this sum divided by the number of summands if normalize == True.
Parameters
----------
other_model : Model
the other model to difference against.
transform_mx : numpy array, optional
if not None, transform this model by
G => inv(transform_mx) * G * transform_mx, for each operation matrix G
(and similar for rho and E vectors) before taking the difference.
This transformation is applied only for the difference and does
not alter the values stored in this model.
item_weights : dict, optional
Dictionary of weighting factors for individual gates and spam
operators. Weights are applied multiplicatively to the squared
differences, i.e., (*before* the final square root is taken). Keys
can be gate, state preparation, POVM effect, or spam labels, as well
as the two special labels `"gates"` and `"spam"` which apply to all
of the gate or SPAM elements, respectively (but are overridden by
specific element values). Values are floating point numbers.
By default, all weights are 1.0.
normalize : bool, optional
if True (the default), the sum of weighted squared-differences
is divided by the weighted number of differences before the
final square root is taken. If False, the division is not performed.
Returns
-------
float
"""
return self._excalc().frobeniusdist(other_model._excalc(), transform_mx,
item_weights, normalize)
def residuals(self, other_model, transform_mx=None, item_weights=None):
"""
Compute the weighted residuals between two models.
Residuals are the differences in corresponding operation matrix and spam
vector elements.
Parameters
----------
other_model : Model
the other model to difference against.
transform_mx : numpy array, optional
if not None, transform this model by
G => inv(transform_mx) * G * transform_mx, for each operation matrix G
(and similar for rho and E vectors) before taking the difference.
This transformation is applied only for the difference and does
not alter the values stored in this model.
item_weights : dict, optional
Dictionary of weighting factors for individual gates and spam
operators. Weights are applied such that they act multiplicatively on
the *squared* differences, so that the residuals themselves are
scaled by the square roots of these weights. Keys can be gate, state
preparation, POVM effect, or spam labels, as well as the two special
labels `"gates"` and `"spam"` which apply to all of the gate or SPAM
elements, respectively (but are overridden by specific element
values). Values are floating point numbers. By default, all weights
are 1.0.
Returns
-------
residuals : numpy.ndarray
A 1D array of residuals (differences w.r.t. other)
nSummands : int
The (weighted) number of elements accounted for by the residuals.
"""
return self._excalc().residuals(other_model._excalc(), transform_mx, item_weights)
def jtracedist(self, other_model, transform_mx=None, include_spam=True):
"""
Compute the Jamiolkowski trace distance between this model and `other_model`.
This is defined as the maximum of the trace distances between each
corresponding gate, including spam gates.
Parameters
----------
other_model : Model
the other model to difference against.
transform_mx : numpy array, optional
if not None, transform this model by
G => inv(transform_mx) * G * transform_mx, for each operation matrix G
(and similar for rho and E vectors) before taking the difference.
This transformation is applied only for the difference and does
not alter the values stored in this model.
include_spam : bool, optional
Whether to add to the max-trace-distance the frobenius distances
between corresponding SPAM operations.
Returns
-------
float
"""
return self._excalc().jtracedist(other_model._excalc(), transform_mx, include_spam)
def diamonddist(self, other_model, transform_mx=None, include_spam=True):
"""
Compute the diamond-norm distance between this model and `other_model`.
This is defined as the maximum of the diamond-norm distances between each
corresponding gate, including spam gates.
Parameters
----------
other_model : Model
the other model to difference against.
transform_mx : numpy array, optional
if not None, transform this model by
G => inv(transform_mx) * G * transform_mx, for each operation matrix G
(and similar for rho and E vectors) before taking the difference.
This transformation is applied only for the difference and does
not alter the values stored in this model.
include_spam : bool, optional
Whether to add to the max-diamond-distance the frobenius distances
between corresponding SPAM operations.
Returns
-------
float
"""
return self._excalc().diamonddist(other_model._excalc(), transform_mx, include_spam)
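# Usage sketch: per-gate worst-case distances between two comparable models (assumed to share labels).
#   >>> mdl_noisy.jtracedist(mdl_ideal)     # max Jamiolkowski trace distance over gates
#   >>> mdl_noisy.diamonddist(mdl_ideal)    # max diamond-norm distance over gates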
def _tpdist(self):
"""
Compute the "distance" between this model and the space of trace-preserving (TP) maps.
This is defined as the square root of the sum of squared deviations of
the first rows of all operation matrices (from the ideal [1, 0, ..., 0]) and of
the first elements of all state preparations (from their ideal values).
Returns
-------
float
"""
penalty = 0.0
for operationMx in list(self.operations.values()):
penalty += abs(operationMx[0, 0] - 1.0)**2
for k in range(1, operationMx.shape[1]):
penalty += abs(operationMx[0, k])**2
op_dim = self.state_space.dim
firstEl = 1.0 / op_dim**0.25
for rhoVec in list(self.preps.values()):
penalty += abs(rhoVec[0, 0] - firstEl)**2
return _np.sqrt(penalty)
def strdiff(self, other_model, metric='frobenius'):
"""
Return a string describing the distances between this model and `other_model`.
The returned string displays differences between each corresponding gate,
state prep, and POVM effect.
Parameters
----------
other_model : Model
the other model to difference against.
metric : {'frobenius', 'infidelity', 'diamond'}
Which distance metric to use.
Returns
-------
str
"""
if metric == 'frobenius':
def dist(a, b): return _np.linalg.norm(a - b)
def vecdist(a, b): return _np.linalg.norm(a - b)
elif metric == 'infidelity':
def dist(a, b): return _ot.entanglement_infidelity(a, b, self.basis)
def vecdist(a, b): return _np.linalg.norm(a - b)
elif metric == 'diamond':
def dist(a, b): return 0.5 * _ot.diamondist(a, b, self.basis)
def vecdist(a, b): return _np.linalg.norm(a - b)
else:
raise ValueError("Invalid `metric` argument: %s" % metric)
on_space = 'HilbertSchmidt'
s = "Model Difference:\n"
s += " Preps:\n"
for lbl in self.preps:
s += " %s = %g\n" % \
(str(lbl), vecdist(self.preps[lbl].to_dense(on_space), other_model.preps[lbl].to_dense(on_space)))
s += " POVMs:\n"
for povm_lbl, povm in self.povms.items():
s += " %s: " % str(povm_lbl)
for lbl in povm:
s += " %s = %g\n" % \
(lbl, vecdist(povm[lbl].to_dense(on_space), other_model.povms[povm_lbl][lbl].to_dense(on_space)))
s += " Gates:\n"
for lbl in self.operations:
s += " %s = %g\n" % \
(str(lbl), dist(self.operations[lbl].to_dense(on_space),
other_model.operations[lbl].to_dense(on_space)))
if len(self.instruments) > 0:
s += " Instruments:\n"
for inst_lbl, inst in self.instruments.items():
s += " %s: " % str(inst_lbl)
for lbl in inst:
s += " %s = %g\n" % (str(lbl), dist(
inst[lbl].to_dense(on_space), other_model.instruments[inst_lbl][lbl].to_dense(on_space)))
#Note: no easy way to difference factories
return s
def _init_copy(self, copy_into, memo):
"""
Copies any "tricky" member of this model into `copy_into`, before
deep copying everything else within a .copy() operation.
"""
# Copy special base class members first
super(ExplicitOpModel, self)._init_copy(copy_into, memo)
# Copy our "tricky" members
copy_into.preps = self.preps.copy(copy_into, memo)
copy_into.povms = self.povms.copy(copy_into, memo)
copy_into.operations = self.operations.copy(copy_into, memo)
copy_into.instruments = self.instruments.copy(copy_into, memo)
copy_into.factories = self.factories.copy(copy_into, memo)
copy_into._default_gauge_group = self._default_gauge_group # Note: SHALLOW copy
def __str__(self):
s = ""
for lbl, vec in self.preps.items():
s += "%s = " % str(lbl) + str(vec) + "\n"
s += "\n"
for lbl, povm in self.povms.items():
s += "%s = " % str(lbl) + str(povm) + "\n"
s += "\n"
for lbl, gate in self.operations.items():
s += "%s = \n" % str(lbl) + str(gate) + "\n\n"
for lbl, inst in self.instruments.items():
s += "%s = " % str(lbl) + str(inst) + "\n"
for lbl, factory in self.factories.items():
s += "%s = (factory)" % lbl + '\n'
s += "\n"
return s
def all_objects(self):
"""
Iterate over all of the (label, operator object) entities in this model.
This iterator runs over all state preparations, POVMS, operations,
and instruments.
"""
for lbl, obj in _itertools.chain(self.preps.items(),
self.povms.items(),
self.operations.items(),
self.instruments.items(),
self.factories.items()):
yield (lbl, obj)
#TODO: how to handle these given possibility of different parameterizations...
# -- maybe only allow these methods to be called when using a "full" parameterization?
# -- or perhaps better to *move* them to the parameterization class
def depolarize(self, op_noise=None, spam_noise=None, max_op_noise=None,
max_spam_noise=None, seed=None):
"""
Apply depolarization uniformly or randomly to this model's gate and/or SPAM elements.
The result is returned without modifying the original (this) model. You
must specify either `op_noise` or `max_op_noise` (for the amount of gate
depolarization), and either `spam_noise` or `max_spam_noise` (for spam
depolarization).
Parameters
----------
op_noise : float, optional
apply depolarizing noise of strength ``op_noise`` to all gates in
the model. (Multiplies each assumed-Pauli-basis operation matrix by the
diagonal matrix with ``(1.0-op_noise)`` along all the diagonal
elements except the first (the identity).)
spam_noise : float, optional
apply depolarizing noise of strength ``spam_noise`` to all SPAM
operations (state preparations and POVM effects) in the model. (Multiplies the
non-identity part of each assumed-Pauli-basis state preparation
vector and measurement vector by ``(1.0-spam_noise)``).
max_op_noise : float, optional
specified instead of `op_noise`; apply a random depolarization
with maximum strength ``max_op_noise`` to each gate in the
model.
max_spam_noise : float, optional
specified instead of `spam_noise`; apply a random depolarization
with maximum strength ``max_spam_noise`` to each state preparation
and POVM in the model.
seed : int, optional
if not ``None``, seed numpy's random number generator with this value
before generating random depolarizations.
Returns
-------
Model
the depolarized Model
"""
newModel = self.copy() # start by just copying the current model
rndm = _np.random.RandomState(seed)
if max_op_noise is not None:
if op_noise is not None:
raise ValueError("Must specify at most one of 'op_noise' and 'max_op_noise' NOT both")
#Apply random depolarization to each gate
r = max_op_noise * rndm.random_sample(len(self.operations))
for i, label in enumerate(self.operations):
newModel.operations[label].depolarize(r[i])
r = max_op_noise * rndm.random_sample(len(self.instruments))
for i, label in enumerate(self.instruments):
newModel.instruments[label].depolarize(r[i])
r = max_op_noise * rndm.random_sample(len(self.factories))
for i, label in enumerate(self.factories):
newModel.factories[label].depolarize(r[i])
elif op_noise is not None:
#Apply the same depolarization to each gate
for label in self.operations:
newModel.operations[label].depolarize(op_noise)
for label in self.instruments:
newModel.instruments[label].depolarize(op_noise)
for label in self.factories:
newModel.factories[label].depolarize(op_noise)
if max_spam_noise is not None:
if spam_noise is not None:
raise ValueError("Must specify at most one of 'noise' and 'max_noise' NOT both")
#Apply random depolarization to each rho and E vector
r = max_spam_noise * rndm.random_sample(len(self.preps))
for (i, lbl) in enumerate(self.preps):
newModel.preps[lbl].depolarize(r[i])
r = max_spam_noise * rndm.random_sample(len(self.povms))
for (i, label) in enumerate(self.povms):
newModel.povms[label].depolarize(r[i])
elif spam_noise is not None:
#Apply the same depolarization to each state preparation
for lbl in self.preps:
newModel.preps[lbl].depolarize(spam_noise)
# Just depolarize the preps - leave POVMs alone
#for label in self.povms:
# newModel.povms[label].depolarize(spam_noise)
newModel._clean_paramvec() # depolarize may leave dirty members
return newModel
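# Usage sketch: uniform vs. random depolarization of a model `mdl` (assumed ExplicitOpModel,
# e.g. a target model from a pyGSTi model pack).
#   >>> mdl_dep = mdl.depolarize(op_noise=0.05, spam_noise=0.01)
#   >>> mdl_rnd = mdl.depolarize(max_op_noise=0.05, max_spam_noise=0.01, seed=1234)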
def rotate(self, rotate=None, max_rotate=None, seed=None):
"""
Apply a rotation uniformly or randomly to this model.
Uniformly means the same rotation applied to each gate and
randomly means different random rotations are applied to each gate of
this model. The result is returned without modifying the original (this) model.
You must specify either `rotate` or `max_rotate`. This method currently
only works on n-qubit models.
Parameters
----------
rotate : tuple of floats, optional
If you specify the `rotate` argument, then the same rotation
operation is applied to each gate. That is, each gate's matrix `G`
is composed with a rotation operation `R` (so `G` -> `dot(R, G)` )
where `R` is the unitary superoperator corresponding to the unitary
operator `U = exp( sum_k( i * rotate[k] / 2.0 * Pauli_k ) )`. Here
`Pauli_k` ranges over all of the non-identity un-normalized Pauli
operators (e.g. {X,Y,Z} for 1 qubit, {IX, IY, IZ, XI, XX, XY, XZ,
YI, YX, YY, YZ, ZI, ZX, ZY, ZZ} for 2 qubits).
max_rotate : float, optional
If `max_rotate` is specified (*instead* of `rotate`), then pyGSTi
randomly generates a different `rotate` tuple, and applies the
corresponding rotation, to each gate in this `Model`. Each
component of each tuple is drawn uniformly from [0, `max_rotate`).
seed : int, optional
if not None, seed numpy's random number generator with this value
before generating random depolarizations.
Returns
-------
Model
the rotated Model
"""
newModel = self.copy() # start by just copying model
dim = self.state_space.dim
myBasis = self.basis
if max_rotate is not None:
if rotate is not None:
raise ValueError("Must specify exactly one of 'rotate' and 'max_rotate' NOT both")
#Apply random rotation to each gate
rndm = _np.random.RandomState(seed)
r = max_rotate * rndm.random_sample(len(self.operations) * (dim - 1))
for i, label in enumerate(self.operations):
rot = _np.array(r[(dim - 1) * i:(dim - 1) * (i + 1)])
newModel.operations[label].rotate(rot, myBasis)
r = max_rotate * rndm.random_sample(len(self.instruments) * (dim - 1))
for i, label in enumerate(self.instruments):
rot = _np.array(r[(dim - 1) * i:(dim - 1) * (i + 1)])
newModel.instruments[label].rotate(rot, myBasis)
r = max_rotate * rndm.random_sample(len(self.factories) * (dim - 1))
for i, label in enumerate(self.factories):
rot = _np.array(r[(dim - 1) * i:(dim - 1) * (i + 1)])
newModel.factories[label].rotate(rot, myBasis)
elif rotate is not None:
assert(len(rotate) == dim - 1), \
"Invalid 'rotate' argument. You must supply a tuple of length %d" % (dim - 1)
for label in self.operations:
newModel.operations[label].rotate(rotate, myBasis)
for label in self.instruments:
newModel.instruments[label].rotate(rotate, myBasis)
for label in self.factories:
newModel.factories[label].rotate(rotate, myBasis)
else: raise ValueError("Must specify either 'rotate' or 'max_rotate' "
+ "-- neither was non-None")
newModel._clean_paramvec() # rotate may leave dirty members
return newModel
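# Usage sketch: rotating a 1-qubit model (dim == 4, so `rotate` needs length dim - 1 == 3,
# one angle per non-identity Pauli). `mdl` is an assumed 1-qubit ExplicitOpModel.
#   >>> mdl_rot = mdl.rotate(rotate=(0.1, 0.0, 0.0))       # small coherent X error on every gate
#   >>> mdl_rnd = mdl.rotate(max_rotate=0.05, seed=1234)   # independent random rotation per gate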
def randomize_with_unitary(self, scale, seed=None, rand_state=None):
"""
Create a new model with random unitary perturbations.
Apply a random unitary to each element of a model, and return the
result, without modifying the original (this) model. This method
works on any Model whose dimension is a perfect square.
Parameters
----------
scale : float
maximum element magnitude in the generator of each random unitary
transform.
seed : int, optional
if not None, seed numpy's random number generator with this value
before generating random depolarizations.
rand_state : numpy.random.RandomState
A RandomState object to generate samples from. Can be useful to set
instead of `seed` if you want reproducible distribution samples
across multiple random function calls but you don't want to bother
with manually incrementing seeds between those calls.
Returns
-------
Model
the randomized Model
"""
if rand_state is None:
rndm = _np.random.RandomState(seed)
else:
rndm = rand_state
op_dim = self.state_space.dim
unitary_dim = int(round(_np.sqrt(op_dim)))
assert(unitary_dim**2 == op_dim), \
"Model dimension must be a perfect square, %d is not" % op_dim
mdl_randomized = self.copy()
for opLabel, gate in self.operations.items():
randMat = scale * (rndm.randn(unitary_dim, unitary_dim)
+ 1j * rndm.randn(unitary_dim, unitary_dim))
randMat = _np.transpose(_np.conjugate(randMat)) + randMat
# make randMat Hermitian: (A_dag + A)^dag = (A_dag + A)
randUnitary = _scipy.linalg.expm(-1j * randMat)
randOp = _ot.unitary_to_superop(randUnitary, self.basis)
mdl_randomized.operations[opLabel] = _op.FullArbitraryOp(
_np.dot(randOp, gate))
#Note: this function does NOT randomize instruments
return mdl_randomized
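# Usage sketch: small random unitary perturbations applied to every gate of an assumed model `mdl`.
#   >>> mdl_ru = mdl.randomize_with_unitary(scale=0.01, seed=1234)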
def increase_dimension(self, new_dimension):
"""
Enlarge the dimension of this model.
Enlarge the spam vectors and operation matrices of model to a specified
dimension, and return the resulting inflated model. Spam vectors
are zero-padded and operation matrices are padded with 1's on the diagonal
and zeros on the off-diagonal (effectively padded by identity operation).
Parameters
----------
new_dimension : int
the dimension of the returned model. That is,
the returned model will have rho and E vectors that
have shape (new_dimension,1) and operation matrices with shape
(new_dimension,new_dimension)
Returns
-------
Model
the increased-dimension Model
"""
if isinstance(new_dimension, _statespace.StateSpace):
state_space = new_dimension
new_dimension = state_space.dim
else:
state_space = _statespace.default_space_for_dim(new_dimension)
curDim = self.state_space.dim
assert(new_dimension > curDim)
#For now, just create a dumb default state space labels and basis for the new model:
sslbls = [('L%d' % i,) for i in range(new_dimension)] # interpret as independent classical levels
dumb_basis = _DirectSumBasis([_BuiltinBasis('gm', 1)] * new_dimension,
name="Unknown") # - just act on diagonal density mx
new_model = ExplicitOpModel(sslbls, dumb_basis, "full", "auto", "auto", "auto",
self.preps._prefix, self.effects_prefix,
self.operations._prefix, self.povms._prefix,
self.instruments._prefix, self._sim.copy())
#new_model._dim = new_dimension # dim will be set when elements are added
#new_model.reset_basis() #FUTURE: maybe user can specify how increase is being done?
addedDim = new_dimension - curDim
vec_zeroPad = _np.zeros((addedDim, 1), 'd')
evotype = self.evotype
#Increase dimension of rhoVecs and EVecs by zero-padding
for lbl, rhoVec in self.preps.items():
assert(len(rhoVec) == curDim)
new_model.preps[lbl] = \
_state.FullState(_np.concatenate((rhoVec, vec_zeroPad)), evotype, state_space)
for lbl, povm in self.povms.items():
assert(povm.state_space.dim == curDim)
effects = [(elbl, _np.concatenate((EVec, vec_zeroPad)))
for elbl, EVec in povm.items()]
if isinstance(povm, _povm.TPPOVM):
new_model.povms[lbl] = _povm.TPPOVM(effects, evotype, state_space)
else:
new_model.povms[lbl] = _povm.UnconstrainedPOVM(effects, evotype, state_space) # everything else
#Increase dimension of gates by assuming they act as identity on additional (unknown) space
for opLabel, gate in self.operations.items():
assert(gate.shape == (curDim, curDim))
newOp = _np.zeros((new_dimension, new_dimension))
newOp[0:curDim, 0:curDim] = gate[:, :]
for i in range(curDim, new_dimension): newOp[i, i] = 1.0
new_model.operations[opLabel] = _op.FullArbitraryOp(newOp, evotype, state_space)
for instLabel, inst in self.instruments.items():
inst_ops = []
for outcomeLbl, gate in inst.items():
newOp = _np.zeros((new_dimension, new_dimension))
newOp[0:curDim, 0:curDim] = gate[:, :]
for i in range(curDim, new_dimension): newOp[i, i] = 1.0
inst_ops.append((outcomeLbl, _op.FullArbitraryOp(newOp, evotype, state_space)))
new_model.instruments[instLabel] = _instrument.Instrument(inst_ops, evotype, state_space)
if len(self.factories) > 0:
raise NotImplementedError("Changing dimension of models with factories is not supported yet!")
return new_model
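# Usage sketch: padding an assumed 1-qubit model `mdl` (dim 4) up to dimension 9; gates act as the
# identity on the added levels and SPAM vectors are zero-padded.
#   >>> mdl_big = mdl.increase_dimension(9)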
def _decrease_dimension(self, new_dimension):
"""
Decrease the dimension of this model.
Shrink the spam vectors and operation matrices of model to a specified
dimension, and return the resulting model.
Parameters
----------
new_dimension : int
the dimension of the returned model. That is,
the returned model will have rho and E vectors that
have shape (new_dimension,1) and operation matrices with shape
(new_dimension,new_dimension)
Returns
-------
Model
the decreased-dimension Model
"""
if isinstance(new_dimension, _statespace.StateSpace):
state_space = new_dimension
new_dimension = state_space.dim
else:
state_space = _statespace.default_space_for_dim(new_dimension)
curDim = self.state_space.dim
assert(new_dimension < curDim)
#For now, just create a dumb default state space labels and basis for the new model:
sslbls = [('L%d' % i,) for i in range(new_dimension)] # interpret as independent classical levels
dumb_basis = _DirectSumBasis([_BuiltinBasis('gm', 1)] * new_dimension,
name="Unknown") # - just act on diagonal density mx
new_model = ExplicitOpModel(sslbls, dumb_basis, "full", "auto", "auto", "auto",
self.preps._prefix, self.effects_prefix,
self.operations._prefix, self.povms._prefix,
self.instruments._prefix, self._sim.copy())
#new_model._dim = new_dimension # dim will be set when elements are added
#new_model.reset_basis() #FUTURE: maybe user can specify how decrease is being done?
#Decrease dimension of rhoVecs and EVecs by truncation
for lbl, rhoVec in self.preps.items():
assert(len(rhoVec) == curDim)
new_model.preps[lbl] = \
_state.FullState(rhoVec[0:new_dimension, :], self.evotype, state_space)
for lbl, povm in self.povms.items():
assert(povm.state_space.dim == curDim)
effects = [(elbl, EVec[0:new_dimension, :]) for elbl, EVec in povm.items()]
if isinstance(povm, _povm.TPPOVM):
new_model.povms[lbl] = _povm.TPPOVM(effects, self.evotype, state_space)
else:
new_model.povms[lbl] = _povm.UnconstrainedPOVM(effects, self.evotype, state_space) # everything else
#Decrease dimension of gates by truncation
for opLabel, gate in self.operations.items():
assert(gate.shape == (curDim, curDim))
newOp = _np.zeros((new_dimension, new_dimension))
newOp[:, :] = gate[0:new_dimension, 0:new_dimension]
new_model.operations[opLabel] = _op.FullArbitraryOp(newOp, self.evotype, state_space)
for instLabel, inst in self.instruments.items():
inst_ops = []
for outcomeLbl, gate in inst.items():
newOp = _np.zeros((new_dimension, new_dimension))
newOp[:, :] = gate[0:new_dimension, 0:new_dimension]
inst_ops.append((outcomeLbl, _op.FullArbitraryOp(newOp, self.evotype, state_space)))
new_model.instruments[instLabel] = _instrument.Instrument(inst_ops, self.evotype, state_space)
if len(self.factories) > 0:
raise NotImplementedError("Changing dimension of models with factories is not supported yet!")
return new_model
def kick(self, absmag=1.0, bias=0, seed=None):
"""
"Kick" this model by adding to each gate a random matrix.
The random matrices have values uniformly distributed in the interval
[bias-absmag,bias+absmag].
Parameters
----------
absmag : float, optional
The maximum magnitude of the entries in the "kick" matrix
relative to bias.
bias : float, optional
The bias of the entries in the "kick" matrix.
seed : int, optional
if not None, seed numpy's random number generator with this value
before generating random depolarizations.
Returns
-------
Model
the kicked model.
"""
kicked_gs = self.copy()
rndm = _np.random.RandomState(seed)
for opLabel, gate in self.operations.items():
delta = absmag * 2.0 * (rndm.random_sample(gate.shape) - 0.5) + bias
kicked_gs.operations[opLabel] = _op.FullArbitraryOp(
kicked_gs.operations[opLabel] + delta)
#Note: does not alter instruments!
return kicked_gs
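# Usage sketch: adding a small uniform random matrix to every gate of an assumed model `mdl`.
#   >>> mdl_kicked = mdl.kick(absmag=1e-3, seed=1234)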
def compute_clifford_symplectic_reps(self, oplabel_filter=None):
"""
Constructs a dictionary of the symplectic representations for all the Clifford gates in this model.
Non-:class:`StaticCliffordOp` gates will be ignored and their entries omitted
from the returned dictionary.
Parameters
----------
oplabel_filter : iterable, optional
A list, tuple, or set of operation labels whose symplectic
representations should be returned (if they exist).
Returns
-------
dict
keys are operation labels and/or just the root names of gates
(without any state space indices/labels). Values are
`(symplectic_matrix, phase_vector)` tuples.
"""
gfilter = set(oplabel_filter) if oplabel_filter is not None \
else None
srep_dict = {}
for gl, gate in self.operations.items():
if (gfilter is not None) and (gl not in gfilter): continue
if isinstance(gate, _op.EmbeddedOp):
assert(isinstance(gate.embedded_op, _op.StaticCliffordOp)), \
"EmbeddedClifforGate contains a non-StaticCliffordOp!"
lbl = gl.name # strip state space labels off since this is a
# symplectic rep for the *embedded* gate
srep = (gate.embedded_op.smatrix, gate.embedded_op.svector)
elif isinstance(gate, _op.StaticCliffordOp):
lbl = gl.name
srep = (gate.smatrix, gate.svector)
else:
lbl = srep = None
if srep:
if lbl in srep_dict:
assert(srep == srep_dict[lbl]), \
"Inconsistent symplectic reps for %s label!" % lbl
else:
srep_dict[lbl] = srep
return srep_dict
@_deprecated_fn
def print_info(self):
"""
Print to stdout relevant information about this model.
This information includes the Choi matrices and their eigenvalues.
Returns
-------
None
"""
print(self)
print("\n")
print("Basis = ", self.basis.name)
print("Choi Matrices:")
for (label, gate) in self.operations.items():
print(("Choi(%s) in pauli basis = \n" % label,
_mt.mx_to_string_complex(_jt.jamiolkowski_iso(gate))))
print((" --eigenvals = ", sorted(
[ev.real for ev in _np.linalg.eigvals(
_jt.jamiolkowski_iso(gate))]), "\n"))
print(("Sum of negative Choi eigenvalues = ", _jt.sum_of_negative_choi_eigenvalues(self)))
def _effect_labels_for_povm(self, povm_lbl):
"""
Gets the effect labels corresponding to the possible outcomes of POVM label `povm_lbl`.
Parameters
----------
povm_lbl : Label
POVM label.
Returns
-------
list
A list of strings which label the POVM outcomes.
"""
return tuple(self.povms[povm_lbl].keys())
def _member_labels_for_instrument(self, inst_lbl):
"""
Gets the member labels corresponding to the possible outcomes of the instrument labeled by `inst_lbl`.
Parameters
----------
inst_lbl : Label
Instrument label.
Returns
-------
list
A list of strings which label the instrument members.
"""
return tuple(self.instruments[inst_lbl].keys())
def _reinit_opcaches(self):
self._opcaches.clear()
# Add expanded instrument and POVM operations to cache so these are accessible to circuit calcs
simplified_effects = _collections.OrderedDict()
for povm_lbl, povm in self.povms.items():
for k, e in povm.simplify_effects(povm_lbl).items():
simplified_effects[k] = e
simplified_ops = _collections.OrderedDict()
for inst_lbl, inst in self.instruments.items():
for k, g in inst.simplify_operations(inst_lbl).items():
simplified_ops[k] = g
self._opcaches['povm-layers'] = simplified_effects
self._opcaches['op-layers'] = simplified_ops
def create_processor_spec(self, qudit_labels='auto'):
"""
Create a processor specification from this model with the given qudit labels.
Currently this only works for models on qudits.
Parameters
----------
qudit_labels : tuple or `"auto"`, optional
A tuple of qudit labels, e.g. ('Q0', 'Q1') or (0, 1). `"auto"`
uses the labels in this model's state space labels.
Returns
-------
QuditProcessorSpec or QubitProcessorSpec
"""
from pygsti.processors import QubitProcessorSpec as _QubitProcessorSpec
from pygsti.processors import QuditProcessorSpec as _QuditProcessorSpec
#go through ops, building up availability and unitaries, then create processor spec...
nqudits = self.state_space.num_qudits
gate_unitaries = _collections.OrderedDict()
all_sslbls = self.state_space.tensor_product_block_labels(0)
all_udims = [self.state_space.label_udimension(lbl) for lbl in all_sslbls]
availability = {}
def extract_unitary(Umx, U_sslbls, extracted_sslbls):
if extracted_sslbls is None: return Umx # no extraction to be done
extracted_sslbls = list(extracted_sslbls)
extracted_indices = [U_sslbls.index(lbl) for lbl in extracted_sslbls]
extracted_udims = [self.state_space.label_udimension(lbl) for lbl in extracted_sslbls]
# can assume all lbls are qudits, so increment associated with qudit k is (2^(N-1-k) for qubits):
all_inc = _np.flip(_np.cumprod(list(reversed(all_udims[1:] + [1]))))
extracted_inc = all_inc[extracted_indices]
# assume this is a kronecker product (check this in FUTURE?), so just fill extracted
# unitary by fixing all non-extracted qudits (assumed identity-action on these) to 0
# and looping over extracted ones:
U_extracted = _np.zeros((_np.prod(extracted_udims), _np.prod(extracted_udims)), complex)
for ii, itup in enumerate(_itertools.product(*[range(ud) for ud in extracted_udims])):
i = _np.dot(extracted_inc, itup)
for jj, jtup in enumerate(_itertools.product(*[range(ud) for ud in extracted_udims])):
j = _np.dot(extracted_inc, jtup)
U_extracted[ii, jj] = Umx[i, j]
return U_extracted
def add_availability(opkey, op):
if opkey == _Label(()) or opkey.is_simple():
if opkey == _Label(()): # special case: turn empty tuple labels into "{idle}" gate in processor spec
gn = "{idle}"
sslbls = None
elif opkey.is_simple():
gn = opkey.name
sslbls = opkey.sslbls
#if sslbls is not None:
# observed_sslbls.update(sslbls)
if gn not in gate_unitaries or gate_unitaries[gn] is None:
U = _ot.superop_to_unitary(op.to_dense('HilbertSchmidt'), self.basis) \
if (op is not None) else None # U == None indicates "unknown, up until this point"
Ulocal = extract_unitary(U, all_sslbls, sslbls)
gate_unitaries[gn] = Ulocal
if gn in availability:
if sslbls not in availability[gn]:
availability[gn].append(sslbls)
else:
availability[gn] = [sslbls]
elif sslbls not in availability[gn]:
availability[gn].append(sslbls)
else: # a COMPOUND label with components => process each component separately
for component in opkey.components:
add_availability(component, None) # recursive call - the reason we need this to be a function!
#observed_sslbls = set()
for opkey, op in self.operations.items(): # TODO: need to deal with special () idle label
add_availability(opkey, op)
#Check that there aren't any undetermined unitaries
unknown_unitaries = [k for k, v in gate_unitaries.items() if v is None]
if len(unknown_unitaries) > 0:
raise ValueError("Unitary not specfied for %s gate(s)!" % str(unknown_unitaries))
if qudit_labels == 'auto':
qudit_labels = self.state_space.tensor_product_block_labels(0)
#OR: qudit_labels = self.state_space.qudit_labels # only works for a QuditSpace
#OR: qudit_labels = self.state_space.qubit_labels # only works for a QubitSpace
#OR: qubit_labels = tuple(sorted(observed_sslbls))
if qudit_labels is None: # special case of legacy explicit models where all gates have availability [None]
qudit_labels = tuple(range(nqudits))
assert(len(qudit_labels) == nqudits), \
"Length of `qudit_labels` must equal %d (not %d)!" % (nqudits, len(qudit_labels))
if all([udim == 2 for udim in all_udims]):
return _QubitProcessorSpec(nqudits, list(gate_unitaries.keys()), gate_unitaries, availability,
qubit_labels=qudit_labels)
else:
return _QuditProcessorSpec(qudit_labels, all_udims, list(gate_unitaries.keys()), gate_unitaries,
availability)
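# Usage sketch: deriving a processor specification from an assumed explicit qubit model `mdl`;
# with qudit_labels='auto' the labels are taken from the model's state space.
#   >>> pspec = mdl.create_processor_spec()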
def create_modelmember_graph(self):
return _MMGraph({
'preps': self.preps,
'povms': self.povms,
'operations': self.operations,
'instruments': self.instruments,
'factories': self.factories,
})
def _to_nice_serialization(self):
state = super()._to_nice_serialization()
state.update({'basis': self.basis.to_nice_serialization(),
'default_gate_type': self.operations.default_param,
'default_prep_type': self.preps.default_param,
'default_povm_type': self.povms.default_param,
'default_instrument_type': self.instruments.default_param,
'prep_prefix': self.preps._prefix,
'effect_prefix': self.effects_prefix,
'gate_prefix': self.operations._prefix,
'povm_prefix': self.povms._prefix,
'instrument_prefix': self.instruments._prefix,
'evotype': str(self.evotype), # TODO or serialize?
'simulator': self.sim.to_nice_serialization(),
'default_gauge_group': (self.default_gauge_group.to_nice_serialization()
if (self.default_gauge_group is not None) else None)
})
mmgraph = self.create_modelmember_graph()
state['modelmembers'] = mmgraph.create_serialization_dict()
return state
@classmethod
def _from_nice_serialization(cls, state):
state_space = _statespace.StateSpace.from_nice_serialization(state['state_space'])
basis = _Basis.from_nice_serialization(state['basis'])
modelmembers = _MMGraph.load_modelmembers_from_serialization_dict(state['modelmembers'])
simulator = _FSim.from_nice_serialization(state['simulator'])
default_gauge_group = _GaugeGroup.from_nice_serialization(state['default_gauge_group']) \
if (state['default_gauge_group'] is not None) else None
mdl = cls(state_space, basis, state['default_gate_type'],
state['default_prep_type'], state['default_povm_type'],
state['default_instrument_type'], state['prep_prefix'], state['effect_prefix'],
state['gate_prefix'], state['povm_prefix'], state['instrument_prefix'],
simulator, state['evotype'])
mdl.preps.update(modelmembers.get('preps', {}))
mdl.povms.update(modelmembers.get('povms', {}))
mdl.operations.update(modelmembers.get('operations', {}))
mdl.instruments.update(modelmembers.get('instruments', {}))
mdl.factories.update(modelmembers.get('factories', {}))
mdl._clean_paramvec()
mdl.default_gauge_group = default_gauge_group
return mdl
def errorgen_coefficients(self, normalized_elem_gens=True):
"""TODO: docstring - returns a nested dict containing all the error generator coefficients for all
the operations in this model. """
if not normalized_elem_gens:
def rescale(coeffs):
""" HACK: rescales errorgen coefficients for normalized-Pauli-basis elementary error gens
to be coefficients for the usual un-normalized-Pauli-basis elementary gens. This
is only needed in the Hamiltonian case, as the non-ham "elementary" gen has a
factor of d2 baked into it.
"""
d2 = _np.sqrt(self.dim); d = _np.sqrt(d2)
return {lbl: (val / d if lbl.errorgen_type == 'H' else val) for lbl, val in coeffs.items()}
op_coeffs = {op_label: rescale(self.operations[op_label].errorgen_coefficients())
for op_label in self.operations}
op_coeffs.update({prep_label: rescale(self.preps[prep_label].errorgen_coefficients())
for prep_label in self.preps})
op_coeffs.update({povm_label: rescale(self.povms[povm_label].errorgen_coefficients())
for povm_label in self.povms})
else:
op_coeffs = {op_label: self.operations[op_label].errorgen_coefficients()
for op_label in self.operations}
op_coeffs.update({prep_label: self.preps[prep_label].errorgen_coefficients()
for prep_label in self.preps})
op_coeffs.update({povm_label: self.povms[povm_label].errorgen_coefficients()
for povm_label in self.povms})
return op_coeffs
def _add_reparameterization(self, primitive_op_labels, fogi_dirs, errgenset_space_labels):
# Create re-parameterization map from "fogi" parameters to old/existing model parameters
# Note: fogi_coeffs = dot(ham_fogi_dirs.T, errorgen_vec)
# errorgen_vec = dot(pinv(ham_fogi_dirs.T), fogi_coeffs)
# Ingredients:
# MX : fogi_coeffs -> op_coeffs e.g. pinv(ham_fogi_dirs.T)
# deriv = op_params -> op_coeffs e.g. d(op_coeffs)/d(op_params) implemented by ops
# fogi_deriv = d(fogi_coeffs)/d(fogi_params) : fogi_params -> fogi_coeffs - near I (these are
# nearly identical apart from some squaring?)
#
# so: d(op_params) = inv(Deriv) * MX * fogi_deriv * d(fogi_params)
# d(op_params)/d(fogi_params) = inv(Deriv) * MX * fogi_deriv
# To first order: op_params = (inv(Deriv) * MX * fogi_deriv) * fogi_params := F * fogi_params
# (fogi_params == "model params")
# To compute F,
# -let fogi_deriv == I (shape nFogi,nFogi)
# -MX is shape (nFullSpace, nFogi) == pinv(fogi_dirs.T)
# -deriv is shape (nOpCoeffs, nOpParams), inv(deriv) = (nOpParams, nOpCoeffs)
# - need Deriv of shape (nOpParams, nFullSpace) - build by placing deriv mxs in gpindices rows and
# correct cols). We'll require that deriv be square (op has same #params as coeffs) and is *invertible*
# (raise error otherwise). Then we can construct inv(Deriv) by placing inv(deriv) into inv(Deriv) by
# rows->gpindices and cols->elem_label-match.
nOpParams = self.num_params # the number of parameters *before* any reparameterization. TODO: better way?
errgenset_space_labels_indx = _collections.OrderedDict(
[(lbl, i) for i, lbl in enumerate(errgenset_space_labels)])
invDeriv = _np.zeros((nOpParams, fogi_dirs.shape[0]), 'd')
used_param_indices = set()
for op_label in primitive_op_labels:
#TODO: update this conditional to something more robust (same conditional in fogitools.py too)
if isinstance(op_label, str) and op_label.startswith('rho'):
op = self.preps[op_label]
elif isinstance(op_label, str) and op_label.startswith('M'):
op = self.povms[op_label]
else:
op = self.operations[op_label]
lbls = op.errorgen_coefficient_labels() # length num_coeffs
param_indices = op.gpindices_as_array() # length num_params
deriv = op.errorgen_coefficients_array_deriv_wrt_params() # shape == (num_coeffs, num_params)
inv_deriv = _np.linalg.inv(deriv)
used_param_indices.update(param_indices)
for i, lbl in enumerate(lbls):
invDeriv[param_indices, errgenset_space_labels_indx[(op_label, lbl)]] = inv_deriv[:, i]
unused_param_indices = sorted(list(set(range(nOpParams)) - used_param_indices))
prefix_mx = _np.zeros((nOpParams, len(unused_param_indices)), 'd')
for j, indx in enumerate(unused_param_indices):
prefix_mx[indx, j] = 1.0
F = _np.dot(invDeriv, _np.linalg.pinv(fogi_dirs.T))
F = _np.concatenate((prefix_mx, F), axis=1)
#Not sure if these are needed: "coefficients" have names, but maybe "parameters" shouldn't?
#fogi_param_names = ["P%d" % i for i in range(len(unused_param_indices))] \
# + ham_fogi_vec_names + other_fogi_vec_names
return _LinearInterposer(F)
def _format_gauge_action_matrix(self, mx, op, reduce_to_model_space, row_basis, op_gauge_basis,
create_complete_basis_fn):
from pygsti.baseobjs.errorgenbasis import CompleteElementaryErrorgenBasis as _CompleteElementaryErrorgenBasis
from pygsti.baseobjs.errorgenbasis import ExplicitElementaryErrorgenBasis as _ExplicitElementaryErrorgenBasis
from pygsti.baseobjs.errorgenspace import ErrorgenSpace as _ErrorgenSpace
import scipy.sparse as _sps
#Next:
# - make linear combos of basis els so that all (nonzero) disallowed rows become zero, i.e.,
# find nullspace of the matrix formed from the (nonzero) disallowed rows of mx.
# - promote op_gauge_basis => op_gauge_space (linear combos of the same elementary basis - op_gauge_basis)
all_sslbls = self.state_space.tensor_product_block_labels(0)
if reduce_to_model_space:
allowed_lbls = op.errorgen_coefficient_labels()
allowed_lbls_set = set(allowed_lbls)
allowed_row_basis = _ExplicitElementaryErrorgenBasis(self.state_space, allowed_lbls, basis1q=None)
disallowed_indices = [i for i, lbl in enumerate(row_basis.labels) if lbl not in allowed_lbls_set]
if len(disallowed_indices) > 0:
disallowed_rows = mx[disallowed_indices, :] # a sparse (lil) matrix
allowed_gauge_linear_combos = _mt.nice_nullspace(disallowed_rows.toarray(), tol=1e-4) # DENSE for now
mx = _sps.csr_matrix(mx.dot(allowed_gauge_linear_combos)) # dot sometimes/always returns dense array
op_gauge_space = _ErrorgenSpace(allowed_gauge_linear_combos, op_gauge_basis) # DENSE mxs in eg-spaces
#FOGI DEBUG: print("DEBUG => mx reduced to ", mx.shape)
else:
op_gauge_space = _ErrorgenSpace(_np.identity(len(op_gauge_basis), 'd'), op_gauge_basis)
else:
allowed_row_basis = create_complete_basis_fn(all_sslbls)
op_gauge_space = _ErrorgenSpace(_np.identity(len(op_gauge_basis), 'd'),
op_gauge_basis)
# Note: above, do we need to store identity? could we just use the basis as a space here? or 'None'?
# "reshape" mx so rows correspond to allowed_row_basis (the op's allowed labels)
# (maybe make this into a subroutine?)
assert(_sps.isspmatrix_csr(mx))
data = []; col_indices = []; rowptr = [0] # build up a CSR matrix manually
allowed_lbls_set = set(allowed_row_basis.labels)
allowed_row_indices = [(i, allowed_row_basis.label_index(lbl))
for i, lbl in enumerate(row_basis.labels) if lbl in allowed_lbls_set]
for i, new_i in sorted(allowed_row_indices, key=lambda x: x[1]):
# transfer i-th row of mx (whose rows are in row_basis) to new_i-th row of new mx
# - first increment rowptr as needed
while len(rowptr) <= new_i:
rowptr.append(len(data))
# - then add data
col_indices.extend(mx.indices[mx.indptr[i]:mx.indptr[i + 1]])
data.extend(mx.data[mx.indptr[i]:mx.indptr[i + 1]])
rowptr.append(len(data))
while len(rowptr) <= len(allowed_row_basis): # fill in rowptr for any (empty) ending rows
rowptr.append(len(data))
allowed_rowspace_mx = _sps.csr_matrix((data, col_indices, rowptr),
shape=(len(allowed_row_basis), mx.shape[1]), dtype=mx.dtype)
return allowed_rowspace_mx, allowed_row_basis, op_gauge_space
def setup_fogi(self, initial_gauge_basis, create_complete_basis_fn=None,
op_label_abbrevs=None, reparameterize=False, reduce_to_model_space=True,
dependent_fogi_action='drop', include_spam=True):
from pygsti.baseobjs.errorgenbasis import CompleteElementaryErrorgenBasis as _CompleteElementaryErrorgenBasis
from pygsti.baseobjs.errorgenbasis import ExplicitElementaryErrorgenBasis as _ExplicitElementaryErrorgenBasis
from pygsti.baseobjs.errorgenspace import ErrorgenSpace as _ErrorgenSpace
import scipy.sparse as _sps
# ExplicitOpModel-specific - and assumes model's ops have specific structure (see extract_std_target*) !!
primitive_op_labels = list(self.operations.keys())
primitive_prep_labels = list(self.preps.keys()) if include_spam else []
primitive_povm_labels = list(self.povms.keys()) if include_spam else []
# "initial" gauge space is the space of error generators initially considered as
# gauge transformations. It can be reduced by the errors allowed on operations (by
# their type and support).
def extract_std_target_mx(op):
# TODO: more general decomposition of op - here it must be Composed(UnitaryOp, ExpErrorGen)
# or just ExpErrorGen
if isinstance(op, _op.ExpErrorgenOp): # assume just an identity op
U = _np.identity(op.state_space.dim, 'd')
elif isinstance(op, _op.ComposedOp): # assume first element gives unitary
op_mx = op.factorops[0].to_dense() # assumes a LindbladOp and low num qubits
nQubits = int(round(_np.log(op_mx.shape[0]) / _np.log(4))); assert(op_mx.shape[0] == 4**nQubits)
tensorprod_std_basis = _Basis.cast('std', [(4,) * nQubits])
U = _bt.change_basis(op_mx, self.basis, tensorprod_std_basis) # 'std' is incorrect
else:
raise ValueError("Could not extract target matrix from %s op!" % str(type(op)))
return U
def extract_std_target_vec(v):
#TODO - make more sophisticated...
dim = v.state_space.dim
nQubits = int(round(_np.log(dim) / _np.log(4))); assert(dim == 4**nQubits)
tensorprod_std_basis = _Basis.cast('std', [(4,) * nQubits])
v = _bt.change_basis(v.to_dense(), self.basis, tensorprod_std_basis) # 'std' is incorrect
return v
if create_complete_basis_fn is None:
assert(isinstance(initial_gauge_basis, _CompleteElementaryErrorgenBasis)), \
("Must supply a custom `create_complete_basis_fn` if initial gauge basis is not a complete basis!")
def create_complete_basis_fn(target_sslbls):
return initial_gauge_basis.create_subbasis(target_sslbls, retain_max_weights=False)
# get gauge action matrices on the initial space
gauge_action_matrices = _collections.OrderedDict()
gauge_action_gauge_spaces = _collections.OrderedDict()
errorgen_coefficient_labels = _collections.OrderedDict() # by operation
for op_label in primitive_op_labels: # Note: "ga" stands for "gauge action" in variable names below
op = self.operations[op_label]
U = extract_std_target_mx(op)
# below: special logic for, e.g., 2Q explicit models with 2Q gate matched with Gx:0 label
target_sslbls = op_label.sslbls if (op_label.sslbls is not None and U.shape[0] < self.state_space.dim) \
else self.state_space.tensor_product_block_labels(0)
op_gauge_basis = initial_gauge_basis.create_subbasis(target_sslbls) # gauge space lbls that overlap target
# Note: can assume gauge action is zero (U acts as identity) on all basis elements not in op_gauge_basis
initial_row_basis = create_complete_basis_fn(target_sslbls)
#support_sslbls, gauge_errgen_basis = get_overlapping_labels(gauge_errgen_space_labels, target_sslbls)
#FOGI DEBUG print("DEBUG -- ", op_label)
mx, row_basis = _fogit.first_order_gauge_action_matrix(U, target_sslbls, self.state_space,
op_gauge_basis, initial_row_basis)
#FOGI DEBUG print("DEBUG => mx is ", mx.shape)
# Note: mx is a sparse lil matrix
# mx cols => op_gauge_basis, mx rows => row_basis, as zero rows have already been removed
# (DONE: - remove all all-zero rows from mx (and corresponding basis labels) )
# Note: row_basis is a simple subset of initial_row_basis
allowed_rowspace_mx, allowed_row_basis, op_gauge_space = \
self._format_gauge_action_matrix(mx, op, reduce_to_model_space, row_basis, op_gauge_basis,
create_complete_basis_fn)
errorgen_coefficient_labels[op_label] = allowed_row_basis.labels
gauge_action_matrices[op_label] = allowed_rowspace_mx
gauge_action_gauge_spaces[op_label] = op_gauge_space
#FOGI DEBUG print("DEBUG => final allowed_rowspace_mx shape =", allowed_rowspace_mx.shape)
# Similar for SPAM
for prep_label in primitive_prep_labels:
prep = self.preps[prep_label]
v = extract_std_target_vec(prep)
target_sslbls = prep_label.sslbls if (prep_label.sslbls is not None and v.shape[0] < self.state_space.dim) \
else self.state_space.tensor_product_block_labels(0)
op_gauge_basis = initial_gauge_basis.create_subbasis(target_sslbls) # gauge space lbls that overlap target
initial_row_basis = create_complete_basis_fn(target_sslbls)
mx, row_basis = _fogit.first_order_gauge_action_matrix_for_prep(v, target_sslbls, self.state_space,
op_gauge_basis, initial_row_basis)
allowed_rowspace_mx, allowed_row_basis, op_gauge_space = \
self._format_gauge_action_matrix(mx, prep, reduce_to_model_space, row_basis, op_gauge_basis,
create_complete_basis_fn)
errorgen_coefficient_labels[prep_label] = allowed_row_basis.labels
gauge_action_matrices[prep_label] = allowed_rowspace_mx
gauge_action_gauge_spaces[prep_label] = op_gauge_space
for povm_label in primitive_povm_labels:
povm = self.povms[povm_label]
vecs = [extract_std_target_vec(effect) for effect in povm.values()]
target_sslbls = povm_label.sslbls if (povm_label.sslbls is not None
and vecs[0].shape[0] < self.state_space.dim) \
else self.state_space.tensor_product_block_labels(0)
op_gauge_basis = initial_gauge_basis.create_subbasis(target_sslbls) # gauge space lbls that overlap target
initial_row_basis = create_complete_basis_fn(target_sslbls)
mx, row_basis = _fogit.first_order_gauge_action_matrix_for_povm(vecs, target_sslbls, self.state_space,
op_gauge_basis, initial_row_basis)
allowed_rowspace_mx, allowed_row_basis, op_gauge_space = \
self._format_gauge_action_matrix(mx, povm, reduce_to_model_space, row_basis, op_gauge_basis,
create_complete_basis_fn)
errorgen_coefficient_labels[povm_label] = allowed_row_basis.labels
gauge_action_matrices[povm_label] = allowed_rowspace_mx
gauge_action_gauge_spaces[povm_label] = op_gauge_space
norm_order = "auto" # NOTE - should be 1 for normalizing 'S' quantities and 2 for 'H',
# so 'auto' utilizes intelligence within FOGIStore
self.fogi_store = _FOGIStore(gauge_action_matrices, gauge_action_gauge_spaces,
errorgen_coefficient_labels, # gauge_errgen_space_labels,
op_label_abbrevs, reduce_to_model_space, dependent_fogi_action,
norm_order=norm_order)
if reparameterize:
self.param_interposer = self._add_reparameterization(
primitive_op_labels + primitive_prep_labels + primitive_povm_labels,
self.fogi_store.fogi_directions.toarray(), # DENSE now (leave sparse in FUTURE?)
self.fogi_store.errorgen_space_op_elem_labels)
def fogi_errorgen_component_labels(self, include_fogv=False, typ='normal'):
labels = self.fogi_store.fogi_errorgen_direction_labels(typ)
if include_fogv:
labels += self.fogi_store.fogv_errorgen_direction_labels(typ)
return labels
def fogi_errorgen_components_array(self, include_fogv=False, normalized_elem_gens=True):
op_coeffs = self.errorgen_coefficients(normalized_elem_gens)
if include_fogv:
fogi_coeffs, fogv_coeffs = self.fogi_store.opcoeffs_to_fogiv_components_array(op_coeffs)
return _np.concatenate((fogi_coeffs, fogv_coeffs))
else:
return self.fogi_store.opcoeffs_to_fogi_components_array(op_coeffs)
def set_fogi_errorgen_components_array(self, components, include_fogv=False, normalized_elem_gens=True,
truncate=False):
fogi, fogv = self.fogi_store.num_fogi_directions, self.fogi_store.num_fogv_directions
if include_fogv:
n = fogi
fogi_coeffs, fogv_coeffs = components[0:fogi], components[n: n + fogv]
op_coeffs = self.fogi_store.fogiv_components_array_to_opcoeffs(fogi_coeffs, fogv_coeffs)
else:
fogi_coeffs = components[0:fogi]
op_coeffs = self.fogi_store.fogi_components_array_to_opcoeffs(fogi_coeffs)
if not normalized_elem_gens:
def inv_rescale(coeffs): # the inverse of the rescaling applied in fogi_errorgen_components_array
d2 = _np.sqrt(self.dim); d = _np.sqrt(d2)
return {lbl: (val * d if lbl.errorgen_type == 'H' else val) for lbl, val in coeffs.items()}
else:
def inv_rescale(coeffs): return coeffs
for op_label, coeff_dict in op_coeffs.items():
#TODO: update this conditional to something more robust (same conditional in fogitools.py too)
if isinstance(op_label, str) and op_label.startswith('rho'):
self.preps[op_label].set_errorgen_coefficients(inv_rescale(coeff_dict), truncate=truncate)
elif isinstance(op_label, str) and op_label.startswith('M'):
self.povms[op_label].set_errorgen_coefficients(inv_rescale(coeff_dict), truncate=truncate)
else:
self.operations[op_label].set_errorgen_coefficients(inv_rescale(coeff_dict), truncate=truncate)
def fogi_errorgen_vector(self, normalized_elem_gens=False):
"""
Constructs a vector from all the error generator coefficients involved in the FOGI analysis of this model.
Parameters
----------
normalized_elem_gens : bool, optional
Whether coefficients correspond to elementary error generators
constructed from *normalized* Pauli matrices.
Returns
-------
numpy.ndarray
"""
d = self.errorgen_coefficients(normalized_elem_gens=normalized_elem_gens)
errvec = _np.zeros(self.fogi_store.fogi_directions.shape[0], 'd')
for op_lbl in self.fogi_store.primitive_op_labels:
errdict = d[op_lbl]
elem_errgen_lbls = self.fogi_store.elem_errorgen_labels_by_op[op_lbl]
elem_errgen_indices = _slct.indices(self.fogi_store.op_errorgen_indices[op_lbl])
for elemgen_lbl, i in zip(elem_errgen_lbls, elem_errgen_indices):
errvec[i] = errdict.get(elemgen_lbl, 0.0)
return errvec
def _fogi_errorgen_vector_projection(self, space, normalized_elem_gens=False):
""" A helper function that projects self.errorgen_vector onto the space spanned by the columns of `space` """
errvec = self.fogi_errorgen_vector(normalized_elem_gens)
Pspace = space @ _np.linalg.pinv(space) # construct projector
return Pspace @ errvec # projected errvec
# create map parameter indices <=> fogi_vector_indices (for each fogi store)
def _create_model_parameter_to_fogi_errorgen_space_map(self):
fogi_store = self.fogi_store
num_elem_errgens, num_fogi_vecs = fogi_store.fogi_directions.shape
param_to_fogi_errgen_space_mx = _np.zeros((num_elem_errgens, self.num_params), 'd')
for op_label in fogi_store.primitive_op_labels:
elem_errgen_lbls = fogi_store.elem_errorgen_labels_by_op[op_label]
fogi_errgen_indices = _slct.indices(fogi_store.op_errorgen_indices[op_label])
assert(len(fogi_errgen_indices) == len(elem_errgen_lbls))
op = self.operations[op_label]
coeff_index_lookup = {elem_lbl: i for i, elem_lbl in enumerate(op.errorgen_coefficient_labels())}
coeff_indices = [coeff_index_lookup.get(elem_lbl, None) for elem_lbl in elem_errgen_lbls]
# For our particularly simple parameterization (H+s) op parameter indices == coeff indices:
assert(_np.allclose(op.errorgen_coefficients_array_deriv_wrt_params(), _np.identity(op.num_params))), \
"Currently only supported for simple parameterizations where op parameter indices == coeff indices"
op_param_indices = coeff_indices
gpindices = _slct.indices(op.gpindices)
mdl_param_indices = [(gpindices[i] if (i is not None) else None)
for i in op_param_indices]
for i_errgen, i_param in zip(fogi_errgen_indices, mdl_param_indices):
if i_param is not None:
param_to_fogi_errgen_space_mx[i_errgen, i_param] = 1.0
return param_to_fogi_errgen_space_mx
def fogi_contribution(self, op_label, error_type='H', intrinsic_or_relational='intrinsic',
target='all', hessian_for_errorbars=None):
"""
Computes a contribution to the FOGI error on a single gate.
This method is used when partitioning the (FOGI) error on a gate in
various ways, based on the error type, whether the error is intrinsic
or relational, and upon the error support.
Parameters
----------
op_label : Label
The operation to compute a contribution for.
error_type : {'H', 'S', 'fogi_total_error', 'fogi_infidelity'}
The type of errors to include in the partition. `'H'` means Hamiltonian
and `'S'` means Pauli stochastic. There are two options for including
*both* H and S errors: `'fogi_total_error'` adds the Hamiltonian errors
linearly with the Pauli stochastic errors, similar to the diamond distance;
`'fogi_infidelity'` adds the Hamiltonian errors in quadrature to the linear
sum of Pauli stochastic errors, similar to the entanglement or average gate
infidelity.
intrinsic_or_relational : {"intrinsic", "relational", "all"}
Restrict to intrinsic or relational errors (or not, using `"all"`).
target : tuple or "all"
A tuple of state space (qubit) labels to restrict to, e.g., `('Q0','Q1')`.
Note that including multiple labels selects only those quantities that
target *all* the labels. The special `"all"` value includes quantities
on all targets (no restriction).
hessian_for_errorbars : numpy.ndarray, optional
If not `None`, a hessian matrix for this model (with shape `(Np, Np)`
where `Np == self.num_params`, the number of model parameters) that is
used to compute and return 1-sigma error bars.
Returns
-------
value : float
The value of the requested contribution.
errorbar : float
The 1-sigma error bar, returned *only* if `hessian_for_errorbars` is given.
"""
if error_type in ('H', 'S'):
space = self.fogi_store.create_fogi_aggregate_single_op_space(op_label, error_type,
intrinsic_or_relational, target)
return self._fogi_contribution_single_type(error_type, space, hessian_for_errorbars)
elif error_type in ('fogi_total_error', 'fogi_infidelity'):
Hspace = self.fogi_store.create_fogi_aggregate_single_op_space(
op_label, 'H', intrinsic_or_relational, target)
Sspace = self.fogi_store.create_fogi_aggregate_single_op_space(
op_label, 'S', intrinsic_or_relational, target)
values = self._fogi_contribution_combined_HS_types(Hspace, Sspace, hessian_for_errorbars)
# (total, infidelity) if hessian is None otherwise (total, total_eb, infidelity, infidelity_eb)
if error_type == 'fogi_total_error':
return values[0] if (hessian_for_errorbars is None) else (values[0], values[1])
else: # error_type == 'fogi_infidelity'
return values[1] if (hessian_for_errorbars is None) else (values[2], values[3])
else:
raise ValueError("Invalid error type: '%s'" % str(error_type))
def _fogi_contribution_single_type(self, errorgen_type, space, hessian=None):
"""
Helper function to compute fogi contribution for a single error generator type,
where aggregation method is unambiguous.
Note: `space` should be a fogi-errgen-space subspace.
"""
fogi_store = self.fogi_store
proj_errvec = self._fogi_errorgen_vector_projection(space, normalized_elem_gens=False)
if errorgen_type == 'H':
val = _np.linalg.norm(proj_errvec)
elif errorgen_type == 'S':
val = sum(proj_errvec)
else:
raise ValueError("Invalid `errorgen_type` '%s' - must be 'H' or 'S'!" % str(errorgen_type))
val = _np.real_if_close(val)
if abs(val) < 1e-10: val = 0.0
if hessian is not None:
if space.size == 0: # special case
errbar = 0.0
else:
T = self._create_model_parameter_to_fogi_errorgen_space_map()
H_errgen_space = T @ hessian @ T.T
errgen_space_to_fogi = fogi_store.fogi_directions.toarray().T
pinv_espace_to_fogi = _np.linalg.pinv(errgen_space_to_fogi)
H_fogi = errgen_space_to_fogi @ H_errgen_space @ pinv_espace_to_fogi
inv_H_fogi = _np.linalg.pinv(H_fogi) # hessian in fogi space
#convert fogi space back to errgen_space
inv_H_errgen_space = pinv_espace_to_fogi @ inv_H_fogi @ errgen_space_to_fogi
Pspace = space @ _np.linalg.pinv(space)
proj_inv_H_errgen_space = Pspace @ inv_H_errgen_space @ Pspace.T # np.linalg.pinv(Pspace) #.T
if errorgen_type == 'H':
# elements added in quadrature, val = sqrt( sum(element^2) ) = dot(proj_errvec_hat, proj_errvec)
proj_errvec_hat = proj_errvec / _np.linalg.norm(proj_errvec)
errbar = proj_errvec_hat.T @ proj_inv_H_errgen_space @ proj_errvec_hat
elif errorgen_type == "S":
# elements added, val = sum(element) = dot(ones, proj_errvec)
ones = _np.ones((proj_inv_H_errgen_space.shape[0], 1), 'd')
errbar = ones.T @ proj_inv_H_errgen_space @ ones
else:
raise ValueError("Invalid `errorgen_type`!")
if abs(errbar) < 1e-10: errbar = 0.0
errbar = _np.sqrt(float(_np.real_if_close(errbar)))
return val if (hessian is None) else (val, errbar)
def _fogi_contribution_combined_HS_types(self, Hspace, Sspace, hessian=None):
"""
Helper function to compute the fogi contribution for combined error generator
types (currently only H+S), where there are multiple aggregation methods
(and all are computed).
Note: `space` should be a fogi-errgen-space subspace.
"""
#TODO: maybe can combine with function above?
errvec = self.fogi_errorgen_vector(normalized_elem_gens=False)
Hvec = self._fogi_errorgen_vector_projection(Hspace, normalized_elem_gens=False)
Hhat = Hvec / _np.linalg.norm(Hvec)
Svec = _np.sum(Sspace, axis=1)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 19 14:55:02 2021
@author: <NAME>
Copyright 2021 <NAME>
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import numpy as np
import vtk
import pandas as pd
from vtk.numpy_interface import dataset_adapter as dsa
from vtk.util.numpy_support import vtk_to_numpy
import datetime
import Methods_RA as Method
import csv
import pickle
import os
from scipy.spatial import cKDTree
EXAMPLE_DIR = os.path.dirname(os.path.realpath(__file__))
from create_bridges import add_free_bridge
vtk_version = vtk.vtkVersion.GetVTKSourceVersion().split()[-1].split('.')[0]
def ra_generate_fiber(model, args, job):
simid = job.ID+"/result_RA"
try:
os.makedirs(simid)
except OSError:
print ("Creation of the directory %s failed" % simid)
else:
print ("Successfully created the directory %s " % simid)
simid = job.ID+"/bridges"
try:
os.makedirs(simid)
except OSError:
print ("Creation of the directory %s failed" % simid)
else:
print ("Successfully created the directory %s " % simid)
# Riunet
tao_tv = 0.9
tao_icv = 0.95
tao_scv = 0.10
tao_ct_plus = -0.10
tao_ct_minus = -0.13
tao_ib = -0.06
tao_ras = 0.13
tao_raw = 0.55
with open(os.path.join(EXAMPLE_DIR,'../../element_tag.csv')) as f:
tag_dict = {}
reader = csv.DictReader(f)
for row in reader:
tag_dict[row['name']] = row['tag']
# load epi tags
tricuspid_valve_epi = int(tag_dict['tricuspid_valve_epi'])
superior_vena_cava_epi = int(tag_dict['superior_vena_cava_epi'])
inferior_vena_cava_epi = int(tag_dict['inferior_vena_cava_epi'])
crista_terminalis = int(tag_dict['crista_terminalis'])
inter_caval_bundle_epi = int(tag_dict['inter_caval_bundle_epi'])
right_atrial_lateral_wall_epi = int(tag_dict['right_atrial_wall_epi'])
isthmus_epi = int(tag_dict['isthmus_epi'])
right_atrial_septum_epi = int(tag_dict['right_atrial_septum_epi'])
pectinate_muscle = int(tag_dict['pectinate_muscle'])
right_atrial_appendage_epi = int(tag_dict['right_atrial_appendage_epi'])
# load endo tags
tricuspid_valve_endo = int(tag_dict['tricuspid_valve_endo'])
superior_vena_cava_endo = int(tag_dict['superior_vena_cava_endo'])
inferior_vena_cava_endo = int(tag_dict['inferior_vena_cava_endo'])
inter_caval_bundle_endo = int(tag_dict['inter_caval_bundle_endo'])
right_atrial_lateral_wall_endo = int(tag_dict['right_atrial_wall_endo'])
isthmus_endo = int(tag_dict['isthmus_endo'])
right_atrial_septum_endo = int(tag_dict['right_atrial_septum_endo'])
coronary_sinus = int(tag_dict['coronary_sinus'])
# load bridges tag
bachmann_bundel_right = int(tag_dict['bachmann_bundel_right'])
bachmann_bundel_internal = int(tag_dict['bachmann_bundel_internal'])
# load left atrial wall epi
left_atrial_wall_epi = int(tag_dict['left_atrial_wall_epi'])
# load sinus node
sinus_node = int(tag_dict['sinus_node'])
# number of pectinate muscles
pm_num = 15
    # size (radius) of the crista terminalis in mm
    w_ct = 4.62*args.scale
    # size (radius) of a pectinate muscle in mm
    w_pm = 0.66*args.scale
    # size (radius) of the Bachmann bundle in mm
    w_bb = 2*args.scale
# radius sinus node
r_SN = 2.5*args.scale
# ab
ab = model.GetCellData().GetArray('phie_ab')
ab_grad = model.GetCellData().GetArray('grad_ab')
ab = vtk.util.numpy_support.vtk_to_numpy(ab)
ab_grad = vtk.util.numpy_support.vtk_to_numpy(ab_grad)
# v
v = model.GetCellData().GetArray('phie_v')
v_grad = model.GetCellData().GetArray('grad_v')
v = vtk.util.numpy_support.vtk_to_numpy(v)
v_grad = vtk.util.numpy_support.vtk_to_numpy(v_grad)
# r
r = model.GetCellData().GetArray('phie_r')
r_grad = model.GetCellData().GetArray('grad_r')
r = vtk.util.numpy_support.vtk_to_numpy(r)
r_grad = vtk.util.numpy_support.vtk_to_numpy(r_grad)
# w
w = model.GetCellData().GetArray('phie_w')
w_grad = model.GetCellData().GetArray('grad_w')
w = vtk.util.numpy_support.vtk_to_numpy(w)
w_grad = vtk.util.numpy_support.vtk_to_numpy(w_grad)
# phie
if args.mesh_type == "vol":
phie = model.GetCellData().GetArray('phie_phi')
phie = vtk.util.numpy_support.vtk_to_numpy(phie)
phie_grad = model.GetCellData().GetArray('grad_phi')
phie_grad = vtk.util.numpy_support.vtk_to_numpy(phie_grad)
start_time = datetime.datetime.now()
print('Calculating fibers... ' + str(start_time))
cellid = vtk.vtkIdFilter()
cellid.CellIdsOn()
cellid.SetInputData(model) # vtkPolyData()
cellid.PointIdsOn()
if int(vtk_version) >= 9:
cellid.SetPointIdsArrayName('Global_ids')
cellid.SetCellIdsArrayName('Global_ids')
else:
cellid.SetIdsArrayName('Global_ids')
cellid.Update()
model = cellid.GetOutput()
# TV
tag = np.zeros((len(ab),), dtype = int)
k = np.copy(ab_grad)
# # # Get valve using Laplacian solutions
# TV_s = Method.vtk_thr(model,0,"CELLS","phie_r",tao_tv) # grad_r
# TV_ids = vtk.util.numpy_support.vtk_to_numpy(TV_s.GetCellData().GetArray('Global_ids'))
# no_TV_s = Method.vtk_thr(model, 1,"CELLS","phie_r",tao_tv)
# Use fixed thickness
ring_ids = np.loadtxt('{}_surf/'.format(args.mesh) + 'ids_TV.vtx', skiprows=2, dtype=int)
rings_pts = vtk.util.numpy_support.vtk_to_numpy(model.GetPoints().GetData())[ring_ids,:]
TV_ids = Method.get_element_ids_around_path_within_radius(model, rings_pts, 4*args.scale)
ra_TV = vtk.vtkIdList()
for var in TV_ids:
ra_TV.InsertNextId(var)
extract = vtk.vtkExtractCells()
extract.SetInputData(model)
extract.SetCellList(ra_TV)
extract.Update()
TV_s = extract.GetOutput()
ra_diff = list(set(list(vtk.util.numpy_support.vtk_to_numpy(model.GetCellData().GetArray('Global_ids')))).difference(set(TV_ids)))
ra_no_TV = vtk.vtkIdList()
for var in ra_diff:
ra_no_TV.InsertNextId(var)
extract = vtk.vtkExtractCells()
extract.SetInputData(model)
extract.SetCellList(ra_no_TV)
extract.Update()
no_TV_s = extract.GetOutput()
# del ra_TV, ra_diff, ra_no_TV
tag[TV_ids] = tricuspid_valve_epi
k[TV_ids] = r_grad[TV_ids]
IVC_s = Method.vtk_thr(no_TV_s, 0,"CELLS","phie_v",tao_icv)
no_IVC_s = Method.vtk_thr(no_TV_s, 1,"CELLS","phie_v",tao_icv)
max_phie_r_ivc = np.max(vtk.util.numpy_support.vtk_to_numpy(IVC_s.GetCellData().GetArray('phie_r')))
RAW_s = Method.vtk_thr(no_TV_s, 1,"CELLS","phie_r", max_phie_r_ivc)
SVC_s = Method.vtk_thr(RAW_s, 1,"CELLS","phie_v",tao_scv)
no_SVC_s = Method.vtk_thr(RAW_s, 0,"CELLS","phie_v",tao_scv)
SVC_s = Method.extract_largest_region(SVC_s)
tao_ct_plus = np.min(vtk.util.numpy_support.vtk_to_numpy(SVC_s.GetCellData().GetArray('phie_w')))
SVC_CT_pt = SVC_s.GetPoint(np.argmin(vtk.util.numpy_support.vtk_to_numpy(SVC_s.GetPointData().GetArray('phie_w'))))
tao_ct_minus = np.min(vtk.util.numpy_support.vtk_to_numpy(IVC_s.GetCellData().GetArray('phie_w')))
IVC_CT_pt = IVC_s.GetPoint(np.argmin(vtk.util.numpy_support.vtk_to_numpy(IVC_s.GetPointData().GetArray('phie_w'))))
IVC_SEPT_CT_pt = IVC_s.GetPoint(np.argmax(vtk.util.numpy_support.vtk_to_numpy(IVC_s.GetPointData().GetArray('phie_w'))))
IVC_max_r_CT_pt = IVC_s.GetPoint(np.argmax(vtk.util.numpy_support.vtk_to_numpy(IVC_s.GetPointData().GetArray('phie_r'))))
CT_band = Method.vtk_thr(RAW_s, 2,"CELLS","phie_w", tao_ct_minus-0.01, tao_ct_plus) # grad_w
CT_ub = Method.vtk_thr(RAW_s, 2,"CELLS","phie_w", tao_ct_plus-0.02, tao_ct_plus) # grad_w
CT_ub = Method.extract_largest_region(CT_ub)
geo_filter = vtk.vtkGeometryFilter()
geo_filter.SetInputData(CT_ub)
geo_filter.Update()
mesh_surf = geo_filter.GetOutput()
loc = vtk.vtkPointLocator()
loc.SetDataSet(mesh_surf)
loc.BuildLocator()
IVC_CT_pt_id = loc.FindClosestPoint(np.array(IVC_CT_pt))
SVC_CT_pt_id = loc.FindClosestPoint(np.array(SVC_CT_pt))
CT_ub_pts = Method.dijkstra_path(mesh_surf, IVC_CT_pt_id, SVC_CT_pt_id)
filter_cell_centers = vtk.vtkCellCenters()
filter_cell_centers.SetInputData(CT_band)
filter_cell_centers.Update()
centroids = filter_cell_centers.GetOutput().GetPoints()
centroids_array = vtk.util.numpy_support.vtk_to_numpy(centroids.GetData())
tree = cKDTree(centroids_array)
ii = tree.query_ball_point(CT_ub_pts, r = 7*args.scale, n_jobs=-1)
ii = set([item for sublist in ii for item in sublist])
cell_ids = vtk.vtkIdList()
for i in ii:
cell_ids.InsertNextId(i)
extract = vtk.vtkExtractCells()
extract.SetInputData(CT_band)
extract.SetCellList(cell_ids)
extract.Update()
CT_band = extract.GetOutput()
CT_band_ids = vtk.util.numpy_support.vtk_to_numpy(CT_band.GetCellData().GetArray('Global_ids'))
tao_RAA = np.max(vtk.util.numpy_support.vtk_to_numpy(CT_band.GetCellData().GetArray('phie_v2')))
# CT_ids = vtk.util.numpy_support.vtk_to_numpy(CT_band.GetCellData().GetArray('Global_ids'))
# tag[CT_ids] = crista_terminalis
# CT part from IVC to septum
loc = vtk.vtkPointLocator()
loc.SetDataSet(CT_band)
loc.BuildLocator()
IVC_CT_pt_id = loc.FindClosestPoint(np.array(IVC_CT_pt))
geo_filter = vtk.vtkGeometryFilter()
geo_filter.SetInputData(no_IVC_s)
geo_filter.Update()
no_IVC_s = geo_filter.GetOutput()
loc = vtk.vtkPointLocator()
loc.SetDataSet(no_IVC_s)
loc.BuildLocator()
IVC_CT_pt_id = loc.FindClosestPoint(np.array(CT_band.GetPoint(IVC_CT_pt_id)))
IVC_max_r_CT_pt_id = loc.FindClosestPoint(np.array(IVC_max_r_CT_pt))
IVC_SEPT_CT_pt_id = loc.FindClosestPoint(np.array(IVC_SEPT_CT_pt))
CT_SEPT_path = np.concatenate((Method.dijkstra_path(no_IVC_s, IVC_CT_pt_id, IVC_max_r_CT_pt_id), Method.dijkstra_path(no_IVC_s, IVC_max_r_CT_pt_id, IVC_SEPT_CT_pt_id)), axis=0)
CT_SEPT_ids = Method.get_element_ids_around_path_within_radius(no_IVC_s, CT_SEPT_path, w_ct)
# SVC_CT_pt_id = loc.FindClosestPoint(SVC_CT_pt)
CT_minus = Method.vtk_thr(RAW_s, 1,"CELLS","phie_w", tao_ct_plus) # grad_ab
RAW_I_ids = vtk.util.numpy_support.vtk_to_numpy(CT_minus.GetCellData().GetArray('Global_ids'))
ii = set(RAW_I_ids) - set(CT_SEPT_ids) - set(CT_band_ids)
cell_ids = vtk.vtkIdList()
for i in ii:
cell_ids.InsertNextId(i)
extract = vtk.vtkExtractCells()
extract.SetInputData(CT_minus)
extract.SetCellList(cell_ids)
extract.Update()
CT_minus = extract.GetOutput()
RAW_I_ids = vtk.util.numpy_support.vtk_to_numpy(CT_minus.GetCellData().GetArray('Global_ids'))
tag[RAW_I_ids] = right_atrial_lateral_wall_epi
k[TV_ids] = r_grad[TV_ids]
CT_plus = Method.vtk_thr(RAW_s, 0,"CELLS","phie_w", tao_ct_plus)
RAW_S = Method.vtk_thr(CT_plus, 2,"CELLS","phie_v", tao_scv, tao_icv) # IB_S grad_v
RAW_S_ids = vtk.util.numpy_support.vtk_to_numpy(RAW_S.GetCellData().GetArray('Global_ids'))
tag[RAW_S_ids] = right_atrial_lateral_wall_epi
k[RAW_S_ids] = ab_grad[RAW_S_ids]
IB = Method.vtk_thr(RAW_S, 1,"CELLS","phie_r", 0.05) # grad_r or w
IB_ids = vtk.util.numpy_support.vtk_to_numpy(IB.GetCellData().GetArray('Global_ids'))
tag[IB_ids] = inter_caval_bundle_epi
k[IB_ids] = v_grad[IB_ids]
df = pd.read_csv(args.mesh+"_surf/rings_centroids.csv")
# calculate the norm vector
v1 = np.array(df["IVC"]) - np.array(df["SVC"])
v2 = np.array(df["TV"]) - np.array(df["IVC"])
norm = np.cross(v1, v2)
#normalize norm
n = np.linalg.norm(norm)
norm_1 = norm/n
plane = vtk.vtkPlane()
plane.SetNormal(norm_1[0], norm_1[1], norm_1[2])
plane.SetOrigin(df["TV"][0], df["TV"][1], df["TV"][2])
meshExtractFilter = vtk.vtkExtractGeometry()
meshExtractFilter.SetInputData(RAW_S)
meshExtractFilter.SetImplicitFunction(plane)
meshExtractFilter.Update()
septal_surf = meshExtractFilter.GetOutput()
RAS_S = Method.vtk_thr(septal_surf, 0,"CELLS","phie_w", tao_ct_plus)
RAS_S = Method.vtk_thr(RAS_S, 0,"CELLS","phie_r", 0.05) # grad_r or w
RAS_S_ids = vtk.util.numpy_support.vtk_to_numpy(RAS_S.GetCellData().GetArray('Global_ids'))
tag[RAS_S_ids] = right_atrial_septum_epi
k[RAS_S_ids] = r_grad[RAS_S_ids]
RAW_low = Method.vtk_thr(no_TV_s, 0,"CELLS","phie_r", max_phie_r_ivc)
meshExtractFilter = vtk.vtkExtractGeometry()
meshExtractFilter.SetInputData(RAW_low)
meshExtractFilter.SetImplicitFunction(plane)
meshExtractFilter.Update()
RAS_low = meshExtractFilter.GetOutput()
RAS_low = Method.vtk_thr(RAS_low, 0,"CELLS","phie_w", 0) # grad_r
RAS_low_ids = vtk.util.numpy_support.vtk_to_numpy(RAS_low.GetCellData().GetArray('Global_ids'))
tag[RAS_low_ids] = right_atrial_septum_epi
k[RAS_low_ids] = r_grad[RAS_low_ids]
RAW_low = Method.vtk_thr(RAW_low, 1,"CELLS","phie_w", 0) # grad_ab
RAW_low_ids = vtk.util.numpy_support.vtk_to_numpy(RAW_low.GetCellData().GetArray('Global_ids'))
tag[RAW_low_ids] = right_atrial_lateral_wall_epi
k[RAW_low_ids] = ab_grad[RAW_low_ids]
# calculate the norm vector
#v1 = np.array(IVC_SEPT_CT_pt) - np.array(IVC_CT_pt)
#v2 = np.array(df["TV"]) - np.array(df["IVC"])
#norm = np.cross(v1, v2)
norm = np.array(df["SVC"]) - np.array(df["IVC"])
#normalize norm
n = np.linalg.norm(norm)
norm_1 = norm/n
plane = vtk.vtkPlane()
plane.SetNormal(norm_1[0], norm_1[1], norm_1[2])
plane.SetOrigin(IVC_SEPT_CT_pt[0], IVC_SEPT_CT_pt[1], IVC_SEPT_CT_pt[2])
meshExtractFilter = vtk.vtkExtractGeometry()
meshExtractFilter.SetInputData(no_TV_s)
meshExtractFilter.SetImplicitFunction(plane)
meshExtractFilter.Update()
septal_surf = meshExtractFilter.GetOutput()
CS_ids = vtk.util.numpy_support.vtk_to_numpy(septal_surf.GetCellData().GetArray('Global_ids'))
#if len(CS_ids) == 0:
ring_ids = np.loadtxt('{}_surf/'.format(args.mesh) + 'ids_CS.vtx', skiprows=2, dtype=int)
rings_pts = vtk.util.numpy_support.vtk_to_numpy(model.GetPoints().GetData())[ring_ids,:]
CS_ids = Method.get_element_ids_around_path_within_radius(model, rings_pts, 4*args.scale)
tag[CS_ids] = coronary_sinus
k[CS_ids] = ab_grad[CS_ids]
#tag = Method.assign_ra_appendage(model, SVC_s, np.array(df["RAA"]), tag, right_atrial_appendage_epi)
RAA_s = Method.vtk_thr(no_TV_s, 0,"CELLS","phie_v2", tao_RAA)
RAA_ids = vtk.util.numpy_support.vtk_to_numpy(RAA_s.GetCellData().GetArray('Global_ids'))
tag[RAA_ids] = right_atrial_appendage_epi
loc = vtk.vtkPointLocator()
loc.SetDataSet(CT_band)
loc.BuildLocator()
RAA_CT_pt = CT_band.GetPoint(loc.FindClosestPoint(np.array(df["RAA"])))
# # calculate the norm vector
# v1 = np.array(SVC_CT_pt) - np.array(RAA_CT_pt)
# v2 = np.array(SVC_CT_pt) - np.array(df["TV"])
# norm = np.cross(v1, v2)
# #normalize norm
# n = np.linalg.norm(norm)
# norm_1 = norm/n
# plane = vtk.vtkPlane()
# plane.SetNormal(norm_1[0], norm_1[1], norm_1[2])
# plane.SetOrigin(SVC_CT_pt[0], SVC_CT_pt[1], SVC_CT_pt[2])
# meshExtractFilter = vtk.vtkExtractGeometry()
# meshExtractFilter.SetInputData(CT_band)
# meshExtractFilter.SetImplicitFunction(plane)
# meshExtractFilter.Update()
# CT = meshExtractFilter.GetOutput()
# CT = Method.extract_largest_region(CT)
CT = CT_band
CT_ids = vtk.util.numpy_support.vtk_to_numpy(CT.GetCellData().GetArray('Global_ids'))
CT_ids = np.setdiff1d(CT_ids, RAA_ids, assume_unique = True)
tag[CT_ids] = crista_terminalis
k[CT_ids] = w_grad[CT_ids]
tag[CT_SEPT_ids] = crista_terminalis
SVC_ids = vtk.util.numpy_support.vtk_to_numpy(SVC_s.GetCellData().GetArray('Global_ids'))
tag[SVC_ids] = superior_vena_cava_epi
k[SVC_ids] = v_grad[SVC_ids]
IVC_ids = vtk.util.numpy_support.vtk_to_numpy(IVC_s.GetCellData().GetArray('Global_ids'))
tag[IVC_ids] = inferior_vena_cava_epi
k[IVC_ids] = v_grad[IVC_ids]
tag = np.where(tag == 0, right_atrial_lateral_wall_epi, tag)
SN_ids = Method.get_element_ids_around_path_within_radius(no_SVC_s, np.asarray([SVC_CT_pt]), r_SN)
tag[SN_ids] = sinus_node
# meshNew = dsa.WrapDataObject(model)
# meshNew.CellData.append(tag, "elemTag")
# writer = vtk.vtkUnstructuredGridWriter()
# writer.SetFileName(job.ID+"/result_RA/RA_epi_with_fiber.vtk")
# writer.SetInputData(meshNew.VTKObject)
# writer.Write()
# print('Region growing...to get the tao_ct_minus...')
# # extract septum
# thresh = vtk.vtkThreshold()
# thresh.SetInputData(model)
# thresh.SetInputArrayToProcess(0, 0, 0, "vtkDataObject::FIELD_ASSOCIATION_CELLS", "phie_r")
# thresh.ThresholdByLower(0.6)
# thresh.Update()
# septum = thresh.GetOutput()
# points_data = septum.GetPoints().GetData()
# septum_points = vtk.util.numpy_support.vtk_to_numpy(points_data)
# # extract ICV form septum
# thresh = vtk.vtkThreshold()
# thresh.SetInputData(septum)
# thresh.SetInputArrayToProcess(0, 0, 0, "vtkDataObject::FIELD_ASSOCIATION_CELLS", "phie_v")
# thresh.ThresholdByUpper(tao_icv)
# thresh.Update()
# ICV = thresh.GetOutput()
# ICV = Method.extract_largest_region(ICV)
# points_data = ICV.GetPoints().GetData()
# ICV_points = vtk.util.numpy_support.vtk_to_numpy(points_data)
# # extract SCV form septum
# thresh = vtk.vtkThreshold()
# thresh.SetInputData(septum)
# thresh.SetInputArrayToProcess(0, 0, 0, "vtkDataObject::FIELD_ASSOCIATION_CELLS", "phie_v")
# thresh.ThresholdByLower(tao_scv)
# thresh.Update()
# SCV = thresh.GetOutput()
# SCV = Method.extract_largest_region(SCV)
# points_data = SCV.GetPoints().GetData()
# SCV_points = vtk.util.numpy_support.vtk_to_numpy(points_data)
# """
# region growing to get tao_ct_minus
# """
# # tao_ct_minus and tao_ct_plus
# value = -0.25
# step = 0.005
# touch_icv = 0
# touch_scv = 0
# k = 1
# while touch_icv == 0 or touch_scv == 0:
# thresh = vtk.vtkThreshold()
# thresh.SetInputData(septum)
# thresh.ThresholdByLower(value)
# thresh.SetInputArrayToProcess(0, 0, 0, "vtkDataObject::FIELD_ASSOCIATION_CELLS", "phie_w")
# thresh.Update()
# temp = thresh.GetOutput()
# points_data = temp.GetPoints().GetData()
# temp = vtk.util.numpy_support.vtk_to_numpy(points_data)
# touch_icv = Method.multidim_intersect_bool(ICV_points, temp)
# touch_scv = Method.multidim_intersect_bool(SCV_points, temp)
# print("touch_icv: ", touch_icv)
# print("touch_scv: ", touch_scv)
# if touch_icv == 0 or touch_scv == 0:
# value += step
# print("Iteration: ", k)
# print("Value of tao_ct_minus: ", value)
# k += 1
# tao_ct_minus = value + 0.001
# print('Region growing...to get the tao_ct_minus...done')
# print("Final tao_ct_minus: ", tao_ct_minus)
# tao_ct_plus = tao_ct_minus + 0.02
# print("Final tao_ct_plus: ", tao_ct_plus)
# tag = np.zeros(len(w))
# print('Bundles selection...')
# #### Bundles selection ####
# for i in range(len(ab_grad)):
# if r[i] >= tao_tv:
# ab_grad[i] = r_grad[i]
# tag[i] = tricuspid_valve_epi
# else:
# if r[i] < tao_raw:
# if w[i] >= tao_ct_minus and w[i] <= tao_ct_plus:
# # ab_grad[i] = w_grad[i]
# tag[i] = crista_terminalis
# elif w[i] <= tao_ct_minus:
# if v[i] >= tao_icv or v[i] <= tao_scv:
# ab_grad[i] = v_grad[i]
# if v[i] >= tao_icv:
# tag[i] = inferior_vena_cava_epi
# if v[i] <= tao_scv:
# tag[i] = superior_vena_cava_epi
# else:
# tag[i] = right_atrial_lateral_wall_epi
# else:
# if v[i] >= tao_icv or v[i] <= tao_scv:
# ab_grad[i] = v_grad[i]
# if v[i] >= tao_icv:
# tag[i] = inferior_vena_cava_epi
# if v[i] <= tao_scv:
# tag[i] = superior_vena_cava_epi
# else:
# if w[i] < tao_ib:
# ab_grad[i] = v_grad[i]
# tag[i] = inter_caval_bundle_epi
# elif w[i] > tao_ras:
# ab_grad[i] = r_grad[i] #right_atrial_septum_lower_epi
# tag[i] = right_atrial_septum_epi
# # tag[i] =120
# else:
# ab_grad[i] = r_grad[i] #right_atrial_septum_upper_epi
# tag[i] = right_atrial_septum_epi
# # tag[i] = 130
# else:
# if v[i] >= tao_icv or v[i] <= tao_scv:
# ab_grad[i] = v_grad[i]
# if v[i] >= tao_icv:
# tag[i] = inferior_vena_cava_epi
# if v[i] <= tao_scv:
# tag[i] = superior_vena_cava_epi
# else:
# if w[i] >= 0:
# ab_grad[i] = r_grad[i] #right_atrial_septum_lower_epi
# tag[i] = right_atrial_septum_epi
# # tag[i] = 140
# else:
# tag[i] = right_atrial_lateral_wall_epi
# if v[i] >= tao_icv or v[i] <= tao_scv:
# ab_grad[i] = v_grad[i]
# if v[i] >= tao_icv:
# tag[i] = inferior_vena_cava_epi
# if v[i] <= tao_scv:
# tag[i] = superior_vena_cava_epi
# # tag = Method.assign_ra_appendage(model, SCV, ra_appex_point, tag, right_atrial_appendage_epi)
# meshNew = dsa.WrapDataObject(model)
# meshNew.CellData.append(tag, "elemTag")
# writer = vtk.vtkUnstructuredGridWriter()
# writer.SetFileName(job.ID+"/result_RA/RA_Tianbao_epi_with_fiber.vtk")
# writer.SetInputData(meshNew.VTKObject)
# writer.Write()
print('Bundles selection...done')
# normalize the gradient phie
abs_phie_grad = np.linalg.norm(phie_grad, axis=1, keepdims=True)
abs_phie_grad = np.where(abs_phie_grad != 0, abs_phie_grad, 1)
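    # Zero-length gradients would cause a divide-by-zero; replacing their norm
    # with 1 leaves those rows as zero vectors after normalization.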
phie_grad_norm = phie_grad / abs_phie_grad
##### Local coordinate system #####
# et
et = phie_grad_norm
print('############### et ###############')
# print(et)
# k
#k = ab_grad
print('############### k ###############')
# print(k)
# en
#en = ab_grad
# for i in range(len(k)):
# en[i] = k[i] - np.dot(k[i], et[i]) * et[i]
en = k - et*np.sum(k*et,axis=1).reshape(len(et),1)
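    # en = k with its component along et removed (row-wise Gram-Schmidt step),
    # i.e. the part of k lying in the plane orthogonal to et.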
# normlize the en
# abs_en = np.linalg.norm(en, axis=1, keepdims=True)
# for i in range(len(abs_en)):
# if abs_en[i] == 0:
# abs_en[i] =1
# en = en/abs_en
abs_en = np.linalg.norm(en, axis=1, keepdims=True)
abs_en = np.where(abs_en != 0, abs_en, 1)
en = en / abs_en
print('############### en ###############')
# print(en)
# el
el = np.cross(en, et)
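    # el is orthogonal to both en and et and completes the local frame; it is
    # the direction used below as the element "fiber" vector.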
el = Method.assign_element_fiber_around_path_within_radius(model, CT_SEPT_path, w_ct, el, smooth=True)
el = np.where(el == [0,0,0], [1,0,0], el).astype("float32")
print('############### el ###############')
# print(el)
end_time = datetime.datetime.now()
running_time = end_time - start_time
print('Calculating epicardial fibers... done! ' + str(end_time) + '\nIt takes: ' + str(running_time) + '\n')
if args.mesh_type == "bilayer":
sheet = np.cross(el, et)
for i in range(model.GetPointData().GetNumberOfArrays()-1, -1, -1):
model.GetPointData().RemoveArray(model.GetPointData().GetArrayName(i))
for i in range(model.GetCellData().GetNumberOfArrays()-1, -1, -1):
model.GetCellData().RemoveArray(model.GetCellData().GetArrayName(i))
meshNew = dsa.WrapDataObject(model)
meshNew.CellData.append(tag, "elemTag")
meshNew.CellData.append(el, "fiber")
meshNew.CellData.append(sheet, "sheet")
writer = vtk.vtkUnstructuredGridWriter()
if args.ofmt == 'vtk':
writer = vtk.vtkUnstructuredGridWriter()
writer.SetFileName(job.ID+"/result_RA/RA_epi_with_fiber.vtk")
writer.SetFileTypeToBinary()
else:
writer = vtk.vtkXMLUnstructuredGridWriter()
writer.SetFileName(job.ID+"/result_RA/RA_epi_with_fiber.vtu")
writer.SetInputData(meshNew.VTKObject)
writer.Write()
"""
PM and CT
"""
cellid = vtk.vtkIdFilter()
cellid.CellIdsOn()
cellid.SetInputData(meshNew.VTKObject) # vtkPolyData()
cellid.PointIdsOn()
if int(vtk_version) >= 9:
cellid.SetPointIdsArrayName('Global_ids')
cellid.SetCellIdsArrayName('Global_ids')
else:
cellid.SetIdsArrayName('Global_ids')
cellid.Update()
model = cellid.GetOutput()
endo = vtk.vtkUnstructuredGrid()
endo.DeepCopy(model)
CT = Method.vtk_thr(model, 2,"CELLS","elemTag", crista_terminalis, crista_terminalis)
CT_ids = vtk.util.numpy_support.vtk_to_numpy(CT.GetCellData().GetArray('Global_ids'))
elif args.mesh_type == "vol":
CT_id_list = vtk.vtkIdList()
for var in CT_ids:
CT_id_list.InsertNextId(var)
for var in CT_SEPT_ids:
CT_id_list.InsertNextId(var)
extract = vtk.vtkExtractCells()
extract.SetInputData(model)
extract.SetCellList(CT_id_list)
extract.Update()
CT = extract.GetOutput()
CT_ids = vtk.util.numpy_support.vtk_to_numpy(CT.GetCellData().GetArray('Global_ids'))
if args.debug:
meshNew = dsa.WrapDataObject(model)
meshNew.CellData.append(tag, "elemTag")
meshNew.CellData.append(el, "fiber")
writer = vtk.vtkUnstructuredGridWriter()
if args.ofmt == 'vtk':
writer = vtk.vtkUnstructuredGridWriter()
writer.SetFileName(job.ID+"/result_RA/RA_epi_with_fiber.vtk")
writer.SetFileTypeToBinary()
else:
writer = vtk.vtkXMLUnstructuredGridWriter()
writer.SetFileName(job.ID+"/result_RA/RA_epi_with_fiber.vtu")
writer.SetInputData(meshNew.VTKObject)
writer.Write()
center = np.asarray((np.array(df["SVC"])+np.array(df["IVC"]))/2)
loc = vtk.vtkPointLocator()
loc.SetDataSet(CT)
loc.BuildLocator()
point1_id = loc.FindClosestPoint(IVC_max_r_CT_pt)
point2_id = loc.FindClosestPoint(SVC_CT_pt)
loc = vtk.vtkPointLocator()
loc.SetDataSet(TV_s)
loc.BuildLocator()
point3_id = loc.FindClosestPoint(IVC_max_r_CT_pt)
point4_id = loc.FindClosestPoint(np.array(df["RAA"])) # this is also the id for Bachmann-Bundle on the right atrium
geo_filter = vtk.vtkGeometryFilter()
geo_filter.SetInputData(CT)
geo_filter.Update()
CT = geo_filter.GetOutput()
# calculate the norm vector
v1 = np.array(df["IVC"]) - np.array(df["SVC"])
v2 = np.array(df["TV"]) -
|
np.array(df["IVC"])
|
numpy.array
|
#!/usr/bin/env python
# CCDPROC code
import matplotlib.pyplot as plt
from astropy.io import fits
import numpy as np
import os
from glob import glob
import time
#import matplotlib
#matplotlib.use('nbagg')
def ccdlist(input=None):
if input is None: input='*.fits'
files = glob(input)
nfiles = len(files)
    dt = np.dtype([('file',str,100),('object',str,100),('naxis1',int),('naxis2',int),
                   ('imagetyp',str,100),('exptime',float),('filter',str,100)])
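    # One catalog row per FITS file: filename, object name, image dimensions
    # (naxis1/naxis2), image type, exposure time and filter.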
cat =
|
np.zeros(nfiles,dtype=dt)
|
numpy.zeros
|
"""This script contains code to support creation of photometric sourcelists using two techniques:
aperture photometry and segmentation-map based photometry."""
import copy
import pickle # FIX Remove
import sys
from distutils.version import LooseVersion
from astropy.io import fits as fits
from astropy.stats import sigma_clipped_stats
from astropy.table import Column, MaskedColumn, Table, join, vstack
from astropy.convolution import RickerWavelet2DKernel
from astropy.coordinates import SkyCoord
import numpy as np
from scipy import ndimage, stats
import photutils # needed to check version
if LooseVersion(photutils.__version__) < '1.1.0':
OLD_PHOTUTILS = True
from photutils.segmentation import (detect_sources, source_properties,
deblend_sources)
else:
OLD_PHOTUTILS = False
from photutils.segmentation import (detect_sources, SourceCatalog,
deblend_sources)
from photutils.aperture import CircularAperture, CircularAnnulus
from photutils.background import (Background2D, SExtractorBackground,
StdBackgroundRMS)
from photutils.detection import DAOStarFinder, IRAFStarFinder
from photutils.utils import calc_total_error
from stsci.tools import logutil
from stwcs.wcsutil import HSTWCS
from . import astrometric_utils
from . import photometry_tools
from . import deconvolve_utils as decutils
from . import processing_utils as proc_utils
try:
from matplotlib import pyplot as plt
except Exception:
plt = None
CATALOG_TYPES = ['aperture', 'segment']
if OLD_PHOTUTILS:
id_colname = 'id'
flux_colname = 'source_sum'
ferr_colname = 'source_sum_err'
bac_colname = 'background_at_centroid'
else:
id_colname = 'label'
flux_colname = 'segment_flux'
ferr_colname = 'segment_fluxerr'
bac_colname = 'background_centroid'
__taskname__ = 'catalog_utils'
MSG_DATEFMT = '%Y%j%H%M%S'
SPLUNK_MSG_FORMAT = '%(asctime)s %(levelname)s src=%(name)s- %(message)s'
log = logutil.create_logger(__name__, level=logutil.logging.NOTSET, stream=sys.stdout,
format=SPLUNK_MSG_FORMAT, datefmt=MSG_DATEFMT)
class CatalogImage:
def __init__(self, filename, num_images_mask, log_level):
# set logging level to user-specified level
log.setLevel(log_level)
if isinstance(filename, str):
self.imghdu = fits.open(filename)
self.imgname = filename
else:
self.imghdu = filename
self.imgname = filename.filename()
# This is the "footprint_mask" of the total product object which indicates
# the number of images which comprise each individual pixel
self.num_images_mask = num_images_mask
# Get header information to annotate the output catalogs
if "total" in self.imgname:
self.ghd_product = "tdp"
else:
self.ghd_product = "fdp"
# Fits file read
self.data = self.imghdu[('SCI', 1)].data
self.wht_image = self.imghdu['WHT'].data.copy()
# Get the HSTWCS object from the first extension
self.imgwcs = HSTWCS(self.imghdu, 1)
self.keyword_dict = self._get_header_data()
# Populated by self.compute_background()
self.bkg_background_ra = None
self.bkg_rms_ra = None
self.bkg_rms_median = None
self.footprint_mask = None
self.inv_footprint_mask = None
self.bkg_type = ""
# Populated by self.build_kernel()
self.kernel = None
self.kernel_fwhm = None
self.kernel_psf = False
def close(self):
self.imghdu.close()
self.bkg_background_ra = None
self.bkg_rms_ra = None
self.bkg_rms_median = None
# Finished with wht_image, clean up memory immediately...
del self.wht_image
self.wht_image = None
def build_kernel(self, box_size, win_size, fwhmpsf,
simple_bkg=False,
bkg_skew_threshold=0.5,
zero_percent=25.0,
negative_percent=15.0,
nsigma_clip=3.0,
maxiters=3,
good_fwhm=[1.5, 3.5]):
if self.bkg_background_ra is None:
self.compute_background(box_size, win_size,
simple_bkg=simple_bkg,
bkg_skew_threshold=bkg_skew_threshold,
zero_percent=zero_percent,
negative_percent=negative_percent,
nsigma_clip=nsigma_clip,
maxiters=maxiters)
log.info("Attempt to determine FWHM based upon input data within a good FWHM range of {:.1f} to {:.1f}.".format(good_fwhm[0], good_fwhm[1]))
log.info("If no good FWHM candidate is identified, a value of {:.1f} will be used instead.".format(fwhmpsf / self.imgwcs.pscale))
k, self.kernel_fwhm = astrometric_utils.build_auto_kernel(self.data,
self.wht_image,
good_fwhm=good_fwhm,
num_fwhm=30,
threshold=self.bkg_rms_ra,
fwhm=fwhmpsf / self.imgwcs.pscale)
(self.kernel, self.kernel_psf) = k
def compute_background(self, box_size, win_size,
bkg_estimator=SExtractorBackground, rms_estimator=StdBackgroundRMS,
simple_bkg=False,
bkg_skew_threshold=0.5,
zero_percent=25.0,
negative_percent=15.0,
nsigma_clip=3.0,
maxiters=3):
"""Use a sigma-clipped algorithm or Background2D to determine the background of the input image.
Parameters
----------
image : ndarray
Numpy array of the science extension from the observations FITS file.
box_size : int
Size of box along each axis
win_size : int
Size of 2D filter to apply to the background image
bkg_estimator : subroutine
background estimation algorithm
rms_estimator : subroutine
RMS estimation algorithm
simple_bkg : bool, optional
Forces use of the sigma_clipped_stats algorithm
bkg_skew_threshold : float, optional
            Discriminator on the skewness computation - above this limit the Background2D algorithm
            will be computed for potential use in the background determination, otherwise
            the sigma_clipped_stats algorithm is used.
zero_percent : float, optional
Discriminator on the input image. The percentage of zero values in the illuminated portion
of the input image is determined - if there are more zero values than this lower limit, then
the background is set to an image of constant value zero and the background rms is computed
based on the pixels which are non-zero in the illuminated portion of the input image.
negative_percent : float, optional
Discriminator on the background-subtracted image. The percentage of negative values in the
background-subtracted image is determined - below this limit the Background2D algorithm stays in play,
otherwise the sigma_clipped_stats algorithm is used.
nsigma_clip : float, optional
Parameter for the sigma_clipped_stats algorithm - number of standard deviations to use for both
the lower and upper clipping limit.
maxiters : float, optional
Parameter for the sigma_clipped_stats algorithm - number of sigma-clipping iterations to perform
Attributes
----------
self.bkg_background_ra : 2D ndarray
Background array
self.bkg_rms_ra : 2D ndarray
RMS map array
self.bkg_median : float
background median value over entire 2D array
self.bkg_rms_median : float
background rms value over entire 2D array
        self.footprint_mask : bool 2D ndarray
            Footprint of the input image, set to True for the illuminated portion and False for
            the non-illuminated portion
        self.inv_footprint_mask : bool 2D ndarray
            Inverse of the footprint_mask
"""
# Negative allowance in sigma
negative_sigma = -1.0
# Report configuration values to log
log.info("")
log.info("Background Computation")
log.info("File: {}".format(self.imgname))
log.info("Zero threshold: {}".format(zero_percent))
log.info("Sigma-clipped Background Configuration Variables")
log.info(" Negative percent threshold: {}".format(negative_percent))
log.info(" Negative sigma: {}".format(negative_sigma))
log.info(" Nsigma: {}".format(nsigma_clip))
log.info(" Number of iterations: {}".format(maxiters))
log.info("Background2D Configuration Variables")
log.info(" Box size: {}".format(box_size))
log.info(" Window size: {}".format(win_size))
log.info("Background discriminant - skew threshold: {}".format(bkg_skew_threshold))
        # SExtractorBackground and StdBackgroundRMS are the defaults
bkg = None
is_zero_background_defined = False
# Make a local copy of the data(image) being processed in order to reset any
# data values which equal nan (e.g., subarrays) to zero.
imgdata = np.nan_to_num(self.data, copy=True, nan=0.0)
# In order to compute the proper statistics on the input data, need to use the footprint
# mask to get the actual data - illuminated portion (True), non-illuminated (False).
footprint_mask = self.num_images_mask > 0
self.footprint_mask = ndimage.binary_erosion(footprint_mask, iterations=10)
self.inv_footprint_mask = np.invert(self.footprint_mask)
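        # The illuminated footprint is eroded by 10 iterations to trim its edge
        # before statistics are computed; the inverse mask marks pixels to exclude.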
# If the image contains a lot of values identically equal to zero (as in some SBC images),
# set the two-dimensional background image to a constant of zero and the background rms to
# the real rms of the non-zero values in the image.
num_of_illuminated_pixels = self.footprint_mask.sum()
num_of_zeros = np.count_nonzero(imgdata[self.footprint_mask] == 0)
non_zero_pixels = imgdata[self.footprint_mask]
# BACKGROUND COMPUTATION 1 (unusual case)
# If there are too many background zeros in the image (> number_of_zeros_in_background_threshold), set the
# background median and background rms values
if num_of_zeros / float(num_of_illuminated_pixels) * 100.0 > zero_percent:
self.bkg_median = 0.0
self.bkg_rms_median = stats.tstd(non_zero_pixels, limits=[0, None], inclusive=[False, True])
self.bkg_background_ra = np.full_like(imgdata, 0.0)
self.bkg_rms_ra = np.full_like(imgdata, self.bkg_rms_median)
self.bkg_type = 'zero_background'
is_zero_background_defined = True
log.info("Input image contains excessive zero values in the background. Median: {} RMS: {}".format(self.bkg_median, self.bkg_rms_median))
# BACKGROUND COMPUTATION 2 (sigma_clipped_stats)
# If the input data is not the unusual case of an "excessive zero background", compute
# a sigma-clipped background which returns only single values for mean,
# median, and standard deviations
if not is_zero_background_defined:
log.info("")
log.info("Computing the background using sigma-clipped statistics algorithm.")
bkg_mean_full, bkg_median_full, bkg_rms_full = sigma_clipped_stats(imgdata,
self.inv_footprint_mask,
sigma=nsigma_clip,
cenfunc='median',
maxiters=maxiters)
# guard against median being negative (can happen for mostly nebulous fields)
if bkg_median_full < 0.0:
# Recompute after adjusting input image data so that entire image is positive
# This corrects for any gross over-subtraction of the background from the image
imgdata -= (bkg_median_full - bkg_rms_full)
bkg_mean_full, bkg_median_full, bkg_rms_full = sigma_clipped_stats(imgdata,
self.inv_footprint_mask,
sigma=nsigma_clip,
cenfunc='median',
maxiters=maxiters)
# Compute Pearson’s second coefficient of skewness - this is a criterion
# for possibly computing a two-dimensional background fit
# Use the "raw" values generated by sigma_clipped_stats()
# based on full unmasked image
bkg_skew = np.abs(3.0 * (bkg_mean_full - bkg_median_full) / bkg_rms_full)
log.info("Sigma-clipped computed skewness: {0:.2f}".format(bkg_skew))
# Refine background to better compute the median value
imgnz = imgdata * self.footprint_mask
imgnz = imgnz[imgnz > 0.0] # only want non-negative values
imgvals = imgnz[imgnz < (bkg_median_full + (bkg_rms_full * 0.1))]
bkg_mean, bkg_median, bkg_rms = sigma_clipped_stats(imgvals,
None,
sigma=nsigma_clip,
cenfunc='median')
log.info("Sigma-clipped Statistics - Background mean: {} median: {} rms: {}".format(bkg_mean, bkg_median, bkg_rms))
log.info("")
# Ensure the computed values are not negative
if bkg_mean < 0.0 or bkg_median < 0.0 or bkg_rms < 0.0:
bkg_mean = max(0, bkg_mean)
bkg_median = max(0, bkg_median)
bkg_rms = max(0, bkg_rms)
log.info("UPDATED Sigma-clipped Statistics - Background mean: {} median: {} rms: {}".format(bkg_mean, bkg_median, bkg_rms))
log.info("")
# Compute a minimum rms value based upon information directly from the data
if self.keyword_dict["detector"].upper() != "SBC":
minimum_rms = self.keyword_dict['atodgn'] * self.keyword_dict['readnse'] \
* self.keyword_dict['ndrizim'] / self.keyword_dict['texpo_time']
# Compare a minimum rms based upon input characteristics versus the one computed and use
# the larger of the two values.
if (bkg_rms < minimum_rms):
bkg_rms = minimum_rms
log.info("Mimimum RMS of input based upon the readnoise, gain, number of exposures, and total exposure time: {}".format(minimum_rms))
log.info("Sigma-clipped RMS has been updated - Background mean: {} median: {} rms: {}".format(bkg_mean, bkg_median, bkg_rms))
log.info("")
# Generate two-dimensional background and rms images with the attributes of
# the input data, but the content based on the sigma-clipped statistics.
# bkg_median ==> background and bkg_rms ==> background rms
self.bkg_background_ra = np.full_like(imgdata, bkg_median)
self.bkg_rms_ra = np.full_like(imgdata, bkg_rms)
self.bkg_median = bkg_median
self.bkg_rms_median = bkg_rms
self.bkg_type = 'sigma_clipped_background'
negative_threshold = negative_sigma * bkg_rms
# BACKGROUND COMPUTATION 3 (Background2D)
# The simple_bkg = True is the way to force the background to be computed with the
# sigma-clipped algorithm, regardless of any other criterion. If simple_bkg == True,
# the compute_background() is done, otherwise try to use Background2D to compute the background.
if not simple_bkg and not is_zero_background_defined:
# If the sigma-clipped background image skew is greater than the threshold,
# compute a two-dimensional background fit. A larger skew implies
# more sources in the field, which requires a more complex background.
if bkg_skew > bkg_skew_threshold:
log.info("Computing the background using the Background2D algorithm.")
exclude_percentiles = [10, 25, 50, 75]
for percentile in exclude_percentiles:
log.info("Percentile in use: {}".format(percentile))
try:
bkg = Background2D(imgdata, (box_size, box_size), filter_size=(win_size, win_size),
bkg_estimator=bkg_estimator(),
bkgrms_estimator=rms_estimator(),
exclude_percentile=percentile, edge_method="pad",
coverage_mask=self.inv_footprint_mask)
except Exception:
bkg = None
continue
if bkg is not None:
bkg_background_ra = bkg.background
bkg_rms_ra = bkg.background_rms
bkg_rms_median = bkg.background_rms_median
bkg_median = bkg.background_median
negative_threshold = negative_sigma * bkg.background_rms_median
break
# If computation of a two-dimensional background image were successful, compute the
# background-subtracted image and evaluate it for the number of negative values.
#
# If bkg is None, use the sigma-clipped statistics for the background.
# If bkg is not None, but the background-subtracted image is too negative, use the
# sigma-clipped computation for the background.
if bkg is not None:
imgdata_bkgsub = imgdata - bkg_background_ra
# Determine how much of the illuminated portion of the background subtracted
# image is negative
num_negative = np.count_nonzero(imgdata_bkgsub[self.footprint_mask] < negative_threshold)
negative_ratio = num_negative / num_of_illuminated_pixels
del imgdata_bkgsub
# Report this information so the relative percentage and the threshold are known
log.info("Percentage of negative values in the background subtracted image {0:.2f} vs low threshold of {1:.2f}.".format(100.0 * negative_ratio, negative_percent))
# If the background subtracted image has too many negative values which may be
# indicative of large negative regions, the two-dimensional computed background
# fit image should NOT be used. Use the sigma-clipped data instead.
if negative_ratio * 100.0 > negative_percent:
log.info("Percentage of negative values {0:.2f} in the background subtracted image exceeds the threshold of {1:.2f}.".format(100.0 * negative_ratio, negative_percent))
log.info("")
log.info("*** Use the background image determined from the sigma_clip algorithm. ***")
# Update the class variables with the background fit data
else:
self.bkg_background_ra = bkg_background_ra.copy()
self.bkg_rms_ra = bkg_rms_ra.copy()
self.bkg_rms_median = bkg_rms_median
self.bkg_median = bkg_median
self.bkg_type = 'twod_background'
log.info("")
log.info("*** Use the background image determined from the Background2D. ***")
del bkg_background_ra, bkg_rms_ra
# Skewness of sigma_clipped background exceeds threshold
else:
log.info("*** Use the background image determined from the sigma_clip algorithm based upon skewness. ***")
# User requested simple background == sigma_clip algorithm
else:
log.info("*** User requested the sigma_clip algorithm to determine the background image. ***")
log.info("")
log.info("Computation of image background complete")
log.info("Found: ")
log.info(" Median background: {}".format(self.bkg_median))
log.info(" Median RMS background: {}".format(self.bkg_rms_median))
log.info("")
del bkg, imgdata
def _get_header_data(self):
"""Read FITS keywords from the primary or extension header and store the
information in a dictionary
Returns
-------
keyword_dict : dictionary
dictionary of keyword values
"""
keyword_dict = {}
keyword_dict["proposal_id"] = self.imghdu[0].header["PROPOSID"]
keyword_dict["image_file_name"] = self.imghdu[0].header['FILENAME'].upper()
keyword_dict["target_name"] = self.imghdu[0].header["TARGNAME"].upper()
keyword_dict["date_obs"] = self.imghdu[0].header["DATE-OBS"]
keyword_dict["time_obs"] = self.imghdu[0].header["TIME-OBS"]
keyword_dict["instrument"] = self.imghdu[0].header["INSTRUME"].upper()
keyword_dict["detector"] = self.imghdu[0].header["DETECTOR"].upper()
keyword_dict["target_ra"] = self.imghdu[0].header["RA_TARG"]
keyword_dict["target_dec"] = self.imghdu[0].header["DEC_TARG"]
keyword_dict["expo_start"] = self.imghdu[0].header["EXPSTART"]
keyword_dict["texpo_time"] = self.imghdu[0].header["TEXPTIME"]
keyword_dict["exptime"] = self.imghdu[0].header["EXPTIME"]
keyword_dict["ndrizim"] = self.imghdu[0].header["NDRIZIM"]
if keyword_dict["detector"].upper() != "SBC":
keyword_dict["ccd_gain"] = self.imghdu[0].header["CCDGAIN"]
keyword_dict["readnse"] = self._get_max_key_value(self.imghdu[0].header, 'READNSE')
keyword_dict["atodgn"] = self._get_max_key_value(self.imghdu[0].header, 'ATODGN')
keyword_dict["aperture_pa"] = self.imghdu[0].header["PA_V3"]
keyword_dict["gain_keys"] = [self.imghdu[0].header[k[:8]] for k in self.imghdu[0].header["ATODGN*"]]
# The total detection product has the FILTER keyword in
# the primary header - read it for any instrument.
#
# For the filter detection product:
# WFC3 only has FILTER, but ACS has FILTER1 and FILTER2
# in the primary header.
if self.ghd_product.lower() == "tdp":
keyword_dict["filter1"] = self.imghdu[0].header["FILTER"]
# The filter detection product...
else:
if keyword_dict["instrument"] == "ACS":
keyword_dict["filter1"] = self.imghdu[0].header["FILTER1"]
keyword_dict["filter2"] = self.imghdu[0].header["FILTER2"]
else:
keyword_dict["filter1"] = self.imghdu[0].header["FILTER"]
keyword_dict["filter2"] = ""
# Get the HSTWCS object from the first extension
keyword_dict["wcs_name"] = self.imghdu[1].header["WCSNAME"]
keyword_dict["wcs_type"] = self.imghdu[1].header["WCSTYPE"]
keyword_dict["orientation"] = self.imghdu[1].header["ORIENTAT"]
keyword_dict["aperture_ra"] = self.imghdu[1].header["RA_APER"]
keyword_dict["aperture_dec"] = self.imghdu[1].header["DEC_APER"]
keyword_dict["photflam"] = proc_utils.find_flt_keyword(self.imghdu, "PHOTFLAM")
keyword_dict["photplam"] = proc_utils.find_flt_keyword(self.imghdu, "PHOTPLAM")
return keyword_dict
def _get_max_key_value(self, header, root_of_keyword):
"""Read FITS keywords with the same prefix from primary header and return the maximum value
Parameters
----------
header : hdu
The header of a FITS hdu
root_of_keyword : str
The common root portion of a FITS keyword (e.g., READNSE for READNSE[A-D])
Returns
-------
max_value : float
The maximum value or 1.0 of the keywords examined
"""
max_value = max(header[root_of_keyword + "*"].values(), default=1.0)
return max_value
class HAPCatalogs:
"""Generate photometric sourcelist for specified TOTAL or FILTER product image.
"""
crfactor = {'aperture': 300, 'segment': 150} # CRs / hr / 4kx4k pixels
def __init__(self, fitsfile, param_dict, param_dict_qc, num_images_mask, log_level, diagnostic_mode=False, types=None,
tp_sources=None):
# set logging level to user-specified level
log.setLevel(log_level)
self.label = "HAPCatalogs"
self.description = "A class used to generate photometric sourcelists using aperture photometry"
self.imgname = fitsfile
self.param_dict = param_dict
self.param_dict_qc = param_dict_qc
self.diagnostic_mode = diagnostic_mode
self.tp_sources = tp_sources # <---total product catalogs.catalogs[*].sources
# Determine what types of catalogs have been requested
if not isinstance(types, list) and types in [None, 'both']:
types = CATALOG_TYPES
elif types == 'aperture' or types == 'segment':
types = [types]
else:
if any([t not in CATALOG_TYPES for t in types]):
log.error("Catalog types {} not supported. Only {} are valid.".format(types, CATALOG_TYPES))
raise ValueError
self.types = types
# Get various configuration variables needed for the background computation
# Compute the background for this image
self.image = CatalogImage(fitsfile, num_images_mask, log_level)
self.image.compute_background(self.param_dict['bkg_box_size'],
self.param_dict['bkg_filter_size'],
simple_bkg=self.param_dict['simple_bkg'],
bkg_skew_threshold=self.param_dict['bkg_skew_threshold'],
zero_percent=self.param_dict['zero_percent'],
negative_percent=self.param_dict['negative_percent'],
nsigma_clip=self.param_dict['nsigma_clip'],
maxiters=self.param_dict['maxiters'])
self.image.build_kernel(self.param_dict['bkg_box_size'], self.param_dict['bkg_filter_size'],
self.param_dict['dao']['TWEAK_FWHMPSF'],
self.param_dict['simple_bkg'],
self.param_dict['bkg_skew_threshold'],
self.param_dict['zero_percent'],
self.param_dict['negative_percent'],
self.param_dict['nsigma_clip'],
self.param_dict['maxiters'],
self.param_dict['good_fwhm'])
# Initialize all catalog types here...
# This does NOT identify or measure sources to create the catalogs at this point...
        # The syntax here is EXTREMELY kludgy, but until a more compact way to do this is found,
        # it will have to do...
self.catalogs = {}
if 'segment' in self.types:
self.catalogs['segment'] = HAPSegmentCatalog(self.image, self.param_dict, self.param_dict_qc,
self.diagnostic_mode, tp_sources=tp_sources)
if 'aperture' in self.types:
self.catalogs['aperture'] = HAPPointCatalog(self.image, self.param_dict, self.param_dict_qc,
self.diagnostic_mode, tp_sources=tp_sources)
self.filters = {}
def identify(self, **pars):
"""Build catalogs for this image.
Parameters
----------
types : list
List of catalog types to be generated. If None, build all available catalogs.
Supported types of catalogs include: 'aperture', 'segment'.
"""
# Support user-input value of 'None' which will trigger generation of all catalog types
for catalog in self.catalogs:
log.info("")
log.info("Identifying {} sources".format(catalog))
self.catalogs[catalog].identify_sources(**pars)
def verify_crthresh(self, n1_exposure_time):
"""Verify whether catalogs meet cosmic-ray threshold limits.
... note : If either catalog fails the following test, then both are rejected.
n_cat < thresh
where
thresh = crfactor * n1_exposure_time**2 / texptime
"""
for cat_type in self.catalogs:
crthresh_mask = None
source_cat = self.catalogs[cat_type].sources if cat_type == 'aperture' else self.catalogs[cat_type].source_cat
flag_cols = [colname for colname in source_cat.colnames if colname.startswith('Flag')]
for colname in flag_cols:
catalog_crmask = source_cat[colname] < 2
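                # Flag values 0 (stellar) and 1 (extended) count as good
                # detections; flags >= 2 (saturation, hot pixels, edge/CR
                # artifacts, etc.) are excluded from the good-source count.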
if crthresh_mask is None:
crthresh_mask = catalog_crmask
else:
# Combine masks for all filters for this catalog type
crthresh_mask = np.bitwise_or(crthresh_mask, catalog_crmask)
source_cat.sources_num_good = len(np.where(crthresh_mask)[0])
reject_catalogs = False
log.info("Determining whether point and/or segment catalogs meet cosmic-ray threshold")
log.info(" based on EXPTIME = {}sec for the n=1 filters".format(n1_exposure_time))
for cat_type in self.catalogs:
source_cat = self.catalogs[cat_type]
if source_cat.sources:
thresh = self.crfactor[cat_type] * n1_exposure_time**2 / self.image.keyword_dict['texpo_time']
source_cat = source_cat.sources if cat_type == 'aperture' else source_cat.source_cat
n_sources = source_cat.sources_num_good # len(source_cat)
all_sources = len(source_cat)
log.info("{} catalog with {} good sources out of {} total sources : CR threshold = {}".format(cat_type, n_sources, all_sources, thresh))
if n_sources < thresh:
reject_catalogs = True
log.info("{} catalog FAILED CR threshold. Rejecting both catalogs...".format(cat_type))
break
return reject_catalogs
def measure(self, filter_name, **pars):
"""Perform photometry and other measurements on sources for this image.
Parameters
----------
types : list
List of catalog types to be generated. If None, build all available catalogs.
Supported types of catalogs include: 'aperture', 'segment'.
"""
# Make sure we at least have a default 2D background computed
for catalog in self.catalogs.values():
if catalog.sources is None:
catalog.identify_sources(**pars)
for catalog in self.catalogs.values():
catalog.measure_sources(filter_name, **pars)
for catalog in self.catalogs.values():
catalog.image.close()
def write(self, reject_catalogs, **pars):
"""Write catalogs for this image to output files.
Parameters
----------
reject_catalogs : bool
Indicator as to whether or not the catalogs (*.ecsv) should be written.
types : list
List of catalog types to be generated. If None, build all available catalogs.
Supported types of catalogs include: 'aperture', 'segment'.
"""
# Make sure we at least have a default 2D background computed
for catalog in self.catalogs.values():
if catalog.source_cat is None:
catalog.source_cat = catalog.sources
catalog.write_catalog(reject_catalogs)
def combine(self, subset_dict):
"""Combine subset columns from the filter catalog with the total detection catalog.
Parameters
----------
subset_dict : dictionary
Dictionary where the keys are the types of catalogs, and the values are
the catalog objects.
"""
for k, v in self.catalogs.items():
v.combine_tables(subset_dict[k]['subset'])
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class HAPCatalogBase:
"""Virtual class used to define API for all catalogs"""
catalog_suffix = ".ecsv"
catalog_region_suffix = ".reg"
catalog_format = "ascii.ecsv"
catalog_type = None
def __init__(self, image, param_dict, param_dict_qc, diagnostic_mode, tp_sources):
self.image = image
self.imgname = image.imgname
self.param_dict = param_dict
self.param_dict_qc = param_dict_qc
self.diagnostic_mode = diagnostic_mode
self.sourcelist_filename = self.imgname.replace(self.imgname[-9:], self.catalog_suffix)
# Compute average gain - there will always be at least one gain value in the primary header
gain_keys = self.image.keyword_dict['gain_keys']
gain_values = [g for g in gain_keys if g > 0.0]
self.gain = self.image.keyword_dict['exptime'] * np.mean(gain_values)
# Set the gain for ACS/SBC and WFC3/IR to 1.0
if self.image.keyword_dict["detector"].upper() in ["IR", "SBC"]:
self.gain = 1.0
# Convert photometric aperture radii from arcsec to pixels
self.aper_radius_arcsec = [self.param_dict['aperture_1'], self.param_dict['aperture_2']]
self.aper_radius_list_pixels = []
for aper_radius in self.aper_radius_arcsec:
self.aper_radius_list_pixels.append(aper_radius / self.image.imgwcs.pscale)
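            # e.g. (illustrative values) a 0.05 arcsec aperture at a plate scale
            # of 0.04 arcsec/pixel becomes 0.05 / 0.04 = 1.25 pixels.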
# Photometric information
if not tp_sources:
log.info("Average gain of {} for input image {}".format(np.mean(gain_values), self.imgname))
log.info("{}".format("=" * 80))
log.info("")
log.info("")
log.info("SUMMARY OF INPUT PARAMETERS FOR PHOTOMETRY")
log.info("image name: {}".format(self.imgname))
log.info("platescale: {}".format(self.image.imgwcs.pscale))
log.info("radii (pixels): {}".format(self.aper_radius_list_pixels))
log.info("radii (arcsec): {}".format(self.aper_radius_arcsec))
log.info("annulus: {}".format(self.param_dict['skyannulus_arcsec']))
log.info("dSkyAnnulus: {}".format(self.param_dict['dskyannulus_arcsec']))
log.info("salgorithm: {}".format(self.param_dict['salgorithm']))
log.info("gain: {}".format(self.gain))
# log.info("ab_zeropoint: {}".format(self.ab_zeropoint))
log.info(" ")
log.info("{}".format("=" * 80))
log.info("")
# Initialize attributes which are computed by class methods later
self.sources = None # list of identified source positions
self.source_cat = None # catalog of sources and their properties
self.tp_sources = tp_sources
# Determine what regions we have for source identification
        # Regions are defined as sections of the image which have the same
        # max WHT within a factor of 2.0 (or so).
# make_wht_masks(whtarr, maskarr, scale=1.5, sensitivity=0.95, kernel=(11,11))
self_scale = (self.image.keyword_dict['ndrizim'] - 1) / 2
scale = max(self.param_dict['scale'], self_scale)
self.tp_masks = make_wht_masks(self.image.wht_image, self.image.inv_footprint_mask,
scale=scale,
sensitivity=self.param_dict['sensitivity'],
kernel=(self.param_dict['region_size'],
self.param_dict['region_size']))
def identify_sources(self, **pars):
pass
def measure_sources(self, filter_name, **pars):
pass
def write_catalog(self, reject_catalogs, **pars):
pass
def combine_tables(self, subset_dict):
pass
def annotate_table(self, data_table, param_dict_qc, proc_type="aperture", product="tdp"):
"""Add state metadata to the top of the output source catalog.
Parameters
----------
data_table : QTable
Table of source properties
param_dict_qc : dictionary
Configuration values for quality control step based upon input JSON files (used to build catalog header)
proc_type : str, optional
Identification of catalog type: aperture (aka point) or segment
product : str, optional
Identification string for the catalog product being written. This
controls the data being put into the catalog product
Returns
-------
data_table : QTable
            Table of source properties updated to contain state metadata
"""
data_table.meta["h00"] = [" #=================================================================================================="]
data_table.meta["h01"] = [" # All refereed publications based on data obtained from the HAP must carry the following footnote: "]
data_table.meta["h02"] = [" # "]
data_table.meta["h03"] = [" # Based on observations made with the NASA/ESA Hubble Space Telescope "]
data_table.meta["h04"] = [" # and obtained from the Hubble Advanced Products collection generated "]
data_table.meta["h05"] = [" # by the Space Telescope Science Institute (STScI/NASA). "]
data_table.meta["h06"] = [" # "]
data_table.meta["h07"] = [" # One copy of each paper resulting from data obtained from the HAP should be sent to the STScI. "]
data_table.meta["h08"] = [" #=================================================================================================="]
data_table.meta["WCSNAME"] = self.image.keyword_dict["wcs_name"]
data_table.meta["WCSTYPE"] = self.image.keyword_dict["wcs_type"]
data_table.meta["Proposal ID"] = self.image.keyword_dict["proposal_id"]
data_table.meta["Image File Name"] = self.image.keyword_dict['image_file_name']
data_table.meta["Target Name"] = self.image.keyword_dict["target_name"]
data_table.meta["Date Observed"] = self.image.keyword_dict["date_obs"]
data_table.meta["Time Observed"] = self.image.keyword_dict["time_obs"]
data_table.meta["Instrument"] = self.image.keyword_dict["instrument"]
data_table.meta["Detector"] = self.image.keyword_dict["detector"]
data_table.meta["Target RA"] = self.image.keyword_dict["target_ra"]
data_table.meta["Target DEC"] = self.image.keyword_dict["target_dec"]
data_table.meta["Orientation"] = self.image.keyword_dict["orientation"]
data_table.meta["Aperture RA"] = self.image.keyword_dict["aperture_ra"]
data_table.meta["Aperture DEC"] = self.image.keyword_dict["aperture_dec"]
data_table.meta["Aperture PA"] = self.image.keyword_dict["aperture_pa"]
data_table.meta["Exposure Start"] = self.image.keyword_dict["expo_start"]
data_table.meta["Total Exposure Time"] = self.image.keyword_dict["texpo_time"]
if self.image.keyword_dict["detector"].upper() != "SBC":
data_table.meta["CCD Gain"] = self.image.keyword_dict["ccd_gain"]
if product.lower() == "tdp" or self.image.keyword_dict["instrument"].upper() == "WFC3":
data_table.meta["Filter 1"] = self.image.keyword_dict["filter1"]
data_table.meta["Filter 2"] = ""
else:
data_table.meta["Filter 1"] = self.image.keyword_dict["filter1"]
data_table.meta["Filter 2"] = self.image.keyword_dict["filter2"]
num_sources = len(data_table)
data_table.meta["Number of sources"] = num_sources
proc_type = proc_type.lower()
ci_lower = float(param_dict_qc['ci filter'][proc_type]['ci_lower_limit'])
ci_upper = float(param_dict_qc['ci filter'][proc_type]['ci_upper_limit'])
data_table.meta["h09"] = ["#================================================================================================="]
data_table.meta["h10"] = ["IMPORTANT NOTES"]
data_table.meta["h11"] = ["The X and Y coordinates in this table are 0-indexed (i.e. the origin is (0,0))."]
data_table.meta["h12"] = ["RA and Dec values in this table are in sky coordinates (i.e. coordinates at the epoch of observation"]
data_table.meta["h12.1"] = ["and an {}).".format(self.image.keyword_dict["wcs_type"])]
data_table.meta["h13"] = ["Magnitude values in this table are in the ABMAG system."]
data_table.meta["h14"] = ["Column titles in this table ending with Ap1 refer to the inner photometric aperture "]
data_table.meta["h14.1"] = ["(radius = {} pixels, {} arcsec.".format(self.aper_radius_list_pixels[0],
self.aper_radius_arcsec[0])]
data_table.meta["h15"] = ["Column titles in this table ending with Ap2 refer to the outer photometric aperture "]
data_table.meta["h15.1"] = ["(radius = {} pixels, {} arcsec.".format(self.aper_radius_list_pixels[1],
self.aper_radius_arcsec[1])]
data_table.meta["h16"] = ["CI = Concentration Index (CI) = MagAp1 - MagAp2."]
data_table.meta["h17"] = ["Flag Value Identification:"]
data_table.meta["h17.1"] = [" 0 - Stellar Source ({} < CI < {})".format(ci_lower, ci_upper)]
data_table.meta["h17.2"] = [" 1 - Extended Source (CI > {})".format(ci_upper)]
data_table.meta["h17.3"] = [" 2 - Questionable Photometry (Single-Pixel Saturation)"]
data_table.meta["h17.4"] = [" 4 - Questionable Photometry (Multi-Pixel Saturation)"]
data_table.meta["h17.5"] = [" 8 - Faint Detection Limit"]
data_table.meta["h17.6"] = [" 16 - Hot pixels (CI < {})".format(ci_lower)]
data_table.meta["h17.7"] = [" 32 - False Detection Swarm Around Saturated Source"]
data_table.meta["h17.8"] = [" 64 - False Detections Near Image Edge"]
data_table.meta["h17.9"] = [" 128 - Bleeding and Cosmic Rays"]
data_table.meta["h18"] = ["#================================================================================================="]
        if proc_type == "segment":
if self.is_big_island:
data_table.meta["h19"] = ["WARNING: Segmentation catalog is considered to be of poor quality due to a crowded field or large segments."]
return (data_table)
# --------------------------------------------------------------------------------------------------------
class HAPPointCatalog(HAPCatalogBase):
"""Generate photometric sourcelist(s) for specified image(s) using aperture photometry of point sources.
"""
catalog_suffix = "_point-cat.ecsv"
catalog_type = 'aperture'
def __init__(self, image, param_dict, param_dict_qc, diagnostic_mode, tp_sources):
super().__init__(image, param_dict, param_dict_qc, diagnostic_mode, tp_sources)
# Defined in measure_sources
self.subset_filter_source_cat = None
def identify_sources(self, **pars):
"""Create a master coordinate list of sources identified in the specified total detection product image
"""
source_fwhm = self.image.kernel_fwhm
# read in sci, wht extensions of drizzled product
image = np.nan_to_num(self.image.data, copy=True, nan=0.0)
# Create the background-subtracted image
image -= self.image.bkg_background_ra
        image = np.clip(image, 0, image.max())  # Ensure there are no negative pixels to trip up StarFinder
if 'drz.fits' in self.image.imgname:
reg_suffix = 'drz.fits'
else:
reg_suffix = 'drc.fits'
if not self.tp_sources:
# Report configuration values to log
log.info("{}".format("=" * 80))
log.info("")
log.info("Point-source finding settings")
log.info("Total Detection Product - Input Parameters")
log.info("INPUT PARAMETERS")
log.info("image name: {}".format(self.imgname))
log.info("{}: {}".format("self.param_dict['dao']['bkgsig_sf']", self.param_dict["dao"]["bkgsig_sf"]))
log.info("{}: {}".format("self.param_dict['dao']['kernel_sd_aspect_ratio']",
self.param_dict['dao']['kernel_sd_aspect_ratio']))
log.info("{}: {}".format("self.param_dict['simple_bkg']", self.param_dict['simple_bkg']))
log.info("{}: {}".format("self.param_dict['nsigma']", self.param_dict['nsigma']))
log.info("{}: {}".format("self.image.bkg_rms_median", self.image.bkg_rms_median))
log.info("DERIVED PARAMETERS")
log.info("{}: {}".format("source_fwhm", source_fwhm))
log.info("{}: {}".format("threshold", self.param_dict['nsigma'] * self.image.bkg_rms_median))
log.info("")
log.info("{}".format("=" * 80))
sources = None
for masknum, mask in enumerate(self.tp_masks):
# apply mask for each separate range of WHT values
region = image * mask['mask']
# Compute separate threshold for each 'region'
reg_rms = self.image.bkg_rms_ra * np.sqrt(mask['mask'] / mask['rel_weight'].max())
reg_rms_median = np.nanmedian(reg_rms[reg_rms > 0])
log.info("Mask {}: rel = {}".format(mask['wht_limit'], mask['rel_weight'].max()))
# find ALL the sources!!!
if self.param_dict["starfinder_algorithm"] == "dao":
log.info("DAOStarFinder(fwhm={}, threshold={}*{})".format(source_fwhm, self.param_dict['nsigma'],
reg_rms_median))
daofind = DAOStarFinder(fwhm=source_fwhm,
threshold=self.param_dict['nsigma'] * reg_rms_median)
reg_sources = daofind(region, mask=self.image.inv_footprint_mask)
elif self.param_dict["starfinder_algorithm"] == "iraf":
log.info("IRAFStarFinder(fwhm={}, threshold={}*{})".format(source_fwhm, self.param_dict['nsigma'],
reg_rms_median))
isf = IRAFStarFinder(fwhm=source_fwhm, threshold=self.param_dict['nsigma'] * reg_rms_median)
reg_sources = isf(region, mask=self.image.inv_footprint_mask)
elif self.param_dict["starfinder_algorithm"] == "psf":
log.info("UserStarFinder(fwhm={}, threshold={}*{})".format(source_fwhm, self.param_dict['nsigma'],
reg_rms_median))
# Perform manual detection of sources using theoretical PSFs
# Initial test data: ictj65
try:
# Subtract the detection threshold image so that detection is anything > 0
region -= (reg_rms * self.param_dict['nsigma'])
                        # ensure no negative values for deconvolution
region = np.clip(region, 0., region.max())
user_peaks, source_fwhm = decutils.find_point_sources(self.image.imgname,
data=region,
def_fwhm=source_fwhm,
box_size=self.param_dict['region_size'],
mask=self.image.footprint_mask,
block_size=self.param_dict['block_size'],
diagnostic_mode=self.diagnostic_mode)
except Exception:
# In case we run into out-of-memory error, or any other exception with
# PSF use (like CTE or horribly mismatched PSFs), fail-over to using
# DAOFind mode instead
log.warning("Exception thrown when trying to use PSFs to find sources with UserStarFinder.")
user_peaks = None
if user_peaks is not None and len(user_peaks) > 0:
log.info("UserStarFinder identified {} sources".format(len(user_peaks)))
if self.diagnostic_mode:
peak_name = "{}_peaks{}.reg".format(self.image.imgname.split('.')[0], masknum)
peak_reg = user_peaks['x_peak', 'y_peak']
peak_reg['x_peak'] += 1
peak_reg['y_peak'] += 1
peak_reg.write(peak_name,
format='ascii.fast_no_header',
overwrite=True)
daofind = decutils.UserStarFinder(fwhm=source_fwhm,
coords=user_peaks,
threshold=0.0,
sharphi=0.9, sharplo=0.4)
_region_name = self.image.imgname.replace(reg_suffix, 'region{}.fits'.format(masknum))
if self.diagnostic_mode:
fits.PrimaryHDU(data=region).writeto(_region_name, overwrite=True)
reg_sources = daofind(region,
mask=self.image.inv_footprint_mask)
_region_name = self.image.imgname.replace(reg_suffix, 'starfind_sources{}.ecsv'.format(masknum))
if self.diagnostic_mode:
reg_sources.write(_region_name, format='ascii.ecsv', overwrite=True)
else:
# No sources found to match the PSF model, perhaps due to CTE.
# Try standard daofind instead
log.info("Reverting to DAOStarFinder(fwhm={}, threshold={}*{})".format(source_fwhm, self.param_dict['nsigma'],
reg_rms_median))
daofind = DAOStarFinder(fwhm=source_fwhm,
threshold=self.param_dict['nsigma'] * reg_rms_median)
reg_sources = daofind(region, mask=self.image.inv_footprint_mask)
else:
err_msg = "'{}' is not a valid 'starfinder_algorithm' parameter input in the catalog_generation parameters json file. Valid options are 'dao' for photutils.detection.DAOStarFinder() or 'iraf' for photutils.detection.IRAFStarFinder().".format(self.param_dict["starfinder_algorithm"])
log.error(err_msg)
raise ValueError(err_msg)
log.info("{}".format("=" * 80))
# Concatenate sources found in each region.
if reg_sources is not None:
if sources is None:
sources = reg_sources
else:
sources = vstack([sources, reg_sources])
# If there are no detectable sources in the total detection image, return as there is nothing more to do.
if not sources:
log.warning("No point sources were found in Total Detection Product, {}.".format(self.imgname))
log.warning("Processing for point source catalogs for this product is ending.")
return
log.info("Measured {} sources in {}".format(len(sources), self.image.imgname))
log.info(" colnames: {}".format(sources.colnames))
# calculate and add RA and DEC columns to table
ra, dec = self.transform_list_xy_to_ra_dec(sources["xcentroid"], sources["ycentroid"], self.imgname)
ra_col = Column(name="RA", data=ra, dtype=np.float64)
dec_col = Column(name="DEC", data=dec, dtype=np.float64)
sources.add_column(ra_col, index=3)
sources.add_column(dec_col, index=4)
for col in sources.colnames:
sources[col].info.format = '.8g' # for consistent table output
# format output table columns
final_col_format = {"xcentroid": "10.3f", "ycentroid": "10.3f", "RA": "13.7f", "DEC": "13.7f", "id": "7d"}
for fcf_key in final_col_format.keys():
sources[fcf_key].format = final_col_format[fcf_key]
# descriptions
final_col_descrip = {"xcentroid": "Pixel Coordinate", "ycentroid": "Pixel Coordinate",
"RA": "Sky coordinate at epoch of observation",
"DEC": "Sky coordinate at epoch of observation",
"id": "Catalog Object Identification Number"}
for fcd_key in final_col_descrip.keys():
sources[fcd_key].description = final_col_descrip[fcd_key]
# add units to columns
final_col_units = {"xcentroid": "pixels", "ycentroid": "pixels", "RA": "degrees", "DEC": "degrees",
"id": "unitless"}
for col_title in final_col_units:
sources[col_title].unit = final_col_units[col_title]
if self.diagnostic_mode:
sources.write(self.image.imgname.replace(reg_suffix,'raw-point-cat.ecsv'), format='ascii.ecsv', overwrite=True)
self.sources = sources
# if processing filter product, use sources identified by parent total drizzle product identify_sources() run
if self.tp_sources:
self.sources = self.tp_sources['aperture']['sources']
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def measure_sources(self, filter_name):
"""Perform aperture photometry on identified sources
"""
log.info("Performing aperture photometry on identified point-sources")
# Open and background subtract image
image = self.image.data.copy()
# load in coords of sources identified in total product
try:
positions = (self.sources['xcentroid'], self.sources['ycentroid'])
except Exception:
positions = (self.sources['X-Center'], self.sources['Y-Center'])
pos_xy = np.vstack(positions).T
# define list of background annulii
bg_apers = CircularAnnulus(pos_xy,
r_in=self.param_dict['skyannulus_arcsec']/self.image.imgwcs.pscale,
r_out=(self.param_dict['skyannulus_arcsec'] +
self.param_dict['dskyannulus_arcsec'])/self.image.imgwcs.pscale)
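        # Note (added): the sky annulus radii are configured in arcseconds and converted to
        # pixels here by dividing by the drizzled image plate scale (arcsec/pixel); e.g. a
        # 0.25 arcsec annulus on a 0.05 arcsec/pixel image would give r_in = 5 pixels
        # (illustrative numbers only).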
# Create the list of photometric apertures to measure
phot_apers = [CircularAperture(pos_xy, r=r) for r in self.aper_radius_list_pixels]
# Perform aperture photometry - the input data should NOT be background subtracted
photometry_tbl = photometry_tools.iraf_style_photometry(phot_apers,
bg_apers,
data=image,
photflam=self.image.keyword_dict['photflam'],
photplam=self.image.keyword_dict['photplam'],
error_array=self.image.bkg_rms_ra,
bg_method=self.param_dict['salgorithm'],
epadu=self.gain)
# calculate and add RA and DEC columns to table
        ra, dec = self.transform_list_xy_to_ra_dec(photometry_tbl["X-Center"], photometry_tbl["Y-Center"], self.imgname)  # TODO: replace with all_pix2sky or something at a later date
ra_col = Column(name="RA", data=ra, dtype=np.float64)
dec_col = Column(name="DEC", data=dec, dtype=np.float64)
photometry_tbl.add_column(ra_col, index=2)
photometry_tbl.add_column(dec_col, index=3)
log.info('Obtained photometry measurements for {} sources'.format(len(photometry_tbl)))
try:
# Calculate and add concentration index (CI) column to table
ci_data = photometry_tbl["MagAp1"].data - photometry_tbl["MagAp2"].data
except Exception:
log.info("Wrote catalog info to file 'catalog.pickle'.")
pickle_out = open("catalog.pickle", "wb")
pickle.dump(photometry_tbl, pickle_out)
pickle_out.close()
ci_mask = np.logical_and(np.abs(ci_data) > 0.0, np.abs(ci_data) < 1.0e-30)
big_bad_index = np.where(abs(ci_data) > 1.0e20)
ci_mask[big_bad_index] = True
ci_col = MaskedColumn(name="CI", data=ci_data, dtype=np.float64, mask=ci_mask)
photometry_tbl.add_column(ci_col)
# Add zero-value "Flags" column in preparation for source flagging
flag_col = Column(name="Flags", data=np.zeros_like(photometry_tbl['ID']), dtype=np.int64)
photometry_tbl.add_column(flag_col)
# build final output table
final_col_order = ["X-Center", "Y-Center", "RA", "DEC", "ID", "MagAp1", "MagErrAp1", "MagAp2", "MagErrAp2",
"MSkyAp2", "StdevAp2", "FluxAp2", "CI", "Flags"]
output_photometry_table = photometry_tbl[final_col_order]
# format output table columns
final_col_format = {"X-Center": "10.3f", "Y-Center": "10.3f", "RA": "13.7f", "DEC": "13.7f", "ID": "7d",
"MagAp1": '7.3f', "MagErrAp1": '7.3f', "MagAp2": '7.3f',
"MagErrAp2": '7.3f', "MSkyAp2": '7.3f', "StdevAp2": '7.3f',
"FluxAp2": '10.4f', "CI": "7.3f", "Flags": "5d"} # TODO: Standardize precision
for fcf_key in final_col_format.keys():
output_photometry_table[fcf_key].format = final_col_format[fcf_key]
# column descriptions
final_col_descrip = {"ID": "Catalog Object Identification Number",
"X-Center": "Pixel Coordinate",
"Y-Center": "Pixel Coordinate",
"RA": "Sky coordinate at epoch of observation",
"DEC": "Sky coordinate at epoch of observation",
"MagAp1": "ABMAG of source based on the inner (smaller) aperture",
"MagErrAp1": "Error of MagAp1",
"MagAp2": "ABMAG of source based on the outer (larger) aperture",
"MagErrAp2": "Error of MagAp2",
"MSkyAp2": "ABMAG of sky based on outer (larger) aperture",
"StdevAp2": "Standard deviation of sky measurement in outer (larger) aperture",
"FluxAp2": "Flux of source based on the outer (larger) aperture",
"CI": "Concentration Index",
"Flags": "Numeric encoding for conditions on detected sources"}
for fcd_key in final_col_descrip.keys():
output_photometry_table[fcd_key].description = final_col_descrip[fcd_key]
# add units to columns
final_col_units = {"X-Center": "pixels", "Y-Center": "pixels", "RA": "degrees", "DEC": "degrees",
"ID": "unitless", "MagAp1": "ABMAG", "MagErrAp1": "ABMAG", "MagAp2": "ABMAG",
"MagErrAp2": "ABMAG", "MSkyAp2": "ABMAG", "StdevAp2": "ABMAG",
"FluxAp2": "electrons/sec", "CI": "ABMAG", "Flags": "unitless"}
for col_title in final_col_units:
output_photometry_table[col_title].unit = final_col_units[col_title]
# Capture specified columns in order to append to the total detection table
self.subset_filter_source_cat = output_photometry_table["ID", "MagAp2", "CI", "Flags"]
self.subset_filter_source_cat.rename_column("MagAp2", "MagAP2_" + filter_name)
self.subset_filter_source_cat.rename_column("CI", "CI_" + filter_name)
self.subset_filter_source_cat.rename_column("Flags", "Flags_" + filter_name)
# Add the header information to the table
self.source_cat = self.annotate_table(output_photometry_table,
self.param_dict_qc,
proc_type = "aperture",
product=self.image.ghd_product)
log.info("Saved photometry table with {} sources".format(len(self.source_cat)))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def write_catalog(self, reject_catalogs):
"""Write specified catalog to file on disk
Regardless of the setting for reject_catalogs, the regions file will be written
solely based upon the setting of diagnostic_mode.
Parameters
----------
reject_catalogs : bool
Indicator as to whether or not the catalogs (*.ecsv) should be written.
Returns
-------
Nothing!
"""
if not reject_catalogs:
# Write out catalog to ecsv file
self.source_cat = self.annotate_table(self.source_cat, self.param_dict_qc, proc_type="aperture", product=self.image.ghd_product)
# self.source_cat.meta['comments'] = \
# ["NOTE: The X and Y coordinates in this table are 0-indexed (i.e. the origin is (0,0))."]
self.source_cat.write(self.sourcelist_filename, format=self.catalog_format)
log.info("Wrote catalog file '{}' containing {} sources".format(self.sourcelist_filename, len(self.source_cat)))
# Write out region file if in diagnostic_mode.
if self.diagnostic_mode:
out_table = self.source_cat.copy()
if 'xcentroid' in out_table.keys(): # for point-source source catalogs
# Remove all other columns besides xcentroid and ycentroid
out_table.keep_columns(['xcentroid', 'ycentroid'])
# Add offset of 1.0 in X and Y to line up sources in region file with image displayed in ds9.
out_table['xcentroid'].data[:] += np.float64(1.0)
out_table['ycentroid'].data[:] += np.float64(1.0)
elif 'X-Center' in out_table.keys(): # for aperture photometric catalogs
# Remove all other columns besides 'X-Center and Y-Center
out_table.keep_columns(['X-Center', 'Y-Center'])
# Add offset of 1.0 in X and Y to line up sources in region file with image displayed in ds9.
out_table['X-Center'].data[:] += np.float64(1.0)
out_table['Y-Center'].data[:] += np.float64(1.0)
else: # Bail out if anything else is encountered.
log.info("Error: unrecognized catalog format. Skipping region file generation.")
return()
reg_filename = self.sourcelist_filename.replace("." + self.catalog_suffix.split(".")[1],
self.catalog_region_suffix)
out_table.write(reg_filename, format="ascii")
log.info("Wrote region file '{}' containing {} sources".format(reg_filename, len(out_table)))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def transform_list_xy_to_ra_dec(self, list_of_x, list_of_y, drizzled_image):
"""Transform lists of X and Y coordinates to lists of RA and Dec coordinates
        This is a temporary solution until something like pix2sky or pix2world can be implemented in measure_sources.
        Directly lifted from the HLA classic subroutine hla_sourcelist.Transform_list_xy_to_RA_Dec().
Tested.
Parameters
----------
list_of_x : list
list of x coordinates to convert
        list_of_y : list
list of y coordinates to convert
drizzled_image : str
Name of the image that corresponds to the table from DAOPhot. This image is used to re-write x and y
coordinates in RA and Dec.
Returns
-------
ra: list
list of right ascension values
dec : list
list of declination values
"""
import stwcs
wcs1_drz = stwcs.wcsutil.HSTWCS(drizzled_image + "[1]")
origin = 0
# *origin* is the coordinate in the upper left corner of the
# image. In FITS and Fortran standards, this is 1. In Numpy and C
# standards this is 0.
try:
skyposish = wcs1_drz.all_pix2sky(list_of_x, list_of_y, origin)
except AttributeError:
skyposish = wcs1_drz.all_pix2world(list_of_x, list_of_y, origin)
ra = skyposish[0]
dec = skyposish[1]
return ra, dec
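        # Illustrative usage (added; the file name is hypothetical):
        #     ra, dec = self.transform_list_xy_to_ra_dec([100.0], [200.0], "hst_example_drz.fits")
        # The try/except above covers the older stwcs/astropy naming, where all_pix2sky was the
        # predecessor of all_pix2world; both take the 0-based origin convention used here.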
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def combine_tables(self, subset_table):
"""Append specified measurements from the filter table to the total detection table.
The "ID" column is used to map the filter table measurements to the total detection table
Parameters
----------
subset_table : Astropy table
A table containing a subset of columns from a filter catalog.
"""
        # Check the filter subset table and self.sources (the total product list), even though an empty total product list should not be possible
        if len(subset_table) == 0 or len(self.sources) == 0:
            log.error("No sources found in the current filter table and/or the total source table.")
return
# Keep all the rows in the original total detection table and add columns from the filter
# table where a matching "id" key is present. The key must match in case.
if 'xcentroid' in self.sources.colnames:
self.sources.rename_column('xcentroid', 'X-Center')
if 'ycentroid' in self.sources.colnames:
self.sources.rename_column('ycentroid', 'Y-Center')
if 'id' in self.sources.colnames:
self.sources.rename_column("id", "ID")
for col2del in ['sharpness', 'roundness1', 'roundness2', 'npix', 'sky', 'peak', 'flux', 'mag']:
if col2del in self.sources.colnames:
self.sources.remove_column(col2del)
self.sources = join(self.sources, subset_table, keys="ID", join_type="left")
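        # Note (added): with join_type="left" every row of the total detection table is kept;
        # sources with no matching "ID" in the filter subset table simply end up with masked
        # values in the appended columns (e.g. MagAP2_<filter>) rather than being dropped.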
# ----------------------------------------------------------------------------------------------------------------------
class HAPSegmentCatalog(HAPCatalogBase):
"""Generate a sourcelist for a specified image by detecting both point and extended
sources using the image segmentation process.
Parameters
----------
image : CatalogImage object
The white light (aka total detection) or filter drizzled image
param_dict : dictionary
Configuration values for catalog generation based upon input JSON files
diagnostic_mode : bool
Specifies whether or not to generate the regions file used for ds9 overlay
tp_sources: dictionary
Dictionary containing computed information for each catalog type
"""
catalog_suffix = "_segment-cat.ecsv"
catalog_type = 'segment'
# Class variable which indicates to the Filter object the Total object had to determine
# the image background by the sigma_clipped alternate algorithm
using_sigma_clipped_bkg = False
def __init__(self, image, param_dict, param_dict_qc, diagnostic_mode, tp_sources):
super().__init__(image, param_dict, param_dict_qc, diagnostic_mode, tp_sources)
# Get the instrument/detector-specific values from the self.param_dict
self._fwhm = self.param_dict["sourcex"]["fwhm"]
self._size_source_box = self.param_dict["sourcex"]["source_box"]
self._nlevels = self.param_dict["sourcex"]["nlevels"]
self._contrast = self.param_dict["sourcex"]["contrast"]
self._border = self.param_dict["sourcex"]["border"]
self._nsigma = self.param_dict["sourcex"]["segm_nsigma"]
self._rw2d_size = self.param_dict["sourcex"]["rw2d_size"]
self._rw2d_nsigma = self.param_dict["sourcex"]["rw2d_nsigma"]
self._rw2d_biggest_source = self.param_dict["sourcex"]["rw2d_biggest_source"]
self._rw2d_source_fraction = self.param_dict["sourcex"]["rw2d_source_fraction"]
self._bs_deblend_limit = self.param_dict["sourcex"]["biggest_source_deblend_limit"]
self._sf_deblend_limit = self.param_dict["sourcex"]["source_fraction_deblend_limit"]
self._ratio_bigsource_limit = self.param_dict["sourcex"]["ratio_bigsource_limit"]
# Columns to include from the computation of source properties to save
# computation time from computing values which are not used
self.include_filter_cols = ['area', bac_colname, 'bbox_xmax', 'bbox_xmin', 'bbox_ymax', 'bbox_ymin',
'covar_sigx2', 'covar_sigxy', 'covar_sigy2', 'cxx', 'cxy', 'cyy',
'ellipticity', 'elongation', id_colname, 'orientation', 'sky_centroid_icrs',
flux_colname, ferr_colname, 'xcentroid', 'ycentroid']
# Initialize attributes to be computed later
self.segm_img = None # Segmentation image
# Defined in measure_sources
self.subset_filter_source_cat = None
# Default kernel which may be the custom kernel based upon the actual image
# data or a Gaussian 2D kernel. This may be over-ridden in identify_sources().
self.kernel = copy.deepcopy(self.image.kernel)
# Attribute computed when generating the segmentation image. If the segmentation image
# is deemed to be of poor quality, make sure to add documentation to the output catalog.
self.is_big_island = False
def identify_sources(self, **pars):
"""Use photutils to find sources in image based on segmentation.
Returns
-------
self.sources
self.source_cat
Defines
-------
self.segm_img : `photutils.segmentation.SegmentationImage`
Two-dimensional segmentation image where found source regions are labeled with
unique, non-zero positive integers.
"""
# If the total product sources have not been identified, then this needs to be done!
if not self.tp_sources:
# Report configuration values to log
log.info("{}".format("=" * 80))
log.info("")
log.info("SExtractor-like source finding settings - Photutils segmentation")
log.info("Total Detection Product - Input Parameters")
log.info("Image: {}".format(self.imgname))
log.info("FWHM: {}".format(self._fwhm))
log.info("size_source_box (no. of connected pixels needed for a detection): {}".format(self._size_source_box))
log.info("nsigma (threshold = nsigma * background_rms): {}".format(self._nsigma))
log.info("nlevels (no. of multi-thresholding levels for deblending): {}".format(self._nlevels))
log.info("contrast (frac. flux for peak to be separate object, 0=max. deblend, 1=no deblend): {}".format(self._contrast))
log.info("RickerWavelet nsigma (threshold = nsigma * background_rms): {}".format(self._rw2d_nsigma))
log.info("RickerWavelet kernel X- and Y-dimension: {}".format(self._rw2d_size))
log.info("Percentage limit on biggest source (criterion for RickerWavelet kernel): {}".format(100.0 * self._rw2d_biggest_source))
log.info("Percentage limit on source fraction over the image (criterion for RickerWavelet kernel): {}".format(100.0 * self._rw2d_source_fraction))
log.info("Percentage limit on biggest source deblending limit: {}".format(100.0 * self._bs_deblend_limit))
log.info("Percentage limit on source fraction deblending limit: {}".format(100.0 * self._sf_deblend_limit))
# Get the SCI image data
imgarr = copy.deepcopy(self.image.data)
# Custom or Gaussian kernel depending upon the results of CatalogImage build_kernel()
g2d_kernel = self.image.kernel
# Write out diagnostic data
if self.diagnostic_mode:
# Exclusion mask
outname = self.imgname.replace(".fits", "_mask.fits")
fits.PrimaryHDU(data=self.image.inv_footprint_mask.astype(np.uint16)).writeto(outname)
# Background image
outname = self.imgname.replace(".fits", "_bkg.fits")
fits.PrimaryHDU(data=self.image.bkg_background_ra).writeto(outname)
# filter kernel as well
outname = self.imgname.replace(".fits", "_kernel.fits")
fits.PrimaryHDU(data=g2d_kernel).writeto(outname)
# Detect segments and evaluate the detection in terms of big sources/islands or crowded fields
# Round 1
ncount = 0
log.info("")
log.info("Using Custom kernel or Gaussian to generate a segmentation map.")
g_segm_img, g_is_big_crowded, g_bs, g_sf = self.detect_and_eval_segments(imgarr,
g2d_kernel,
ncount,
self._size_source_box,
self._nsigma,
self.image.bkg_background_ra,
self.image.bkg_rms_ra,
check_big_island_only=False,
rw2d_biggest_source=self._rw2d_biggest_source,
rw2d_source_fraction=self._rw2d_source_fraction)
# If the science field via the segmentation map is deemed crowded or has big sources/islands, compute the
# RickerWavelet2DKernel and call detect_and_eval_segments() again. Still use the custom fwhm as it
# should be better than a generic fwhm as it is based upon the data.
# Note: the fwhm might be a default if the custom algorithm had to fall back to a Gaussian.
if g_is_big_crowded and g_segm_img:
log.info("")
log.info("The segmentation map contains big sources/islands or a large source fraction of segments.")
log.info("Using RickerWavelet2DKernel to generate an alternate segmentation map.")
rw2d_kernel = RickerWavelet2DKernel(self.image.kernel_fwhm,
x_size=self._rw2d_size,
y_size=self._rw2d_size)
rw2d_kernel.normalize()
# Detect segments and evaluate the detection in terms of big sources/islands or crowded fields
# Round 1
ncount += 1
rw_segm_img, rw_is_big_crowded, rw_bs, rw_sf = self.detect_and_eval_segments(imgarr,
rw2d_kernel,
ncount,
self._size_source_box,
self._rw2d_nsigma,
self.image.bkg_background_ra,
self.image.bkg_rms_ra,
check_big_island_only=True,
rw2d_biggest_source=self._rw2d_biggest_source,
rw2d_source_fraction=self._rw2d_source_fraction)
# Compute the ratio of big sources/islands using Custom/Gaussian kernel vs Rickerwavelet kernel
                # This value can be used as a discriminant between overlapping point sources and nebulosity fields
ratio_cg2rw_bigsource = 3.0
if rw_bs > 0.0:
ratio_cg2rw_bigsource = g_bs / rw_bs
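                # Worked illustration (added; numbers are made up): if the biggest source in the
                # Custom/Gaussian segmentation covers 6% of the image while the biggest source in
                # the RickerWavelet segmentation covers 2%, the ratio is 3.0, which (per the
                # check further below) points to overlapping PSFs that deblend efficiently rather
                # than to extended nebulosity.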
# Check if the RickerWavelet segmentation image still seems to be problematic
if rw_is_big_crowded and rw_segm_img:
# Before giving up, check the type of background computed for the detection image,
# and proceed based upon the type. If a "sigma-clipped background" is in use, compute
# a "2D background" instead. If a "2D background" is in use, increase the
# threshold for source detection.
log.info("")
log.info("RickerWavelet computed segmentation image still contains big sources/islands.")
log.info("Recomputing the threshold or background image for improved segmentation detection.")
# Make sure to be working with the unmodified image data
imgarr = copy.deepcopy(self.image.data)
# Background types: zero_background, sigma_clipped_background, twod_background
# Compute a twod_background
if (self.image.bkg_type.lower().startswith('sigma')):
log.info("Recomputing the background image from a sigma-clipped background to a Background2D.")
# In order to force the use of a background2D, some configuration values will be
# re-set (i.e., bkg_skew_threshold and negative_percent).
self.image.compute_background(self.param_dict['bkg_box_size'],
self.param_dict['bkg_filter_size'],
bkg_skew_threshold=0.0,
negative_percent=100.0)
if self.diagnostic_mode:
outname = self.imgname.replace(".fits", "_bkg1.fits")
fits.PrimaryHDU(data=self.image.bkg_background_ra).writeto(outname)
# Need to remake image kernel as it has a dependence on self.bkg_rms_ra
self.image.build_kernel(self.param_dict['bkg_box_size'],
self.param_dict['bkg_filter_size'],
self.param_dict['dao']['TWEAK_FWHMPSF'])
# Reset the local version of the Custom/Gaussian kernel and the RickerWavelet
# kernel when the background type changes
g2d_kernel = self.image.kernel
rw2d_kernel = RickerWavelet2DKernel(self.image.kernel_fwhm,
x_size=self._rw2d_size,
y_size=self._rw2d_size)
rw2d_kernel.normalize()
sigma_for_threshold = self._nsigma
rw2d_sigma_for_threshold = self._rw2d_nsigma
# Re-compute a background2D with a higher threshold by increasing the nsigma used
elif (self.image.bkg_type.lower().startswith('twod')):
log.info("Increasing the threshold image (bkg + nsigma * 2.0) for improved source detection.")
sigma_for_threshold = self._nsigma * 2.0
rw2d_sigma_for_threshold = self._rw2d_nsigma * 2.0
# Detect segments and evaluate the detection in terms of big sources/islands or crowded fields
# Round 2
ncount += 1
log.info("")
log.info("With alternate background...using Custom/Gaussian kernel to generate a segmentation map.")
del g_segm_img
g_segm_img, g_is_big_crowded, g_bs, g_sf = self.detect_and_eval_segments(imgarr,
g2d_kernel,
ncount,
self._size_source_box,
sigma_for_threshold,
self.image.bkg_background_ra,
self.image.bkg_rms_ra,
check_big_island_only=False,
rw2d_biggest_source=self._rw2d_biggest_source,
rw2d_source_fraction=self._rw2d_source_fraction)
# Check again for big sources/islands or a large source fraction
if g_is_big_crowded:
log.info("")
log.info("The segmentation map contains big sources/islands or a large source fraction of segments.")
log.info("With alternate background...using RickerWavelet2DKernel to generate an alternate segmentation map.")
# Detect segments and evaluate the detection in terms of big sources/islands or crowded fields
# Note the biggest source and source fraction limits are the much larger "deblend" values.
# Round 2
ncount += 1
del rw_segm_img
rw_segm_img, rw_is_big_crowded, rw_bs, rw_sf = self.detect_and_eval_segments(imgarr,
rw2d_kernel,
ncount,
self._size_source_box,
rw2d_sigma_for_threshold,
self.image.bkg_background_ra,
self.image.bkg_rms_ra,
check_big_island_only=False,
rw2d_biggest_source=self._bs_deblend_limit,
rw2d_source_fraction=self._sf_deblend_limit)
# Last chance - The larger "deblend" limits were used in this last detection
                        # attempt based upon the statistics of processing lots of data - looking
# for a balance between not being able to generate segmentation catalogs versus
# deblending for an unreasonable amount of time (days).
#
# Also, the ratio_cg2rw_bigsource is indicative of overlapping PSFs versus large
                        # areas of nebulosity. If this ratio is approximately > 2, then deblending can be
# quite efficient and successful for the overlapping PSF case.
#
# Use the Round 2 RickerWavelet segmentation image
if not rw_is_big_crowded or (rw_is_big_crowded and (ratio_cg2rw_bigsource > self._ratio_bigsource_limit)):
log.info("The Round 2 of segmentation images may still contain big sources/islands.\n"
"However, the ratio between the Custom/Gaussian and Rickerwavelet biggest source is\n"
"indicative of overlapping PSFs vs nebulousity.")
log.info("Proceeding as the time to deblend should be nominal.")
self.kernel = rw2d_kernel
segm_img = copy.deepcopy(rw_segm_img)
del rw_segm_img
# The segmentation image is problematic and the big island/source fraction limits are exceeded,
# so deblending could take days, and the results would not be viable in any case.
else:
log.warning("")
log.warning("The Round 2 of segmentation images still contain big sources/islands or a\n"
"large source fraction of segments.")
log.warning("The segmentation algorithm is unable to continue and no segmentation catalog will be produced.")
del g_segm_img
del rw_segm_img
return
# Use the second round custom/Gaussian segmentation image
else:
self.kernel = g2d_kernel
segm_img = copy.deepcopy(g_segm_img)
del g_segm_img
# The first round RickerWavelet segmentation image is good, continue with the processing
elif not rw_is_big_crowded and rw_segm_img:
self.kernel = rw2d_kernel
segm_img = copy.deepcopy(rw_segm_img)
del rw_segm_img
del g_segm_img
# No segments were detected in the total data product - no further processing done for this TDP,
# but processing of another TDP should proceed.
elif not rw_segm_img:
return
# The first round custom/Gaussian segmentation image is good, continue with the processing
elif not g_is_big_crowded and g_segm_img:
self.kernel = g2d_kernel
segm_img = copy.deepcopy(g_segm_img)
del g_segm_img
# No segments were detected in the total data product - no further processing done for this TDP,
# but processing of another TDP should proceed.
elif not g_segm_img:
return
# Deblend the segmentation image
ncount += 1
self.deblend_segments(segm_img,
imgarr,
ncount,
filter_kernel=self.kernel,
source_box=self._size_source_box)
# The total product catalog consists of at least the X/Y and RA/Dec coordinates for the detected
# sources in the total drizzled image. All the actual measurements are done on the filtered drizzled
# images using the coordinates determined from the total drizzled image. Measure the coordinates now.
log.info("Identifying sources in total detection image.")
self.segm_img = copy.deepcopy(segm_img)
del segm_img
if OLD_PHOTUTILS:
self.source_cat = source_properties(imgarr, self.segm_img, background=self.image.bkg_background_ra,
filter_kernel=self.kernel, wcs=self.image.imgwcs)
else:
self.source_cat = SourceCatalog(imgarr, self.segm_img, background=self.image.bkg_background_ra,
kernel=self.kernel, wcs=self.image.imgwcs)
# Convert source_cat which is a SourceCatalog to an Astropy Table - need the data in tabular
# form to filter out bad rows and correspondingly bad segments before the filter images are processed.
total_measurements_table = Table(self.source_cat.to_table(columns=['label', 'xcentroid', 'ycentroid', 'sky_centroid_icrs']))
# Filter the table to eliminate nans or inf based on the coordinates, then remove these segments from
# the segmentation image too
good_rows = []
bad_segm_rows_by_id = []
updated_table = None
for i, old_row in enumerate(total_measurements_table):
if np.isfinite(old_row["xcentroid"]):
good_rows.append(old_row)
else:
bad_segm_rows_by_id.append(total_measurements_table['label'][i])
updated_table = Table(rows=good_rows, names=total_measurements_table.colnames)
if self.diagnostic_mode and bad_segm_rows_by_id:
log.info("Bad segments removed from segmentation image for Total detection image {}.".format(self.imgname))
# Remove the bad segments from the image
self.segm_img.remove_labels(bad_segm_rows_by_id, relabel=True)
# Clean up the existing column names, format, and descriptions
self.source_cat = self._define_total_table(updated_table)
# self.sources needs to be passed to a filter catalog object based on code in hapsequencer.py
# (create_catalog_products()). This is the way the independent catalogs of total and filter products
# process the same segmentation image.
# BEWARE: self.sources for "segmentation" is a SegmentationImage, but for "point" it is an Astropy table
self.sources = copy.deepcopy(self.segm_img)
log.info("Done identifying sources in total detection image for the segmentation catalog.")
log.info("")
log.info("{}".format("=" * 80))
log.info("")
# If filter product, use sources identified in total detection product previously generated
else:
self.sources = self.tp_sources['segment']['sources']
self.kernel = self.tp_sources['segment']['kernel']
self.total_source_table = self.tp_sources['segment']['source_cat']
# For debugging purposes only, create a "regions" files to use for ds9 overlay of the segm_img.
# Create the image regions file here in case there is a failure. This diagnostic portion of the
# code should only be invoked when working on the total object catalog (self.segm_img is defined).
if self.diagnostic_mode and self.segm_img:
# Copy out only the X and Y coordinates to a "diagnostic_mode table" and cast as an Astropy Table
# so a scalar can be added to the centroid coordinates
tbl = self.source_cat["X-Centroid", "Y-Centroid"]
# Construct the diagnostic_mode output filename and write the regions file
indx = self.sourcelist_filename.find("ecsv")
outname = self.sourcelist_filename[0:indx-1] + "_all.reg"
tbl["X-Centroid"].info.format = ".10f"
tbl["Y-Centroid"].info.format = ".10f"
# Add one to the X and Y table values to put the data onto a one-based system,
# particularly for display with ds9
tbl["X-Centroid"] = tbl["X-Centroid"] + 1
tbl["Y-Centroid"] = tbl["Y-Centroid"] + 1
tbl.write(outname, format="ascii.commented_header")
log.info("Wrote region file '{}' containing {} sources".format(outname, len(tbl)))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def detect_and_eval_segments(self, imgarr, kernel, ncount, size_source_box, nsigma_above_bkg, background_img, background_rms, check_big_island_only=False, rw2d_biggest_source=0.015, rw2d_source_fraction=0.075):
# Compute the threshold to use for source detection
threshold = self.compute_threshold(nsigma_above_bkg, background_img, background_rms)
# Write out diagnostic data
if self.diagnostic_mode:
outname = self.imgname.replace(".fits", "_threshold" + str(ncount) + ".fits")
fits.PrimaryHDU(data=threshold).writeto(outname)
# Generate the segmentation map by detecting "sources" using the nominal settings.
# Use all the parameters here developed for the "custom kernel". Note: if the
# "custom kernel" did not work out, build_auto_kernel() drops back to a Gaussian.
# log.info('Kernel shape: {} source_box: {}'.format(g2d.shape, self._size_source_box))
segm_img = self.detect_segments(imgarr,
threshold,
ncount,
filter_kernel=kernel,
source_box=size_source_box,
mask=self.image.inv_footprint_mask)
# Check if custom_segm_image is None indicating there are no detectable sources in this
# total detection image. If value is None, a warning has already been issued. Issue
# a final message for this particular total detection product and return.
if segm_img is None:
log.warning("End processing for the segmentation catalog due to no sources detected with the current kernel.")
log.warning("No segmentation catalog will be produced for this total detection product, {}.".format(self.imgname))
is_big_crowded = True
big_island = 1.0
source_fraction = 1.0
else:
# Determine if the segmentation image is filled with big sources/islands (bs) or is crowded with a large
# source fraction (sf). Depending upon these measurements, it can take a very, very long time to deblend
# the sources.
is_big_crowded = True
is_big_crowded, big_island, source_fraction = self._evaluate_segmentation_image(segm_img,
imgarr,
big_island_only=check_big_island_only,
max_biggest_source=rw2d_biggest_source,
max_source_fraction=rw2d_source_fraction)
return segm_img, is_big_crowded, big_island, source_fraction
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def compute_threshold(self, nsigma, bkg_mean, bkg_rms):
"""Compute the threshold value above which sources are deemed detected.
Parameters
----------
nsigma : float
Multiplicative factor for the background RMS
bkg_mean : float image
Mean of the background determined image
bkg_rms : float image
RMS of the background determined image
Returns
-------
threshold: float image
Image which defines, on a pixel-by-pixel basis, the low limit above which
sources are detected.
"""
log.info("Computing the threshold value used for source detection.")
if not self.tp_masks:
threshold = bkg_mean + (nsigma * bkg_rms)
else:
threshold =
|
np.zeros_like(self.tp_masks[0]['rel_weight'])
|
numpy.zeros_like
|
import os
from termcolor import colored
import numpy as np
import matplotlib.pyplot as plt
import dill as pickle
import cv2
from helper_code.registration_funcs import get_background
# data labels
mice = ['P.1','P.2','P.3','P.4','P.5']
days = ['191127', '191128', '191129', '191130', '191201', '191202'] #'191126',
base_folder = 'D:\\Dropbox (UCL - SWC)\\DAQ\\upstairs_rig\\PS_mousetraining'
summary_plots_folder = 'D:\\data\\Summary Plots'
# options
plot_sessions = False
plot_data = True
show_video = False
plot_trajectory = True
bins = np.arange(-63, 64, 4.5) # idx 14 and 15 are stimulus
bins = np.arange(-63, 64, 2.25) # 29 and 30 are stimulus
trials_to_examine = 50
# open data dictionary
save_file = os.path.join(base_folder, 'foraging_data_IV')
with open(save_file, 'rb') as dill_file: foraging_dict = pickle.load(dill_file)
def get_biggest_contour(frame):
_, contours, _ = cv2.findContours(frame, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cont_count = len(contours)
big_cnt_ind = 0
if cont_count > 1:
areas = np.zeros(cont_count)
for c in range(cont_count):
areas[c] = cv2.contourArea(contours[c])
big_cnt_ind = np.argmax(areas)
cnt = contours[big_cnt_ind]
M = cv2.moments(cnt)
cx = int(M['m10'] / M['m00'])
cy = int(M['m01'] / M['m00'])
return contours, big_cnt_ind, cx, cy, cnt
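# Notes on get_biggest_contour (added): the centroid comes from the image moments of the
# largest contour (cx = M10/M00, cy = M01/M00), and the three-value return signature of
# cv2.findContours used above is the OpenCV 3.x API; OpenCV 4.x returns only
# (contours, hierarchy), so this helper would need a small tweak there.
# Illustrative call on a thresholded mouse mask (variable name as used later in this script):
#     _, _, x_center, y_center, _ = get_biggest_contour(frame_norm_mask)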
'''
examine the behavior video
'''
if show_video:
# foraging_dict['video trajectories'] = {}
# foraging_dict['trajectories'] = {}
# day = '191129'
# mouse = 'P.2'
# vid_path = 'D:\\Dropbox (UCL - SWC)\\DAQ\\upstairs_rig\\PS_mousetraining\\191129\\P.2\\Camera_rig5.mp4'
for mouse in mice:
for day in days:
session = day + '_' + mouse
if session in foraging_dict['video trajectories'] and session in foraging_dict['trajectories']: continue
vid_path = 'D:\\Dropbox (UCL - SWC)\\DAQ\\upstairs_rig\\PS_mousetraining\\' + day + '\\' + mouse + '\\Camera_rig5.mp4'
vid = cv2.VideoCapture(vid_path)
ret, frame = vid.read()
if not ret: continue
else: print(session)
vid_window = [-1, 9]
fps = 40
end_vids = False
save_vid = False
get_trajectory = True
# get stim times
sound_times = foraging_dict['sound_times'][session]
pump_times = foraging_dict['pump_times'][session]
lick_times = foraging_dict['lick_times_II'][session]
# camera_times = foraging_dict['camera_times'][session]
trajectories = np.zeros((2, 40*9, len(sound_times))) # x/y, 9 seconds, number of trials
# extract background
if get_trajectory:
background, _, _ = get_background(vid_path, start_frame=0, avg_over=100)
mask_thresh = .8 # .7
kernel = [4, 3]
kernel_er = np.ones((kernel[0], kernel[0]), np.uint8)
kernel_dil = np.ones((kernel[1], kernel[1]), np.uint8)
vid_frames = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
video_trajectory = np.zeros(vid_frames)
vid.set(cv2.CAP_PROP_POS_FRAMES, 0)
''' go thru entire thing '''
# for frame_num in range(vid_frames):
# ret, frame = vid.read()
# if ret:
# frame_norm = (frame[:, :, 0] / background)
# # use the thresholds, erosion, and dilation set above to extract a mask coinciding with the mouse
# frame_norm_mask = (frame_norm < mask_thresh).astype(np.uint8)
# # frame_norm_mask = cv2.erode(frame_norm_mask, kernel_er, iterations=1)
# # frame_norm_mask = cv2.dilate(frame_norm_mask, kernel_dil, iterations=3)
# # extract the largest contour in this mask -- this should correspond to the mouse
# try: _, _, x_center, y_center, _ = get_biggest_contour(frame_norm_mask)
# except: print('Contour failure')
# video_trajectory[frame_num] = x_center
# foraging_dict['video trajectories'][session] = video_trajectory
# loop across trials
for i, sound_time in enumerate(sound_times):
# if end_vids or i > 20: break
if save_vid:
# set up video writer
fourcc_data = cv2.VideoWriter_fourcc(*"XVID") # LJPG for lossless, XVID for compressed
save_video = cv2.VideoWriter('D:\\Dropbox (UCL - SWC)\\DAQ\\upstairs_rig\\PS_mousetraining\\191129\\P.2\\tone trial ' + str(i) + '.mp4', fourcc_data, 40, (1280, 1024), False)
# get relevant lick and pump times
relevant_pump_idx = ((pump_times - sound_time) > vid_window[0]) * ((pump_times - sound_time) < vid_window[1])
relevant_lick_idx = ((lick_times - sound_time) > vid_window[0]) * ((lick_times - sound_time) < vid_window[1])
relevant_pump_times = np.ceil((pump_times[relevant_pump_idx] - sound_time) * fps)
relevant_lick_times = np.ceil((lick_times[relevant_lick_idx] - sound_time) * fps)
pump_on = 0
lick_on = 0
stim_frame = np.round(sound_time * fps)
# proper_frame = np.argmin(abs(camera_times - sound_time))
# print(str(int(stim_frame)) + ' -- ' + str(proper_frame))
vid.set(cv2.CAP_PROP_POS_FRAMES, stim_frame)
# loop across frames
for rel_frame_num in range(vid_window[0]*fps, vid_window[1]*fps):
ret, frame = vid.read()
if ret:
''' get centroid of mouse '''
if get_trajectory and rel_frame_num >= 0 and rel_frame_num < 40*9:
frame_norm = (frame[:,:,0] / background)
# use the thresholds, erosion, and dilation set above to extract a mask coinciding with the mouse
frame_norm_mask = (frame_norm < mask_thresh).astype(np.uint8)
frame_norm_mask = cv2.erode(frame_norm_mask, kernel_er, iterations=1)
frame_norm_mask = cv2.dilate(frame_norm_mask, kernel_dil, iterations=3)
# extract the largest contour in this mask -- this should correspond to the mouse
_, _, x_center, y_center, _ = get_biggest_contour(frame_norm_mask)
trajectories[:, rel_frame_num, i] = np.array([x_center, y_center])
else: x_center, y_center = 0, 0
if save_vid:
# add text of seconds rel to stim
cv2.putText(frame, str(rel_frame_num / fps), (20, 50), 0, 1, 255, thickness=2)
# say when pump goes
if rel_frame_num in relevant_pump_times or pump_on:
cv2.putText(frame, 'GET PUMPED!!!', (20, 100), 0, 1, 255, thickness=2)
if pump_on: pump_on -= 1
else: pump_on = 80
# say when TONGUE goes
if rel_frame_num in relevant_lick_times or lick_on:
cv2.putText(frame, '~~LICK DAT~~', (20, 200), 0, 1, 255, thickness=2)
if lick_on: lick_on -= 1
else: lick_on = 5
if rel_frame_num > 0:
cv2.putText(frame, '----------SOUND ON-----------------------', (20, 400), 0, 1, 255, thickness=2)
if x_center:
cv2.circle(frame, (x_center, y_center), 10, 255, -1)
# show frame
cv2.imshow(session + ' - trial ' + str(i), frame)
save_video.write(frame[:,:,0])
# time frames / stop
if cv2.waitKey(10) & 0xFF == ord('q'):
end_vids = True
break
if save_vid: save_video.release()
foraging_dict['trajectories'][session] = trajectories
save_file = os.path.join(base_folder, 'foraging_data_IV')
with open(save_file, "wb") as dill_file: pickle.dump(foraging_dict, dill_file)
''' PLOT TRAJECTORIES '''
show_trajectories = False
traj_success_array = np.zeros((len(mice), len(days)-1))
pseudo_traj_success_array = np.zeros((len(mice), len(days)-1))
seconds_to_examine = 6.75
lick_location = 950 #950
start_location = 500 #550
if plot_trajectory:
for m, mouse in enumerate(mice):
for d, day in enumerate(days):
session = day + '_' + mouse
if session in foraging_dict['trajectories']: print(session)
else: continue
# get trajectories
trajectories = foraging_dict['trajectories'][session][:,:,:50]
num_trials = trajectories.shape[2]
if show_trajectories:
# create figure
fig, ax = plt.subplots(figsize = (9,6))
ax.set_title(session + ' trajectories')
ax.set_ylim([num_trials, -1])
shift = 0
# x/y, 9 seconds, number of trials
for trial in range(num_trials):
# get the start x pos
x_start = np.min((1000, trajectories[0,0,trial]))
if x_start > 900: shift+=1; continue
# get the rightmost x pos in 9 sec
x_furthest = np.min((1000, np.max(trajectories[0,:int(4.5*40),trial])))
# plot a line between the two
ax.plot([x_start, x_furthest], [trial - shift, trial - shift], color = [0,0,0])
# get stats
# num of trials starting on the left
eligible_trial_idx = trajectories[0,0,:] < start_location
num_eligible_trials = np.sum(eligible_trial_idx)
# get rightmost point
rightmost_point = np.max(trajectories[0,:int(seconds_to_examine*40),:][:, eligible_trial_idx], axis = 0)
# num of trials going to right
num_go_right_trials = np.sum(rightmost_point > lick_location)
# print/save results
print(session + ': ' + str(num_go_right_trials) + ' / ' + str(num_eligible_trials))
if d > 1 and mouse == 'P.2': d-=1
elif d > 2: d-= 1
traj_success_array[m, d] = num_go_right_trials / num_eligible_trials
# get stats
# points where start < 500
num_pseudo_trials = 10000
pseudo_trajectories = np.zeros((9*40, num_pseudo_trials))
video_trajectory = foraging_dict['video trajectories'][session]
eligible_frame_idx = np.where(video_trajectory[:-9*40] < start_location)[0]
# choose start frames for trials
pseudo_trial_idx = np.random.choice(eligible_frame_idx, num_pseudo_trials)
for p, pt in enumerate(pseudo_trial_idx):
pseudo_trajectories[:, p] = video_trajectory[pt:pt+9*40]
# get rightmost point
pseudo_rightmost_point = np.max(pseudo_trajectories[:int(seconds_to_examine*40),:], axis = 0)
# num of trials going to right
psuedo_num_go_right_trials = np.sum(pseudo_rightmost_point > lick_location)
# print/save results
print(session + ': ' + str(psuedo_num_go_right_trials) + ' / ' + str(num_pseudo_trials))
pseudo_traj_success_array[m, d] = psuedo_num_go_right_trials / num_pseudo_trials
# plot stats
fig, ax = plt.subplots(figsize=(6, 6))
ax.set_title('% successful trajectories')
for d in range(traj_success_array.shape[1]):
# plot mean
ax.scatter(d + 1, np.mean(traj_success_array[traj_success_array[:, d]>0, d]), s = 30, color = [0,1,1,.5])
# plot all points
ax.scatter(np.ones(len(mice))*d+1, traj_success_array[:, d], s = 10, color = [0,0,0])
# plot stats
fig, ax = plt.subplots(figsize=(6, 6))
ax.set_title('Relative likelihood of running to lick port')
ax.set_ylim([.3, 2.6])
ax.set_xlim([.8, 5.2])
ax.plot([0, 6], [1, 1], color=[0, 0, 0, .3], linestyle='--')
for m in range(traj_success_array.shape[0]):
# plot progression
ax.plot(np.arange(1, 6), traj_success_array[m, :] / pseudo_traj_success_array[m, :], color=[0, .6, .6, .7], linewidth=3)
# plot mean across all mice
mean_rel_success = np.nanmean(traj_success_array / pseudo_traj_success_array, axis = 0)
ax.plot(np.arange(1, 6), mean_rel_success, color=[0, 0, 1], linewidth = 5)
plt.show()
day1 = [[t] for t in traj_success_array[:, 0] / pseudo_traj_success_array[:, 0]]
day5 = [[t] for t in traj_success_array[:, -1] / pseudo_traj_success_array[:, -1]]
from important_code.shuffle_test import permutation_test_paired, permutation_test
permutation_test(day5, day1, iterations = 10000, two_tailed = False)
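# Minimal sketch (added) of what an unpaired, one-tailed permutation test such as the call
# above is assumed to do: pool the day-5 and day-1 ratios, repeatedly reshuffle the labels,
# and report the fraction of shuffles whose difference in means is at least as large as the
# observed one.
#     observed = np.mean(day5) - np.mean(day1)
#     pooled = np.concatenate([np.ravel(day5), np.ravel(day1)])
#     count = 0
#     for _ in range(10000):
#         np.random.shuffle(pooled)
#         count += (np.mean(pooled[:len(day5)]) - np.mean(pooled[len(day5):])) >= observed
#     p_value = count / 10000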
''' MAKE LICK PROB ARRAYS '''
# initialize data arrays
if plot_data:
lick_counts_all = {}
lick_prob_all = {}
foraging_dict['lick_times_II'] = {}
for mouse in mice:
lick_counts_all[mouse] = np.zeros((5, len(bins) - 1))
lick_prob_all[mouse] = np.zeros((5, len(bins) - 1))
# loop over mice
for mouse in mice:
day_num = 0
# loop over days
for day in days:
# get session name
session = day + '_' + mouse
print(session)
# skip sessions that didn't happen
if not session in foraging_dict['session_duration']: continue
# extract data
session_duration = foraging_dict['session_duration'][session]
pump_times = foraging_dict['pump_times'][session]
lick_times = foraging_dict['lick_times'][session]
lick_times_all = foraging_dict['lick_times_II'][session]
lick_durations = foraging_dict['lick_duration'][session]
sound_times = foraging_dict['sound_times'][session]
sound_durations = foraging_dict['sound_duration'][session]
num_trials = len(sound_times)
# print(np.median(np.diff(sound_times[:trials_to_examine] / 60)))
if plot_data:
lick_counts = np.zeros(len(bins)-1)
lick_prob = np.zeros(len(bins)-1)
for i, sound_time in enumerate(sound_times[:trials_to_examine]):
# find relevant LICKS
relevant_lick_idx = ((lick_times - sound_time) > -65) * ((lick_times - sound_time) < 65)
relevant_lick_times = lick_times[relevant_lick_idx]
relevant_lick_durations = lick_durations[relevant_lick_idx]
added_licks = np.array([])
for j, lick in enumerate(relevant_lick_times):
duration = relevant_lick_durations[j]
if duration > .2:
lick_times_all = np.concatenate((lick_times_all, lick + np.arange(.2, duration, .2)))
relevant_lick_idx = ((lick_times_all - sound_time) > -65) * ((lick_times_all - sound_time) < 65)
relevant_lick_times = lick_times_all[relevant_lick_idx]
# copy licks corresponding to how long the lick bout is
# relevant_lick_times_II = np.array([])
# for j, time in enumerate(relevant_lick_times):
# num_licks = int(min(2.25, relevant_lick_durations[j]) * 7)
# relevant_licks_copied = np.ones(num_licks) * time
# relevant_lick_times_II = np.concatenate((relevant_lick_times_II, relevant_licks_copied))
# get hist of relevant LICKS
counts, bins = np.histogram(relevant_lick_times-sound_time, bins = bins)
# put into data arrays
lick_counts = lick_counts + counts
lick_prob = lick_prob + (counts>0)/num_trials
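                # Note (added): lick_counts accumulates the raw per-bin lick histogram across
                # trials, while lick_prob adds 1/num_trials for every bin that received at least
                # one lick on this trial, i.e. it estimates the fraction of trials with a lick in
                # each time bin.  Note that num_trials counts all trials in the session even
                # though only the first `trials_to_examine` trials are looped over here.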
# add to global data arrays
lick_counts_all[mouse][day_num, :] = lick_counts
lick_prob_all[mouse][day_num, :] = lick_prob
day_num += 1
foraging_dict['lick_times_II'][session] = lick_times_all
if plot_sessions:
# plot raster of sound, pump, lick
fig1, ax1 = plt.subplots(figsize=(12, 8))
ax1.set_ylim([len(sound_times)+1, -1])
ax1.set_xlim([-20, 20])
ax1.set_xlabel('Time relative to sound onset (sec)')
ax1.set_ylabel('Trial number')
ax1.set_title(session + ' training results IV')
for i, sound_time in enumerate(sound_times):
# find relevant PUMP TIMES
relevant_pump_idx = ((pump_times - sound_time) > 0) * ((pump_times - sound_time) < 10)
relevant_pump_times = pump_times[relevant_pump_idx]
# plot TONE (different color depending on trial result)
if not np.sum(relevant_pump_idx): color = [0, 0, 0, .1]
elif np.sum(relevant_pump_idx): color = [0, 1, 0, .2]
# find relevant LICK TIMES
relevant_lick_idx = ((lick_times_all - sound_time) > 0) * ((lick_times_all - sound_time) < 9)
relevant_lick_times = lick_times_all[relevant_lick_idx]
# plot TONE (different color depending on trial result)
if not np.sum(relevant_lick_idx): color = [0, 0, 0, .1]
elif np.sum(relevant_lick_idx): color = [0, 1, 0, .2]
tone_on = plt.Rectangle((0, i),9, .8, color=color, linewidth = 0, fill=True) # sound_durations[i] -> 9
ax1.add_artist(tone_on)
# find relevant LICKS
relevant_lick_idx = ((lick_times_all - sound_time) > -60) * ((lick_times_all - sound_time) < 30)
relevant_lick_times = lick_times_all[relevant_lick_idx]
# plot the LICKS
ax1.eventplot(relevant_lick_times - sound_time, color=[0, 0, 0], lineoffsets=i + .4, linelengths=.8)
# plot the PUMP ACTION
ax1.eventplot(relevant_pump_times - sound_time, color=[1,0,0], lineoffsets=i + .4, linelengths=.8)
# save figure
fig1.savefig(os.path.join(summary_plots_folder, session + ' training IV.png'), format='png')
fig1.savefig(os.path.join(summary_plots_folder, session + ' training IV.eps'), format='eps')
'''
PLOT OVERALL TRAINING DATA
'''
if plot_data:
# plot relative lick rates
fig3, ax3 = plt.subplots(figsize=(12, 6))
ax3.set_ylim([0, 5.1])
ax3.set_xlim([.8, 5.2])
ax3.plot([0,6],[1,1],color=[0,0,0,.3], linestyle = '--')
ax3.set_xlabel('Session no.')
ax3.set_xticks(np.arange(1, 6))
ax3.set_ylabel('Lick probability during stimulus relative to baseline period')
ax3.set_title('Lick prob across days')
bins_pre = np.arange(0, 7)
bins_pre = np.arange(11,14)
bins_tone = 14
bins_pre = np.arange(20,29)
bins_tone = [29,30]
# bins_pre = np.concatenate((np.arange(20,29), np.arange(33, 42)))
relative_licks_all = np.zeros((len(mice), 5))
# loop across mice
for m, mouse in enumerate(mice):
# get licks data
if len(bins_tone) == 1: licks_tone = lick_prob_all[mouse][:, bins_tone[0]]
else: licks_tone = np.mean(lick_prob_all[mouse][:, bins_tone], 1)
licks_pre =
|
np.mean(lick_prob_all[mouse][:, bins_pre], 1)
|
numpy.mean
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=no-member
""" Functions
__author__: <NAME>, <NAME>, <NAME>
"""
import itertools
import numpy as np
import torch
from scipy import special
from inferactively.core import utils
def spm_dot(X, x, dims_to_omit=None, obs_mode=False):
""" Dot product of a multidimensional array with `x`
The dimensions in `dims_to_omit` will not be summed across during the dot product
@TODO: we should look for an alternative to obs_mode
Parameters
----------
    - `X` [numpy.ndarray] - the (multi-dimensional) array that `x` is dotted with
    - `x` [1D numpy.ndarray] - either vector or array of arrays
The alternative array to perform the dot product with
- `dims_to_omit` [list :: int] (optional)
Which dimensions to omit
Returns
-------
- `Y` [1D numpy.ndarray] - the result of the dot product
"""
# construct dims to perform dot product on
if utils.is_arr_of_arr(x):
dims = (np.arange(0, len(x)) + X.ndim - len(x)).astype(int)
else:
if obs_mode is True:
"""
@NOTE Case when you're getting the likelihood of an observation under the generative model.
Equivalent to something like self.values[np.where(x),:]
when `x` is a discrete 'one-hot' observation vector
"""
dims = np.array([0], dtype=int)
else:
"""
@NOTE Case when `x` leading dimension matches the lagging dimension of `values`
E.g. a more 'classical' dot product of a likelihood with hidden states
"""
dims = np.array([1], dtype=int)
x = utils.to_arr_of_arr(x)
# delete ignored dims
if dims_to_omit is not None:
if not isinstance(dims_to_omit, list):
raise ValueError("`dims_to_omit` must be a `list` of `int`")
dims = np.delete(dims, dims_to_omit)
if len(x) == 1:
x = np.empty([0], dtype=object)
else:
x = np.delete(x, dims_to_omit)
# compute dot product
for d in range(len(x)):
s = np.ones(np.ndim(X), dtype=int)
s[dims[d]] = np.shape(x[d])[0]
X = X * x[d].reshape(tuple(s))
X = np.sum(X, axis=dims[d], keepdims=True)
Y = np.squeeze(X)
# check to see if `Y` is a scalar
if np.prod(Y.shape) <= 1.0:
Y = Y.item()
Y = np.array([Y]).astype("float64")
return Y
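# --- Illustrative usage sketch (added for clarity; not part of the original module).
# The helper name `_demo_spm_dot` and its values are assumptions made up for this example.
# For a 2-D likelihood A (observations x states) and a categorical state vector qs,
# spm_dot(A, qs) should reduce to the ordinary matrix-vector product A @ qs.
def _demo_spm_dot():
    A = np.array([[0.9, 0.1],
                  [0.1, 0.9]])
    qs = np.array([0.5, 0.5])
    assert np.allclose(spm_dot(A, qs), A @ qs)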
def spm_cross(X, x=None, *args):
""" Multi-dimensional outer product
Parameters
----------
- `x` [np.ndarray] || [Categorical] (optional)
The values to perform the outer-product with. If empty, then the
outer-product is taken between X and itself. If x is not empty, then outer product is
taken between X and the various dimensions of x.
- `args` [np.ndarray] || [Categorical] (optional)
Remaining arrays to perform outer-product with. These extra arrays are recursively multiplied
with the 'initial' outer product (that between X and x).
Returns
-------
- `y` [np.ndarray] || [Categorical]
The result of the outer-product
"""
if len(args) == 0 and x is None:
if utils.is_arr_of_arr(X):
Y = spm_cross(*list(X))
elif np.issubdtype(X.dtype, np.number):
Y = X
return Y
if utils.is_arr_of_arr(X):
X = spm_cross(*list(X))
if x is not None and utils.is_arr_of_arr(x):
x = spm_cross(*list(x))
reshape_dims = tuple(list(X.shape) + list(np.ones(x.ndim, dtype=int)))
A = X.reshape(reshape_dims)
reshape_dims = tuple(list(np.ones(X.ndim, dtype=int)) + list(x.shape))
B = x.reshape(reshape_dims)
Y = np.squeeze(A * B)
for x in args:
Y = spm_cross(Y, x)
return Y
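# --- Illustrative usage sketch (added for clarity; not part of the original module).
# The helper name `_demo_spm_cross` is hypothetical. For two plain categorical vectors,
# the multi-dimensional outer product should coincide with np.outer.
def _demo_spm_cross():
    a = np.array([0.3, 0.7])
    b = np.array([0.6, 0.4])
    assert np.allclose(spm_cross(a, b), np.outer(a, b))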
def spm_norm(A):
"""
Returns normalization of Categorical distribution,
stored in the columns of A.
"""
A = A + 1e-16
normed_A = np.divide(A,A.sum(axis=0))
return normed_A
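# --- Illustrative usage sketch (added for clarity; not part of the original module).
# The helper name `_demo_spm_norm` is hypothetical: after normalization, every column
# of the array should sum to (approximately) one.
def _demo_spm_norm():
    A = np.array([[2.0, 1.0],
                  [6.0, 3.0]])
    assert np.allclose(spm_norm(A).sum(axis=0), 1.0)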
def spm_wnorm(A):
"""
Returns Expectation of logarithm of Dirichlet parameters over a set of Categorical distributions,
stored in the columns of A.
"""
A = A + 1e-16
norm = np.divide(1.0, np.sum(A, axis=0))
avg = np.divide(1.0, A)
wA = norm - avg
return wA
def spm_betaln(z):
""" Log of the multivariate beta function of a vector.
@NOTE this function computes across columns if `z` is a matrix
"""
return np.sum(special.gammaln(z), axis=0) - special.gammaln(np.sum(z, axis=0))
def calc_free_energy(qs, prior, n_factors, likelihood=None):
""" Calculate variational free energy
@TODO Primarily used in FPI algorithm, needs to be made general
"""
free_energy = 0
for factor in range(n_factors):
negH_qs = qs[factor].dot(np.log(qs[factor][:, np.newaxis] + 1e-16)) # (neg)entropy of posterior marginal H(q[f])
xH_qp = -qs[factor].dot(prior[factor][:, np.newaxis]) # cross entropy of posterior marginal with prior marginal H(q[f],p[f])
free_energy += negH_qs + xH_qp
if likelihood is not None:
accuracy = spm_dot(likelihood, qs)[0]
free_energy -= accuracy
return free_energy
def softmax(dist, return_numpy=True):
""" Computes the softmax function on a set of values
"""
if utils.is_distribution(dist):
if dist.IS_AOA:
output = []
for i in range(len(dist.values)):
    output.append(softmax(dist.values[i], return_numpy=True))
output = utils.to_categorical(np.array(output))
else:
dist = np.copy(dist.values)
output = dist - dist.max(axis=0)
output = np.exp(output)
output = output / np.sum(output, axis=0)
if return_numpy:
return output
else:
return utils.to_categorical(output)
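# --- Illustrative usage sketch (added for clarity; not part of the original module).
# The helper name `_demo_softmax` is hypothetical: for a plain 2-D array the softmax
# is taken over columns, so every column of the output should sum to one.
def _demo_softmax():
    logits = np.array([[1.0, 2.0],
                       [3.0, 0.5]])
    out = softmax(logits, return_numpy=True)
    assert np.allclose(out.sum(axis=0), 1.0)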
def kl_divergence(q, p):
""" Calculate KL divdivergence between two distributions
@TODO: make this work for multi-dimensional arrays
"""
q.remove_zeros()
p.remove_zeros()
q = np.copy(q.values)
p = np.copy(p.values)
kl = np.sum(q * np.log(q / p), axis=0)[0]
return kl
def spm_MDP_G(A, x):
"""
Calculates the Bayesian surprise in the same way as spm_MDP_G.m does in
the original matlab code.
Parameters
----------
A (numpy ndarray or array-object):
array assigning likelihoods of observations/outcomes under the various hidden state configurations
x (numpy ndarray or array-object):
Categorical distribution presenting probabilities of hidden states (this can also be interpreted as the
predictive density over hidden states/causes if you're calculating the
expected Bayesian surprise)
Returns
-------
G (float):
the (expected or not) Bayesian surprise under the density specified by x --
namely, this scores how much an expected observation would update beliefs about hidden states
x, were it to be observed.
"""
if A.dtype == "object":
Ng = len(A)
AOA_flag = True
else:
Ng = 1
AOA_flag = False
# probability distribution over the hidden causes: i.e., Q(x)
qx = spm_cross(x)
G = 0
qo = 0
idx = np.array(np.where(qx > np.exp(-16))).T
if AOA_flag:
# accumulate expectation of entropy: i.e., E[lnP(o|x)]
for i in idx:
# probability over outcomes for this combination of causes
po = np.ones(1)
for g in range(Ng):
index_vector = [slice(0, A[g].shape[0])] + list(i)
po = spm_cross(po, A[g][tuple(index_vector)])
po = po.ravel()
qo += qx[tuple(i)] * po
G += qx[tuple(i)] * po.dot(np.log(po +
|
np.exp(-16)
|
numpy.exp
|
# coding: utf-8
# # Multiple variable linear regression
# ## Make necessary imports
# In[1]:
import numpy as np
np.set_printoptions(precision=3, linewidth=200, suppress=True)
import library.data as data
import library.linear_regression as lr
import math
from copy import deepcopy
print_output = True
# ## Make Prediction on test data
# In[2]:
def make_predictions(clf, test_id, test_features, output_file_name, verbose=False):
print('Predict the output for test set')
test_labels = clf.predict(test_features)
submit_test = np.column_stack((test_id, test_labels))
# fmt = ','.join(['%d'] + ['%3.3f'])
if verbose is True:
print('Write predictions to ' + output_file_name)
print(submit_test[:5,:])
fmt = ','.join(['%d'] + ['%3.3f'] * (submit_test.shape[1]-1))
return submit_test, fmt
# ## Validate your classifier against validation data
# In[3]:
def validate_classifier(clf, validate_features, validate_labels, verbose=False):
print('Predict the output for validate set')
predicted_labels = clf.predict(validate_features)
if verbose is True:
print('Coefficients: ' + str(clf.theta.flatten()))
print('Calculating score of the regressor')
clf.error_type = 'r2'; r2_value = clf.score(validate_labels, predicted_labels)
clf.error_type = 'exp_var'; exp_var_value = clf.score(validate_labels, predicted_labels)
clf.error_type = 'mean_abs'; mean_abs_value = clf.score(validate_labels, predicted_labels)
clf.error_type = 'rmse'; rmse_value = clf.score(validate_labels, predicted_labels)
clf.error_type = 'med_abs'; med_abs_value = clf.score(validate_labels, predicted_labels)
if verbose is True:
print('R^2 score: %.3f' % (r2_value))
print('Explained variance score: %.3f' % (exp_var_value))
print('Mean absolute error: %.3f' % (mean_abs_value))
print('Root mean squared error: %.3f' % (rmse_value))
print('Median absolute error: %.3f' % (med_abs_value))
print()
return rmse_value
# ## Import data from csv file
# In[4]:
train_file_name = './data/train.csv'
print('Importing data from \'%s\'' %train_file_name)
print('Reading train dataset from \'%s\' ' % train_file_name )
features, labels, attribute_names = data.read_data_from_csv(csv_file=train_file_name, label_name='MEDV')
attribute_list = list(attribute_names)
ids = features[:, 0]
features = features[:, 1:]
features_list = list(range(0,13))
if print_output is True:
print('Size of features: ' + str(features.shape))
print('Size of labels: ' + str(labels.shape))
print('Features')
print(attribute_list)
print(features[:5,:])
print()
# ## Remove outliers
# In[5]:
index_outliers = [369, 373, 372, 413]
print('Removing the following outliers: ', end='')
print(index_outliers)
features = np.delete(features, index_outliers, axis=0)
labels = np.delete(labels, index_outliers, axis=0)
ids = np.delete(ids, index_outliers, axis=0)
# ## Read test data
# In[6]:
test_file_name = './data/test.csv'
print('Reading test dataset from \'%s\' ' % test_file_name )
test_features, test_labels, attribute_names = data.read_data_from_csv(csv_file=test_file_name, label_name='')
if print_output is True:
print('Size of test_features: ' + str(test_features.shape))
print('Test features')
print(test_features[:5,:])
test_id = test_features[:,0]
test_features = test_features[:,1:]
test_features = test_features[:, features_list]
if print_output is True:
print('Test IDs')
print(test_id[1:5])
print('Test features')
print(test_features[1:5,:])
print()
# ## Run the linear regressor with cross validation data using L<sub>2</sub> norm gradient descent
# In[8]:
print('Performing linear regression with cross validation data using L2 norm gradient descent')
num_folds = 5
tf_s, tl_s, vf_s, vl_s = data.cross_validate_split(ids, features, labels, num_folds=num_folds)
num_splits = tf_s.shape[0]
clf = lr.LinearRegression(bias=True, iterations=100000, alpha=0.9, reg_const=0.009,
method='batch', norm_data= False, regularize=True,
norm=2, verbose=False, w_init='uniform', tolerance=0.0000000001)
clf.print_parameters()
# Normalizing the data
norm_test_features = data.normalize_data(test_features)
# Squaring the features
sq_test_features = deepcopy(norm_test_features)
sq_test_features = np.power(sq_test_features, 2)
# Appending the normalized original data to the square normalized data
new_test_features = np.column_stack((norm_test_features, sq_test_features))
split = 0
min_rmse = math.inf
best_split = 0
for i in range(num_splits):
print('Split no.: ' + str(i+1))
print('Estimate parameters for the train data')
tf = tf_s[i, :, :]
tl = tl_s[i, :].reshape(tl_s[i, :].shape[0], 1)
vf = vf_s[i, :, :]
vl = vl_s[i, :].reshape(vl_s[i, :].shape[0], 1)
norm_tf = data.normalize_data(tf)
norm_vf = data.normalize_data(vf)
sq_tf = np.power(norm_tf, 2)
sq_vf = np.power(norm_vf, 2)
new_tf = np.column_stack((norm_tf, sq_tf))
new_vf = np.column_stack((norm_vf, sq_vf))
clf.fit(new_tf, tl)
rmse_value = validate_classifier(clf, new_vf, vl)
if rmse_value < min_rmse:
best_split = i
output_file_name = './output.csv'
submit_test, fmt = make_predictions(clf, test_id, new_test_features, output_file_name)
data.write_data_to_csv(submit_test, fmt, output_file_name, 'ID,MEDV\n')
min_rmse = rmse_value
print()
clf.theta_init = 'uniform'
print('Best split occurs at %d split, Min. RMSE: %.4f' %(best_split,min_rmse))
# ## Output for L<sub>p</sub> norm for various p
# In[9]:
print('Printing linear regression for Lp norms')
num_folds = 5
tf_s, tl_s, vf_s, vl_s = data.cross_validate_split(ids, features, labels, num_folds=num_folds)
num_splits = tf_s.shape[0]
clf = lr.LinearRegression(bias=True, iterations=52000, alpha=0.9, reg_const=0.009,
method='batch', norm_data= False, regularize=True,
norm=2, verbose=False, w_init='uniform')
# Normalizing the data
norm_test_features = data.normalize_data(test_features)
# Squaring the features
sq_test_features = np.power(norm_test_features, 2)
# Appending the normalized original data to the square normalized data
new_test_features = np.column_stack((norm_test_features, sq_test_features))
clf_norm = lr.LinearRegression(bias=True, iterations=52000, alpha=0.9, reg_const=0.009,
method='batch', norm_data= False, regularize=True,
norm=2, verbose=False, w_init='uniform')
p_values = [1.2, 1.5, 1.8]
iter_p = 1
for p in p_values:
print('Finding best weights for L_' + str(p))
min_rmse = math.inf
for i in range(num_splits):
print('Split no.: ' + str(i+1))
print('Estimate parameters for the train data')
tf = tf_s[i, :, :]
tl = tl_s[i, :].reshape(tl_s[i, :].shape[0], 1)
vf = vf_s[i, :, :]
vl = vl_s[i, :].reshape(vl_s[i, :].shape[0], 1)
norm_tf = data.normalize_data(tf)
norm_vf = data.normalize_data(vf)
sq_tf = np.power(norm_tf, 2)
sq_vf = np.power(norm_vf, 2)
new_tf = np.column_stack((norm_tf, sq_tf))
new_vf = np.column_stack((norm_vf, sq_vf))
clf.theta_init = 'uniform'
clf_norm.norm = p
clf_norm.print_parameters()
print('Estimate parameters for the train data')
clf_norm.fit(new_tf, tl)
print('Coefficients: ' + str(clf_norm.theta.flatten()))
rmse_value = validate_classifier(clf_norm, new_vf, vl)
if rmse_value < min_rmse:
output_file_name = './output_p' + str(iter_p) + '.csv'
submit_test, fmt = make_predictions(clf_norm, test_id, new_test_features, output_file_name)
data.write_data_to_csv(submit_test, fmt, output_file_name, 'ID,MEDV\n')
min_rmse = rmse_value
print()
print('RMSE for L_%.1f is %.4f' %(p,min_rmse))
iter_p += 1
# ## Gradient descent for L<sub>2</sub> vs matrix inversion
# In[10]:
print('Comparing between L2 linear regression and matrix inversion')
print('L2 Linear Regression')
num_folds_array = [5]
min_rmse = math.inf
for num_folds in num_folds_array:
tf_s, tl_s, vf_s, vl_s = data.cross_validate_split(ids, features, labels, num_folds=num_folds)
num_splits = tf_s.shape[0]
clf = lr.LinearRegression(bias=True, iterations=52000, alpha=0.9, reg_const=0.009,
method='batch', norm_data= False, regularize=True,
norm=2, verbose=False, w_init='uniform')
for i in range(num_splits):
print('Split no.: ' + str(i+1))
print('Estimate parameters for the train data')
tf = tf_s[i, :, :]
tl = tl_s[i, :].reshape(tl_s[i, :].shape[0], 1)
vf = vf_s[i, :, :]
vl = vl_s[i, :].reshape(vl_s[i, :].shape[0], 1)
norm_tf = data.normalize_data(tf)
norm_vf = data.normalize_data(vf)
sq_tf = np.power(norm_tf, 2)
sq_vf = np.power(norm_vf, 2)
new_tf = np.column_stack((norm_tf, sq_tf))
new_vf = np.column_stack((norm_vf, sq_vf))
clf.fit(new_tf, tl)
rmse_value = validate_classifier(clf, new_vf, vl)
if rmse_value < min_rmse:
output_file_name = ''
submit_test, fmt = make_predictions(clf, test_id, new_test_features, output_file_name)
min_rmse = rmse_value
print('Min RMSE: %.4f' %min_rmse)
print()
# In[11]:
print('Matrix inversion')
num_folds_array = [5]
min_rmse = math.inf
for num_folds in num_folds_array:
tf_s, tl_s, vf_s, vl_s = data.cross_validate_split(ids, features, labels, num_folds=num_folds)
num_splits = tf_s.shape[0]
for i in range(num_splits):
clf_norm = lr.LinearRegression(bias=True, iterations=52000, reg_const=0.009,
method='matinv', norm_data= True, regularize=True, verbose=False)
print('Estimate parameters for the train data')
tf = tf_s[i, :, :]
tl = tl_s[i, :].reshape(tl_s[i, :].shape[0], 1)
vf = vf_s[i, :, :]
vl = vl_s[i, :].reshape(vl_s[i, :].shape[0], 1)
norm_tf = data.normalize_data(tf)
norm_vf = data.normalize_data(vf)
sq_tf =
|
np.power(norm_tf, 2)
|
numpy.power
|
"""Integration tests for :mod:`esmvaltool.diag_scripts.mlr.custom_sklearn`."""
import numpy as np
import pytest
from sklearn.base import clone
from sklearn.compose import TransformedTargetRegressor
from sklearn.decomposition import PCA
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler
from esmvaltool.diag_scripts.mlr.custom_sklearn import (
AdvancedPipeline,
AdvancedTransformedTargetRegressor,
_get_fit_parameters,
)
X_TRAIN = np.array([[3.0], [6.0], [10.0]])
Y_TRAIN = np.array([10.0, 20.0, 30.0])
np.set_printoptions(precision=10)
STEPS_1 = [('a', 1)]
STEPS_2 = [('a', 1), ('b', 0)]
TEST_GET_FIT_PARAMETERS = [
({'a': 1}, STEPS_1, ValueError),
({'a': 1, 'a__b': 1}, STEPS_1, ValueError),
({'a__x': 1}, [], ValueError),
({'a__x': 1}, STEPS_1, {'a': {'x': 1}}),
({'a__x': 1, 'a__y': 2}, STEPS_1, {'a': {'x': 1, 'y': 2}}),
({'a__x': 1, 'a__y__z': 2}, STEPS_1, {'a': {'x': 1, 'y__z': 2}}),
({'a__x': 1, 'b__y': 2}, STEPS_1, ValueError),
({'a__x': 1, 'b__y': 2}, STEPS_2, {'a': {'x': 1}, 'b': {'y': 2}}),
]
@pytest.mark.parametrize('kwargs,steps,output', TEST_GET_FIT_PARAMETERS)
def test_get_fit_parameters(kwargs, steps, output):
"""Test retrieving of fit parameters."""
if isinstance(output, type):
with pytest.raises(output):
_get_fit_parameters(kwargs, steps, 'x')
return
params = _get_fit_parameters(kwargs, steps, 'x')
assert params == output
class StdLinearRegression(LinearRegression):
"""Expand :class:`sklearn.linear_model.CoolLinearRegression`."""
def predict(self, x, return_std=False):
"""Expand :meth:`predict`."""
pred = super().predict(x)
if return_std:
err = np.ones(x.shape[0], dtype=x.dtype)
return (pred, err)
return pred
class VarLinearRegression(LinearRegression):
"""Expand :class:`sklearn.linear_model.CoolLinearRegression`."""
def predict(self, x, return_var=False, return_cov=False, err_2d=False):
"""Expand :meth:`predict`."""
pred = super().predict(x)
if return_var:
err = np.ones(x.shape[0], dtype=x.dtype)
if err_2d:
err = err.reshape(-1, 1)
return (pred, err)
if return_cov:
err = np.ones((x.shape[0], x.shape[0]), dtype=x.dtype)
return (pred, err)
return pred
class NonStandardScaler(StandardScaler):
"""Expand :class:`sklearn.preprocessing.StandardScaler`."""
def fit(self, x, y=None, f=0.0):
"""Expand :meth:`fit`."""
return_value = super().fit(x, y)
if self.mean_ is not None:
self.mean_ += f
return return_value
class TestAdvancedPipeline():
"""Tests for :class:`esmvaltool.diag_scripts.mlr.AdvancedPipeline`."""
AREG = AdvancedTransformedTargetRegressor(
transformer=NonStandardScaler(),
regressor=LinearRegression(),
)
REG = TransformedTargetRegressor(
transformer=NonStandardScaler(),
regressor=LinearRegression(),
)
STEPS = [
[('t', NonStandardScaler())],
[('t', NonStandardScaler()), ('r', LinearRegression())],
[('t', NonStandardScaler()), ('r', REG)],
[('t', NonStandardScaler()), ('r', AREG)],
[('t', NonStandardScaler()), ('r', AREG)],
[('t', NonStandardScaler()), ('r', AREG)],
[('t', NonStandardScaler()), ('r', AREG)],
]
PIPELINES = [AdvancedPipeline(step) for step in STEPS]
KW_X0 = {'a': 1, 't__f': 2.0}
KW_X1 = {'b__a': 1, 't__f': 2.0}
KW_X2 = {'t__wrongparam': 1, 't__f': 2.0}
KW_X3 = {'r__wrongparam': 1, 't__f': 2.0}
KW_X4 = {'r__wrongstep__f': 1, 't__f': 2.0}
KW_X5 = {'r__regressor__wrongparam': 1, 't__f': 2.0}
KW_0 = {'t__f': 2.0}
KW_1 = {'t__f': 2.0, 'r__sample_weight': np.arange(3.0)}
KW_2 = {'t__f': 2.0, 'r__transformer__f': 3.0}
TEST_CHECK_FINAL_STEP = zip(
PIPELINES,
[TypeError, TypeError, TypeError, True, True, True, True, True],
)
@pytest.mark.parametrize('pipeline,output', TEST_CHECK_FINAL_STEP)
def test_check_final_step(self, pipeline, output):
"""Test checking if final step."""
pipeline = clone(pipeline)
if isinstance(output, type):
with pytest.raises(output):
pipeline._check_final_step()
return
assert pipeline._check_final_step() is None
TEST_FIT_TARGET_TRANSFORMER_ONLY = zip(
PIPELINES,
[{}, {}, {}, KW_X3, KW_X4, KW_0, KW_2],
[TypeError,
TypeError,
TypeError,
ValueError,
ValueError,
(np.array([20.0]), np.array([200.0 / 3.0])),
NotImplementedError],
)
@pytest.mark.parametrize('pipeline,kwargs,output',
TEST_FIT_TARGET_TRANSFORMER_ONLY)
def test_fit_target_transformer_only(self, pipeline, kwargs, output):
"""Test fitting of target transformer only."""
pipeline = clone(pipeline)
if isinstance(output, type):
with pytest.raises(output):
pipeline.fit_target_transformer_only(Y_TRAIN, **kwargs)
return
pipeline.fit_target_transformer_only(Y_TRAIN, **kwargs)
transformer = pipeline.steps[-1][1].transformer_
np.testing.assert_allclose(transformer.mean_, output[0])
np.testing.assert_allclose(transformer.var_, output[1])
assert not hasattr(pipeline.steps[-1][1], 'regressor_')
with pytest.raises(NotFittedError):
pipeline.predict(X_TRAIN)
with pytest.raises(NotFittedError):
pipeline.steps[-1][1].predict(X_TRAIN)
TEST_FIT_TRANSFORMERS_ONLY = zip(
PIPELINES,
[KW_0, KW_0, KW_1, {}, KW_X0, KW_X1, KW_2],
[None,
(np.array([8.333333]), np.array([8.222222])),
(np.array([8.333333]), np.array([8.222222])),
(np.array([6.333333]), np.array([8.222222])),
ValueError,
ValueError,
(np.array([8.333333]), np.array([8.222222]))],
)
@pytest.mark.parametrize('pipeline,kwargs,output',
TEST_FIT_TRANSFORMERS_ONLY)
def test_fit_transformers_only(self, pipeline, kwargs, output):
"""Test fitting transformers only."""
pipeline = clone(pipeline)
if isinstance(output, type):
with pytest.raises(output):
pipeline.fit_transformers_only(X_TRAIN, Y_TRAIN, **kwargs)
return
pipeline.fit_transformers_only(X_TRAIN, Y_TRAIN, **kwargs)
transformer = pipeline.steps[0][1]
if output is None:
assert not hasattr(transformer, 'mean_')
assert not hasattr(transformer, 'var_')
return
np.testing.assert_allclose(transformer.mean_, output[0])
np.testing.assert_allclose(transformer.var_, output[1])
with pytest.raises(NotFittedError):
pipeline.predict(X_TRAIN)
with pytest.raises(NotFittedError):
pipeline.steps[-1][1].predict(X_TRAIN)
TEST_TRANSFORM_ONLY = [
(KW_X0, ValueError),
(KW_X1, KeyError),
({}, np.array([[-1.1624763874], [-0.1162476387], [1.2787240262]])),
(KW_0, np.array([[-3.1624763874], [-2.1162476387], [-0.7212759738]])),
]
@pytest.mark.parametrize('kwargs,output', TEST_TRANSFORM_ONLY)
def test_transform_only(self, kwargs, output):
"""Test transforming only."""
pipeline = AdvancedPipeline([
('s', StandardScaler()),
('t', NonStandardScaler()),
('r', LinearRegression()),
])
with pytest.raises(NotFittedError):
pipeline.transform_only(X_TRAIN)
if isinstance(output, type):
with pytest.raises(output):
pipeline.fit(X_TRAIN, Y_TRAIN, **kwargs)
return
pipeline.fit(X_TRAIN, Y_TRAIN, **kwargs)
x_trans = pipeline.transform_only(X_TRAIN)
np.testing.assert_allclose(x_trans, output)
TEST_TRANSFORM_TARGET_ONLY = zip(
PIPELINES,
[{}, {}, {}, {}, KW_X2, KW_0, KW_X5],
[TypeError,
TypeError,
TypeError,
np.array([-1.22474487, 0.0, 1.22474487]),
np.array([-1.22474487, 0.0, 1.22474487]),
np.array([-1.22474487, 0.0, 1.22474487]),
np.array([-1.22474487, 0.0, 1.22474487])],
)
@pytest.mark.parametrize('pipeline,kwargs,output',
TEST_TRANSFORM_TARGET_ONLY)
def test_transform_target_only(self, pipeline, kwargs, output):
"""Test transforming of target only."""
pipeline = clone(pipeline)
if isinstance(output, type):
with pytest.raises(output):
pipeline.fit_target_transformer_only(Y_TRAIN, **kwargs)
return
with pytest.raises(NotFittedError):
pipeline.transform_target_only(Y_TRAIN)
pipeline.fit_target_transformer_only(Y_TRAIN, **kwargs)
y_trans = pipeline.transform_target_only(Y_TRAIN)
np.testing.assert_allclose(y_trans, output)
assert not hasattr(pipeline.steps[-1][1], 'regressor_')
with pytest.raises(NotFittedError):
pipeline.predict(X_TRAIN)
with pytest.raises(NotFittedError):
pipeline.steps[-1][1].predict(X_TRAIN)
class TestAdvancedTransformedTargetRegressor():
"""Tests for class ``AdvancedTransformedTargetRegressor``."""
AREG = AdvancedTransformedTargetRegressor(
transformer=NonStandardScaler(),
regressor=LinearRegression(),
)
FIT_KWARGS = [
{'a': 1},
{'b__a': 1, 't__f': 2.0},
{'regressor__wrongparam': 1},
{'transformer__fails': 1, 'regressor__a': 1, 'regressor__b': 1},
{},
{'regressor__sample_weight':
|
np.arange(3.0)
|
numpy.arange
|
"""Base classes for NARX estimator."""
# Authors:
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: BSD 3 clause
import numpy as np
from itertools import combinations_with_replacement
from itertools import chain
from collections import Counter
def _get_max_lag(ylag, xlag):
"""Get the max lag defined by the user.
Parameters
----------
ylag : int
The maximum lag of output regressors.
xlag : int
The maximum lag of input regressors.
Returns
-------
max_lag : int
The max lag value defined by the user.
"""
ny = np.max(list(chain.from_iterable([[ylag]])))
nx = np.max(list(chain.from_iterable([[xlag]])))
return np.max([ny, np.max(nx)])
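# --- Illustrative usage sketch (added for clarity; not part of the original module).
# The helper name `_demo_get_max_lag` is hypothetical: with ylag=3 and xlag=[1, 2],
# the overall maximum lag is expected to be 3.
def _demo_get_max_lag():
    assert _get_max_lag(3, [1, 2]) == 3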
class GenerateRegressors:
"""Polynomial NARMAX model
Provides the main functions to generate the regressor dictionary
and regressor codes for polynomial basis.
"""
def regressor_space(self, non_degree, xlag, ylag, n_inputs):
"""Create the code representation of the regressors.
This function generates a codification from all possibles
regressors given the maximum lag of the input and output.
This is used to write the final terms of the model in a
readable form. [1001] -> y(k-1).
This code format was based on a dissertation from UFMG. See
reference below.
Parameters
----------
non_degree : int
The desired maximum nonlinearity degree.
ylag : int
The maximum lag of output regressors.
xlag : int
The maximum lag of input regressors.
Returns
-------
max_lag : int
This value can be used by another functions.
regressor_code : ndarray of int
Matrix codification of all possible regressors.
Examples
--------
The codification is defined as:
>>> 100n = y(k-n)
>>> 200n = u(k-n)
>>> [100n 100n] = y(k-n)y(k-n)
>>> [200n 200n] = u(k-n)u(k-n)
References
----------
[1] Master Thesis: Barbosa, <NAME>.
Técnicas de otimização bi-objetivo para a determinação
da estrutura de modelos NARX (2010) [Bi-objective optimization techniques for determining the structure of NARX models].
"""
if not isinstance(non_degree, int) or non_degree < 1:
raise ValueError(
"non_degree must be integer and > zero. Got %f" % non_degree
)
if not isinstance(ylag, (int, list)) or np.min(np.minimum(ylag, 1)) < 1:
raise ValueError("ylag must be integer or list and > zero. Got %f" % ylag)
if (
not isinstance(xlag, (int, list))
# or np.min(np.minimum(xlag, 1)) < 1):
or np.min(np.min(list(chain.from_iterable([[xlag]])))) < 1
):
raise ValueError("xlag must be integer or list and > zero. Got %f" % xlag)
if not isinstance(n_inputs, int) or n_inputs < 1:
raise ValueError("n_inputs must be integer and > zero. Got %f" % n_inputs)
if isinstance(ylag, list):
# create only the lags passed from list
y_vec = []
y_vec.extend([lag + 1000 for lag in ylag])
y_vec = np.array(y_vec)
else:
# create a range of lags if an int value was passed
y_vec = np.arange(1001, 1001 + ylag)
if isinstance(xlag, list) and n_inputs == 1:
# create only the lags passed from list
x_vec_tmp = []
x_vec_tmp.extend([lag + 2000 for lag in xlag])
x_vec_tmp = np.array(x_vec_tmp)
elif isinstance(xlag, int) and n_inputs == 1:
# create a range of lags if an int value was passed
x_vec_tmp = np.arange(2001, 2001 + xlag)
elif n_inputs > 1:
# only list are allowed if n_inputs > 1
# the user must enter a list of the desired lags explicitly
x_vec_tmp = []
for i in range(n_inputs):
if isinstance(xlag[i], list) and n_inputs > 1:
# create 200n, 300n,..., 400n to describe each input
x_vec_tmp.extend([lag + 2000 + i * 1000 for lag in xlag[i]])
elif isinstance(xlag[i], int) and n_inputs > 1:
x_vec_tmp.extend(
[np.arange(2001 + i * 1000, 2001 + i * 1000 + xlag[i])]
)
reg_aux = np.array([0])
if n_inputs > 1:
# if x_vec is a nested list, ensure all elements are arrays
all_arrays = [np.array([i]) if isinstance(i, int) else i for i in x_vec_tmp]
x_vec = np.concatenate([i for i in all_arrays])
else:
x_vec = x_vec_tmp
reg_aux = np.concatenate([reg_aux, y_vec, x_vec])
regressor_code = list(combinations_with_replacement(reg_aux, non_degree))
regressor_code = np.array(regressor_code)
regressor_code = regressor_code[:, regressor_code.shape[1] :: -1]
max_lag = _get_max_lag(ylag, xlag)
return regressor_code, max_lag
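# --- Illustrative usage sketch (added for clarity; not part of the original class).
# The helper name `_demo_regressor_space` is hypothetical: a degree-1 model with
# ylag = xlag = 1 and one input should yield the constant term plus y(k-1) and u(k-1),
# i.e. the codes 0, 1001 and 2001, with a maximum lag of 1.
def _demo_regressor_space():
    codes, max_lag = GenerateRegressors().regressor_space(1, 1, 1, 1)
    assert sorted(codes.ravel().tolist()) == [0, 1001, 2001]
    assert max_lag == 1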
class HouseHolder:
"""Householder reflection and transformation."""
def _house(self, x):
"""Perform a Househoulder reflection of vector.
Parameters
----------
x : array-like of shape = number_of_training_samples
The respective column of the matrix of regressors in each
iteration of ERR function.
Returns
-------
v : array-like of shape = number_of_training_samples
The reflection of the array x.
References
----------
[1] Manuscript: <NAME>., <NAME>., & <NAME>. (1989).
Orthogonal least squares methods and their application to non-linear system identification.
"""
u = np.linalg.norm(x, 2)
if u != 0:
aux_b = x[0] + np.sign(x[0]) * u
x = x[1:] / aux_b
x = np.concatenate((np.array([1]), x))
return x
def _rowhouse(self, RA, v):
"""Perform a row Househoulder transformation.
Parameters
----------
RA : array-like of shape = number_of_training_samples
The respective column of the matrix of regressors in each
iteration of ERR function.
v : array-like of shape = number_of_training_samples
The reflected vector obtained by using the householder reflection.
Returns
-------
B : array-like of shape = number_of_training_samples
References
----------
[1] Manuscript: <NAME>., <NAME>., & <NAME>. (1989).
Orthogonal least squares methods and their application to
non-linear system identification. International Journal of
control, 50(5), 1873-1896.
"""
b = -2 / np.dot(v.T, v)
w = b * np.dot(RA.T, v)
w = w.reshape(1, -1)
v = v.reshape(-1, 1)
RA = RA + v * w
B = RA
return B
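# --- Illustrative usage sketch (added for clarity; not part of the original class).
# The helper name `_demo_householder` is hypothetical: applying the reflection built
# by _house to the original column (as a one-column matrix) should zero out every
# entry below the first one.
def _demo_householder():
    hh = HouseHolder()
    x = np.array([3.0, 4.0, 0.0])
    v = hh._house(np.copy(x))
    reflected = hh._rowhouse(x.reshape(-1, 1), v)
    assert np.allclose(reflected[1:], 0.0, atol=1e-10)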
class InformationMatrix:
"""Class for methods regarding preprocessing of columns"""
def shift_column(self, col_to_shift, lag):
"""Shift values based on a lag.
Parameters
----------
col_to_shift : array-like of shape = n_samples
The samples of the input or output.
lag : int
The respective lag of the regressor.
Returns
-------
tmp_column : array-like of shape = n_samples
The shifted array of the input or output.
Examples
--------
>>> y = [1, 2, 3, 4, 5]
>>> shift_column(y, 1)
[0, 1, 2, 3, 4]
"""
n_samples = col_to_shift.shape[0]
tmp_column =
|
np.zeros((n_samples, 1))
|
numpy.zeros
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 11 15:24:23 2022
Modified on Tue May 3 -- add plotting function
-- add regression classifer
@author: <NAME> and <NAME>
"""
# %% Import packages
import numpy as np
import loadmat as lm
import math
import tensorflow as tf
from matplotlib import pyplot as plt
from sklearn import preprocessing
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import scale
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.utils import resample
from sklearn.linear_model import LogisticRegression
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras import optimizers
from tensorflow.keras import utils as np_utils
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Activation, Permute, Dropout
from tensorflow.keras.layers import Conv2D, MaxPooling2D, AveragePooling2D
from tensorflow.keras.layers import SeparableConv2D, DepthwiseConv2D
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Input, Flatten
from tensorflow.keras.constraints import max_norm
import mne
from mne import io
from mne.datasets import sample
# %% Helper func
def preprocess(data_dict, ch_indices, fs, epoch_period=667):
'''
This function loads the data struct and extracts machine learning features
from the raw dataset.
Parameters
----------
data_dict : dict
DESCRIPTION. The EEG data dictionary
ch_indices : int list/array
DESCRIPTION. The list/array of the integer of desired channel
fs : int
DESCRIPTION. Sampling frequency in Hz
epoch_period : int, optional
DESCRIPTION. The epoch duration in ms.
The default is 667.
Returns
-------
X_scaled : 2d array: sample x feature
DESCRIPTION. Scaled training data
y : 1d array
DESCRIPTION. Labels
'''
# extract info
flashing = data_dict['Flashing']
types= data_dict['StimulusType']
# get the event index
event_time = np.where(np.diff([flashing])[0][0] > 0)[0] + 1
# manually insert the first event at index 0
event_time = np.insert(event_time, 0, 0)
# extract labels for each sample/epoch
label = types[:, event_time].flatten()
# calculate the period length for each sample
epoch_len = round(epoch_period * fs / 1000)
# extract data for each electrode
sample_all_electrodes = None
for ch in ch_indices:
data = data_dict['Signal'][:, :, ch]
# declare the 3d epoch array
epochs = np.zeros((data.shape[0], len(event_time), epoch_len))
# loop through the eeg data into epochs
for epoch_index, epoch_start in enumerate(event_time):
epoch_end = epoch_start + epoch_len
epochs[:, epoch_index] = data[:, epoch_start:epoch_end]
# reshape the epochs array to 2d
sample = epochs.reshape((epochs.shape[0]*epochs.shape[1]),
epochs.shape[2])
#--------------extract 14 samples---------------------
indices = np.arange(0, epochs.shape[2], int(epochs.shape[2]/14))
sample = sample[:, indices]
# combine electrode feature(s)
if sample_all_electrodes is None:
sample_all_electrodes = sample
else:
# sample_all_electrodes = sample_all_electrodes + sample
sample_all_electrodes = np.hstack((sample_all_electrodes, sample))
# ------------------ Filter -----------------------
# create a 8-order bandpass Chebyshev Type I filter which cut-off
# frequencies are 0.1 and 10 Hz
# filter_coefficients = signal.firwin(8, [0.5, 10], window='hann',
# pass_zero=False, fs=fs)
# # filter the eeg data
# filtered_sample = signal.filtfilt(filter_coefficients, 1, sample)
# # reform the dataset
# dataset = np.hstack((filtered_sample, np.reshape(label, (-1, 1))))
# testing...
#------------
#X = sample_all_electrodes/len(ch_indices)
X = sample_all_electrodes
y = label
#------------
# normalize X
scaler = preprocessing.StandardScaler().fit(X)
X_scaled = scaler.transform(X)
return X_scaled, y
def preprocess1(data_dict, ch_indices, fs, epoch_period=667):
'''
This function loads the data struct and extracts machine learning features
for EEGNet only from the raw dataset.
Parameters
----------
data_dict : dict
DESCRIPTION. The EEG data dictionary
ch_indices : int list/array
DESCRIPTION. The list/array of the integer of desired channel
fs : int
DESCRIPTION. Sampling frequency in Hz
epoch_period : int, optional
DESCRIPTION. The epoch duration in ms.
The default is 667.
Returns
-------
X_ds : 2d array: sample x feature
DESCRIPTION. Downsampled training data
y_ds : 1d array
DESCRIPTION. Labels
'''
# extract info
flashing = data_dict['Flashing']
types= data_dict['StimulusType']
# get the event index
event_time = np.where(np.diff([flashing])[0][0] > 0)[0] + 1
# manually insert the first event at index 0
event_time = np.insert(event_time, 0, 0)
# extract labels for each sample/epoch
label = types[:, event_time].flatten()
# calculate the period length for each sample
epoch_len = round(epoch_period * fs / 1000)
# extract data for each electrode
X = []
for ch in ch_indices:
data = data_dict['Signal'][:, :, ch]
# declare the 3d epoch array
epochs = np.zeros((data.shape[0], len(event_time), epoch_len))
# loop through the eeg data into epochs
for epoch_index, epoch_start in enumerate(event_time):
epoch_end = epoch_start + epoch_len
epochs[:, epoch_index] = data[:, epoch_start:epoch_end]
# reshape the epochs array to 2d
sample = epochs.reshape((epochs.shape[0]*epochs.shape[1]),
epochs.shape[2])
# combine all data
X.append(sample)
# reshape X
X = np.asarray(X)
X = X.reshape(X.shape[1], X.shape[0], X.shape[2])
# downsample size
y= label
downsample_size = 250
# split target and nontarget samples
target = X[np.where(y==1)[0]]
nontarget = X[np.where(y==0)[0]]
# generate indices
target_ind = resample(np.arange(target.shape[0]),
replace=False, n_samples=downsample_size)
nontarget_ind = resample(np.arange(nontarget.shape[0]),
replace=False, n_samples=downsample_size)
# merge two classes
X_ds = np.vstack((target[target_ind], nontarget[nontarget_ind]))
y_ds = np.vstack((np.ones((downsample_size, 1)),
np.zeros((downsample_size, 1))))
return X_ds, y_ds
def downsample(X, y, downsample_size=2000):
'''
This function downsamples the training data to `downsample_size` samples per class.
Parameters
----------
X : 2d array: sample x feature
DESCRIPTION. Testing data
y : 1d array
DESCRIPTION. Testing labels
downsample_size : int, optional
DESCRIPTION. The downsampled data size
The default is 2000.
Returns
-------
X_ds : 2d array: sample x feature
DESCRIPTION. Downsampled training data
y_ds : 1d array
DESCRIPTION. Labels
'''
# split target and nontarget samples
target = X[np.where(y==1)[0]]
nontarget = X[np.where(y==0)[0]]
# use resample to downsample
target_ds = resample(target, replace=False, n_samples=downsample_size)
nontarget_ds = resample(nontarget, replace=False, n_samples=downsample_size)
# merge two classes
X_ds = np.vstack((target_ds, nontarget_ds))
y_ds = np.vstack((
|
np.ones((downsample_size, 1))
|
numpy.ones
|
from __future__ import print_function
import glob
import os
import numpy as np
from six.moves import cPickle as pickle
from six.moves.urllib.request import urlretrieve
import tarfile
import sys
from adversarial_robustness.dataset import *
class notMNIST(Dataset):
def __init__(self, data_dir=default_data_dir):
self.X, self.y, self.Xv, self.yv, self.Xt, self.yt = load_notmnist(data_dir)
self.feature_names = [str(i) for i in range(28*28)]
self.label_names = ['A','B','C','D','E','F','G','H','I','J']
self.image_shape = (28,28)
def load_notmnist(data_dir):
filename = data_dir + '/notMNIST.pickle'
if not os.path.exists(filename):
print('Dataset not found, downloading and preprocessing...')
download_and_extract_notmnist(data_dir)
with open(filename, 'rb') as f:
data = pickle.load(f)
def resh(x):
return x.reshape(len(x), 28*28)
X = resh(data['train_dataset'])
Xv = resh(data['valid_dataset'])
Xt = resh(data['test_dataset'])
y = data['train_labels']
yv = data['valid_labels']
yt = data['test_labels']
return X, y, Xv, yv, Xt, yt
def download_and_extract_notmnist(data_root):
"""
Adapted from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/udacity/1_notmnist.ipynb
"""
url = 'https://commondatastorage.googleapis.com/books1000/'
image_size = 28 # Pixel width and height.
pixel_depth = 255.0 # Number of levels per pixel.
num_classes = 10
def maybe_download(filename, expected_bytes, force=False):
"""Download a file if not present, and make sure it's the right size."""
dest_filename = os.path.join(data_root, filename)
if force or not os.path.exists(dest_filename):
print('Attempting to download:', filename)
filename, _ = urlretrieve(url + filename, dest_filename)
print('\nDownload Complete!')
statinfo = os.stat(dest_filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', dest_filename)
else:
raise Exception(
'Failed to verify ' + dest_filename + '. Can you get to it with a browser?')
return dest_filename
train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)
test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)
def maybe_extract(filename, force=False):
root = os.path.splitext(os.path.splitext(filename)[0])[0] # remove .tar.gz
if os.path.isdir(root) and not force:
# You may override by setting force=True.
print('%s already present - Skipping extraction of %s.' % (root, filename))
else:
print('Extracting data for %s. This may take a while. Please wait.' % root)
tar = tarfile.open(filename)
sys.stdout.flush()
tar.extractall(data_root)
tar.close()
data_folders = [
os.path.join(root, d) for d in sorted(os.listdir(root))
if os.path.isdir(os.path.join(root, d))]
if len(data_folders) != num_classes:
raise Exception(
'Expected %d folders, one per class. Found %d instead.' % (
num_classes, len(data_folders)))
print(data_folders)
return data_folders
train_folders = maybe_extract(train_filename)
test_folders = maybe_extract(test_filename)
def load_letter(folder, min_num_images):
"""Load the data for a single letter label."""
image_files = os.listdir(folder)
dataset = np.ndarray(shape=(len(image_files), image_size, image_size),
dtype=np.float32)
print(folder)
num_images = 0
for image in image_files:
image_file = os.path.join(folder, image)
try:
from scipy import ndimage
image_data = (ndimage.imread(image_file).astype(float) -
pixel_depth / 2) / pixel_depth
if image_data.shape != (image_size, image_size):
raise Exception('Unexpected image shape: %s' % str(image_data.shape))
dataset[num_images, :, :] = image_data
num_images = num_images + 1
except IOError as e:
print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.')
dataset = dataset[0:num_images, :, :]
if num_images < min_num_images:
raise Exception('Many fewer images than expected: %d < %d' %
(num_images, min_num_images))
print('Full dataset tensor:', dataset.shape)
print('Mean:', np.mean(dataset))
print('Standard deviation:',
|
np.std(dataset)
|
numpy.std
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" <NAME>
ISIR - CNRS / Sorbonne Université
02/2018
"""
from gym_round_bot.envs import round_bot_model
import numpy as np
from gym import spaces
import copy
"""
This file defines the Controller class for controlling the robot
In this module, the speed value must be seen as the motor commanded speed,
and not the actual speed (which can vary with collisions and friction)
"""
##################################################################################################################################
class Controller(object):
def __init__(self, controllerType, xzrange, thetarange, model=None, noise_ratio=0):
"""
Abstract class for controllers : controllers are here mappings from actions to model's code execution
Parameters:
----------
- controllerType : (string) Describe the controller
- xzrange : (int, int) x,z speed multiplication factors
- thetarange : (int) Dtheta multiplication factors
- model : (round_bot_model) Model controlled by the controller
- noise_ratio : (float) Ratio to compute additive gaussian noise standard deviation from action's speed
"""
# prevent user from instantiating directly this abstract class
if type(self) is Controller:
raise NotImplementedError('Cannot instantiate this abstract class')
self._controllerType = controllerType
self._model = model # can be set after initialization
self._xzrange = xzrange
self._thetarange = thetarange
self.action_meaning = '' # string to explain link between actions value and their meaning
self._action_space = None # the gym action space corresponding to this controller
self.noise_ratio = noise_ratio # additive gaussian noise stdv ratio to speed
self._act = None # function for causing effects of actions
self._discrete = None # whether the controller is discrete; set by subclasses
@property
def model(self):
if not self._model:
print(Warning('returned model = None'))
return self._model
@model.setter
def model(self, model):
if self._model: # can't assign same controller for several model
raise Exception('Cannot assign same controller to different models, please create a new controller')
else: # model must be None here
self._model = model
@property
def num_actions(self):
raise NotImplementedError()
@property
def action_space(self):
if not self._action_space:
print(Warning('returned action_space = None'))
else:
return self._action_space
@property
def speed(self):
    return self._model.rolling_speed

@speed.setter
def speed(self, s):
    self._model.rolling_speed = s
@property
def controllerType(self):
return copy.copy(self._controllerType)
@property
def discrete(self):
return self._discrete
def step(self, action):
"""
Controls the model's robot to perform the action
Executes the functions contained in the actions dictionary
"""
# exec action's function
self._act(*action)
##################################################################################################################################
class DiscreteController(Controller):
def __init__(self, controllerType, xzrange, thetarange, model=None, int_actions=False, noise_ratio=0):
"""
Parameters:
-----------
- int_actions : (Bool) Whether provided actions are of type int
- *args, **kwargs : see Controller.__init__
"""
# prevent user from instantiating directly this abstract class
if type(self) is DiscreteController:
raise NotImplementedError('Cannot instantiate this abstract class')
super(DiscreteController, self).__init__(controllerType=controllerType, xzrange=xzrange,
thetarange=thetarange, model=model, noise_ratio=noise_ratio)
self._discrete = True
self._actions = {} # dictionary mapping action numbers to their code meaning
self.int_actions = int_actions
self._reversed_actions_mapping = None # to be built with self.reverse_actions_mapping after self._actions initialization
@property
def num_actions(self):
return len(self._actions)
@property
def action_space_int(self):
if not self._action_space:
print(Warning('returned action_space = None'))
else:
return spaces.Discrete(self.num_actions-1)
@property
def actions_mapping(self):
"""
Returns a mapping from actions to integer indices. Ex: {(0,0):0, (0,1):1, (1,0):2}
"""
# Easiest way of doing it :
# keys = self._actions.keys()
# return dict( zip(keys, range(len(keys))) )
# We choose the more general construction, which is also implemented in our action_wrapper
action_space = self._action_space
name=type(action_space).__name__
if name == 'Discrete':
return {i:i for i in range(action_space.n)}
elif name =='MultiDiscrete':
r=[[]]
for x in action_space.nvec:
t = []
for y in list(range(x)):
for i in r:
t.append(i+[y])
r = t
return {tuple(r[i]): i for i in range(len(r))}
else:
raise Exception('action_space class name should be either Discrete or MultiDiscrete!')
@property
def reverse_actions_mapping(self):
"""
Returns a mapping from integers indices to action. Ex: {0:(0,0), 1:(0,1), 2:(1,0)}
"""
# Easiest way of doing it :
# keys = self._actions.keys()
# return dict( zip( range(len(keys)), keys) )
# We choose the more general construction, which is also implemented in our action_wrapper
actions_mapping = self.actions_mapping
return {actions_mapping[k]:k for k in actions_mapping.keys()}
def step(self, action):
"""
Controls the model's robot to perform the action
Executes the functions contained in the actions dictionary
"""
if self.int_actions:
# If actions are taken as int, convert them to the correct format
action = self._reversed_actions_mapping[action]
self._act(*action)
##################################################################################################################################
class ContinuousController(Controller):
def __init__(self, controllerType, xzrange, thetarange, model=None, noise_ratio=0):
# prevent user from instantiating directly this abstract class
if type(self) is ContinuousController:
raise NotImplementedError('Cannot instantiate this abstract class')
super(ContinuousController, self).__init__(controllerType=controllerType, xzrange=xzrange,
thetarange=thetarange, model=model, noise_ratio=noise_ratio)
self._discrete = False
@property
def num_actions(self):
return self._action_space.shape[0]
def center_reduce_actions(self, actions):
"""
Center and reduce actions with continuous action space (gym.spaces.Box) parameters
Return actions in [-1:1] range
"""
middle = (self._action_space.high + self._action_space.low)/2.0
maxdist = np.abs(self._action_space.high - self._action_space.low)/2.0
return (actions - middle)/maxdist
##################################################################################################################################
class Theta_Controller(DiscreteController):
""" This class controls the robot with 2*thetarange dtheta rotations and speedrange fixed speed forward/bacwkard move
For Theta controllers, super xzrange parameter is set to speedrange*2
"""
def __init__(self, model, dtheta, speed, speedrange=1, thetarange=1, int_actions=False, noise_ratio=0):
super(Theta_Controller,self).__init__('Theta', model=model, xzrange=[speedrange,None], thetarange=thetarange,
int_actions=int_actions, noise_ratio=noise_ratio)
self.dtheta = dtheta
self._initial_speed = speed
self._init()
self._reversed_actions_mapping = self.reverse_actions_mapping # build reversed action mapping
def _init(self):
""" Private initialisation of Theta_Controller
"""
self.action_meaning = '[s, dth] 2-tuple coding for speed between -initial_speed*2 and +initial_speed*2 and dtheta between -2dt and 2dt'
self._actions = {(s,d) for s in range(0,2*self._xzrange[0]+1) for d in range(0,2*self._thetarange+1) }
def act(s,d):
self._model.strafe[0]= 0 if s-self._xzrange[0]==0 else np.sign(s-self._xzrange[0])
speed = self._initial_speed*(abs(s-self._xzrange[0]))
self._model.rolling_speed= speed + np.random.normal(0,speed*self.noise_ratio)
dth = ((d-self._thetarange)*self.dtheta)
self._model.change_robot_rotation(dth+np.random.normal(0,abs(dth)*self.noise_ratio),0)
self._act = act
self._action_space = spaces.MultiDiscrete([2*self._xzrange[0]+1,2*self._thetarange+1])
# set missing MultiDiscrete parameter n
self._action_space.n = self.num_actions
##################################################################################################################################
class Theta2_Controller(Theta_Controller):
""" This class controls the robot like Theta but cannot go backwards
"""
def __init__(self, model, dtheta, speed, speedrange=1, thetarange=1, int_actions=False, noise_ratio=0):
super(Theta2_Controller,self).__init__(model=model, dtheta=dtheta, speed=speed, speedrange=speedrange, thetarange=thetarange,
int_actions=int_actions, noise_ratio=noise_ratio)
self._controllerType = 'Theta2'
def _init(self):
""" Private initialisation of Theta2_Controller
"""
self.action_meaning = '[s, dth] 2-tuple coding for speed between 0 and +initial_speed and dtheta between -dt and dt'
self._actions = { (s,d) for s in range(0,self._xzrange[0]+1) for d in range(0,2*self._thetarange+1) }
def act(s,d):
self._model.strafe[0]= 0 if s-self._xzrange[0]==0 else np.sign(s-self._xzrange[0])
speed = self._initial_speed*abs(s-self._xzrange[0])
self._model.rolling_speed= speed + np.random.normal(0,speed*self.noise_ratio)
dth = (d-self._thetarange)*self.dtheta
self._model.change_robot_rotation(dth+np.random.normal(0,abs(dth)*self.noise_ratio),0)
self._act = act
self._action_space = spaces.MultiDiscrete([1+self._xzrange[0],2*self._thetarange+1])
# set missing MultiDiscrete parameter n
self._action_space.n = self.num_actions
##################################################################################################################################
class XZ_Controller(DiscreteController):
"""
This class controls the robot to move in the (oXZ) plane, always looking in the same direction, with discrete moves
"""
def __init__(self, model, speed, xzrange=[1,1], thetarange=2, int_actions=False, noise_ratio=0):
super(XZ_Controller,self).__init__('XZ',model=model, int_actions=int_actions, xzrange=xzrange,
thetarange=thetarange, noise_ratio=noise_ratio)
self._initial_speed = speed
self.action_meaning = '[x, z] 2-tuple coding for x and z between -xzrange and +xzrange'
self._init()
self._action_space = spaces.MultiDiscrete([2*xzrange[0]+1,2*xzrange[1]+1])
# set missing MultiDiscrete parameter n
self._action_space.n = self.num_actions
self._reversed_actions_mapping = self.reverse_actions_mapping # build reversed action mapping
def _init(self):
self._actions = { (x,z) for x in range(0,2*self._xzrange[0]+1) for z in range(0,2*self._xzrange[1]+1)}
def act(x,z):
self._model.strafe=[x-self._xzrange[0],z-self._xzrange[1]]
speed = self._initial_speed*np.sqrt((x-self._xzrange[0])**2+(z-self._xzrange[1])**2)
self._model.rolling_speed = speed +
|
np.random.normal(0,speed*self.noise_ratio)
|
numpy.random.normal
|
import cv2
import numpy as np
from math import sqrt
from copy import copy
from geometry import is_right, line, intersection, distance
# Contains helper functions to use the transformation
def warp_point(p, M):
p_t = np.array([p], dtype="float32")
p_t = np.array([p_t])
p_t = cv2.perspectiveTransform(p_t, M)
return p_t[0][0]
def warp_generator(image, bb3d, vp1, vp2, im_h, im_w):
bb3d = np.array(bb3d, np.float32)
M, _ = get_transform_matrix(vp1, vp2, image, im_w, im_h)
# image_t = cv2.warpPerspective(image, M, (im_w, im_h), borderMode=cv2.BORDER_REPLICATE)
image_t = cv2.warpPerspective(image, M, (im_w, im_h), borderMode=cv2.BORDER_CONSTANT)
t_bb3d = cv2.perspectiveTransform(np.array([[point] for point in bb3d], np.float32), M)
xs = [point[0][0] for point in t_bb3d]
ys = [point[0][1] for point in t_bb3d]
bb_out = {'x_min': np.amin(xs), 'y_min': np.amin(ys), 'x_max': np.amax(xs), 'y_max': np.amax(ys)}
front = [0, 1, 4, 5]
xs = [xs[idx] for idx in front]
ys = [ys[idx] for idx in front]
bb_in = {'x_min': np.amin(xs), 'y_min': np.amin(ys), 'x_max': np.amax(xs), 'y_max': np.amax(ys)}
return image_t, M, bb_in, bb_out
def warp_inference(image, vp1, vp2, im_h, im_w):
M, IM = get_transform_matrix(vp1, vp2, image, im_w, im_h)
image_t = cv2.warpPerspective(image, M, (im_w, im_h), borderMode=cv2.BORDER_REPLICATE)
return image_t, M, IM
def unwarp_inference(image, M, bb_in, bb_out):
if abs(bb_in['x_max'] - bb_out['x_max']) < abs(bb_in['x_min'] - bb_out['x_min']):
    # bb_in is in the lower right corner
    bb_out_x_offset = bb_out['x_min'] - bb_in['x_min']
else:
    bb_out_x_offset = bb_out['x_max'] - bb_in['x_max']
bb_out_y_offset = bb_out['y_min'] - bb_in['y_min']
bb_out_offset = np.array([bb_out_x_offset, bb_out_y_offset], np.float32)
bb3d = []
bb3d.append((bb_in['x_min'], bb_in['y_min']))
bb3d.append((bb_in['x_max'], bb_in['y_min']))
bb3d.append((bb_in['x_max'], bb_in['y_max']))
bb3d.append((bb_in['x_min'], bb_in['y_max']))
bb3d = np.array(bb3d, np.float32)
bb3d = np.vstack((bb3d, bb3d + bb_out_offset))
bb3d_it = cv2.perspectiveTransform(bb3d.reshape(-1, 1, 2), cv2.invert(M)[1])
return bb3d_it
# find appropriate corner points
def find_cornerpts(VP, pts):
pts =
|
np.array(pts)
|
numpy.array
|
import logging
import json
import pandas as pd
import numpy as np
from scipy import sparse
from scipy.sparse.linalg import cg
from scipy.interpolate import interp1d
import plotly.graph_objects as go
from plotly.utils import PlotlyJSONEncoder
from datasets.constants import domain_types
from datasets.processing_methods.method_base import AnalysisMethodBase
LOGGER = logging.getLogger(__name__)
def detrendingKubios(RR_interval, lambda_v=300):
z = np.matrix(RR_interval).T
T = len(z)
I = np.ones([T - 2, 1])
v = [1, -2, 1]
data = I * v
II = sparse.eye(T)
D2 = sparse.spdiags(data.transpose(), [0, 1, 2], T - 2, T)
D2 = sparse.lil_matrix(D2)
D2[-2, -2] = 1
D2[-1, -2] = -2
D2[-1, -1] = 1
D2 = sparse.csc_matrix(D2)
DD = II + ((lambda_v**2) * (D2.T * D2))
DD = sparse.csc_matrix(DD)
# start = time()
xx_tmp = cg(DD, z)
# print(time() - start)
xx = xx_tmp[0]
z_stat = II * z - np.matrix(xx).T
return z_stat.T.tolist()[0]
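# --- Illustrative usage sketch (added for clarity; not part of the original module).
# The helper name `_demo_detrendingKubios` and the synthetic RR series are assumptions.
# The smoothness-priors detrending should return a list of the same length as the
# input, with the slow trend largely removed.
def _demo_detrendingKubios():
    rr = (800 + np.linspace(0, 100, 50) + 5 * np.sin(np.linspace(0, 12, 50))).tolist()
    detrended = detrendingKubios(rr)
    assert len(detrended) == len(rr)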
class TimeDomainAnalysis(AnalysisMethodBase):
@classmethod
def name(cls):
return 'HRV Time Domain'
@classmethod
def domain(cls):
return domain_types.TIME
@classmethod
def options(cls):
return {
'hr_window': {
'title': 'Min/max HR as average of',
'type': 'number',
'unit': 'beats',
'default': 5,
'limits': [1, 1000]
},
'threshold': {
'title': 'Threshold for NNxx',
'type': 'number',
'unit': 'ms',
'default': 50
}
}
def process(self):
# Get selected samples from signal
data = pd.DataFrame()
for each in self.analysis_samples:
data = pd.concat([data, self.signal.samples_dataframe(each.start, each.end)])
# Read parameters from configuration
nn_threshold = self.configuration['threshold']
window = self.configuration['hr_window']
# Prepare data
ibi_series = data[data.columns[0]]
instantaneous_hr = 60 / (ibi_series / 1000)
rolling_mean_hr = instantaneous_hr.rolling(window).mean()
rolling_24h = ibi_series.rolling('5min')
# Precalculate data for standard indexes
nn_diff =
|
np.diff(ibi_series)
|
numpy.diff
|
#!/usr/bin/env python3
"""Vectorize result
Usage:
vectorize.py [options] <npzdir> [<indices>...]
vectorize.py (-h | --help )
Options:
-h --help Show this screen.
--show Show the result on screen
-j --jobs <jobs> Number of threads for vectorization [default: 1]
--vpdir <vpdir> Directory to the vanishing points prediction
[Default: logs/pretrained-vanishing-points/npz/000096000]
"""
import os
import sys
import json
import math
import random
import os.path as osp
from collections import deque
import cv2
import yaml
import numpy as np
import matplotlib as mpl
import skimage.io
import numpy.linalg as LA
import skimage.draw
import matplotlib.cm as cm
import skimage.filters
import matplotlib.pyplot as plt
import skimage.morphology
from docopt import docopt
from wireframe.utils import parmap
from wireframe.metric import nms_junction
from wireframe.viewer import show_wireframe
from wireframe.optimize import (
to_world,
lifting_from_vp,
vanish_point_refine,
vanish_point_clustering,
vanish_point_clustering2,
estimate_intrinsic_from_vp,
)
PI2 = math.pi * 2
NMS_ANGLE = PI2 / 24
JUNC = 0.2
JUND = 0.3
MAX_T_DISTANCE = 5
T_SCALE = 1.1
# Thresholding
MEDIAN = 0.1
SCORE = 0.65
N_ITER = 3
# Gaussian blur
SIGMA = 0.5
SCALE = 2.0
# setup matplotlib
cmap = plt.get_cmap("jet")
norm = mpl.colors.Normalize(vmin=0.6, vmax=1.1)
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
sm.set_array([])
def line_color(x):
return sm.to_rgba(x)
def filter_heatmap(h, h_threshold, offset=None):
if offset is None:
offset = np.zeros([2] + list(h.shape))
result = []
for y in range(h.shape[0]):
for x in range(h.shape[1]):
if h[y, x] > h_threshold:
result.append(
[(x + offset[0, y, x]) * 4, (y + offset[1, y, x]) * 4, h[y, x]]
)
return result
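# --- Illustrative usage sketch (added for clarity; not part of the original script).
# The helper name `_demo_filter_heatmap` is hypothetical: a single above-threshold
# pixel at (row=1, col=2) should be reported as one [x*4, y*4, value] entry.
def _demo_filter_heatmap():
    h = np.zeros((3, 3))
    h[1, 2] = 0.9
    assert filter_heatmap(h, 0.5) == [[8.0, 4.0, 0.9]]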
def project(c, a, b):
px = b[0] - a[0]
py = b[1] - a[1]
dd = px * px + py * py
u = max(min(((c[0] - a[0]) * px + (c[1] - a[1]) * py) / float(dd), 1), 0)
return (a[0] + u * px, a[1] + u * py)
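# --- Illustrative usage sketch (added for clarity; not part of the original script).
# The helper name `_demo_project` is hypothetical: projecting (1, 1) onto the segment
# from (0, 0) to (2, 0) lands at (1, 0); points beyond the segment are clamped to its ends.
def _demo_project():
    assert project((1, 1), (0, 0), (2, 0)) == (1.0, 0.0)
    assert project((5, 1), (0, 0), (2, 0)) == (2.0, 0.0)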
def is_intersected(a0, a1, b0, b1):
def ccw(c, a, b):
return (a[0] - c[0]) * (b[1] - c[1]) - (b[0] - c[0]) * (a[1] - c[1])
def sgn(x):
if abs(x) < 1e-6:
return 0
if x > 0:
return 1
return -1
c0 = sgn(ccw(a0, a1, b0))
c1 = sgn(ccw(a0, a1, b1))
d0 = sgn(ccw(b0, b1, a0))
d1 = sgn(ccw(b0, b1, a1))
return c0 * c1 < 0 and d0 * d1 < 0
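def _is_intersected_example():
    # Illustrative check (not in the original script): the CCW-sign test above reports only
    # strict crossings, so two segments meeting at (0.5, 0.5) intersect while segments that
    # merely share an endpoint do not.
    assert is_intersected((0, 0), (1, 1), (0, 1), (1, 0))
    assert not is_intersected((0, 0), (1, 1), (1, 1), (2, 0))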
def angle(c, a, b):
a = (a[0] - c[0], a[1] - c[1])
b = (b[0] - c[0], b[1] - c[1])
dot = (
(a[0] * b[0] + a[1] * b[1])
/ math.sqrt(a[0] ** 2 + a[1] ** 2 + 1e-9)
/ math.sqrt(b[0] ** 2 + b[1] ** 2 + 1e-9)
)
return math.acos(max(min(dot, 1), -1))
def point2line(c, a, b):
px = b[0] - a[0]
py = b[1] - a[1]
dd = px * px + py * py
u = ((c[0] - a[0]) * px + (c[1] - a[1]) * py) / float(dd)
if u <= 0 or u >= 1:
return 100
dx = a[0] + u * px - c[0]
dy = a[1] + u * py - c[1]
return dx * dx + dy * dy
def parse_result(result):
junc = nms_junction(result["jmap"][0])
jund = nms_junction(result["jmap"][1])
line = result["lmap"]
jdep = result["jdep"]
junc = filter_heatmap(junc, JUNC)
jund = filter_heatmap(jund, JUND)
jun = junc + jund
return jun, list(range(len(junc))), list(range(len(junc), len(jun))), line, jdep
def edge_pruning(juncs, edges):
def polar_angle(p0, p1):
return math.atan2(p1[1] - p0[1], p1[0] - p0[0])
def polar_diff(p1, p2):
d = math.fmod(p1 - p2, PI2)
if d < 0:
d += PI2
return min(abs(d), abs(PI2 - d))
esets = set()
links = [{} for _ in range(len(juncs))]
def delete_edge(i):
j1, j2 = edges[i][3], edges[i][4]
del links[j1][i]
del links[j2][i]
esets.remove(i)
for it in range(N_ITER):
for i, (p1, p2, score, j1, j2) in enumerate(edges):
if i in links[j1]:
continue
angle1 = polar_angle(p1, p2)
angle2 = math.fmod(angle1 + math.pi, PI2)
# check nearby edges
error = False
# if it == N_ITER - 1:
# score = -1
for j, angle in links[j1].copy().items():
if polar_diff(angle, angle1) < NMS_ANGLE and edges[j][2] > score:
error = True
break
if error:
continue
for j, angle in links[j2].copy().items():
if polar_diff(angle, angle2) < NMS_ANGLE and edges[j][2] > score:
error = True
break
if error:
continue
# pruning other edges
for j, angle in links[j1].copy().items():
if polar_diff(angle, angle1) < NMS_ANGLE and edges[j][2] < score:
delete_edge(j)
for j, angle in links[j2].copy().items():
if polar_diff(angle, angle2) < NMS_ANGLE and edges[j][2] < score:
delete_edge(j)
# add this edge
esets.add(i)
links[j1][i] = angle1
links[j2][i] = angle2
# remove intersected edges
for i in esets.copy():
if i not in esets:
continue
for j in esets.copy():
if j not in esets:
continue
if edges[i][2] < edges[j][2]:
continue
if is_intersected(*edges[i][:2], *edges[j][:2]):
delete_edge(j)
return [edges[i] for i in sorted(esets)]
def line_score(p1, p2, line_map, shrink=True):
if p1[0] == p2[0] and p1[1] == p2[1]:
return -1, -1
r0, c0, r1, c1 = map(int, [p1[1] // 4, p1[0] // 4, p2[1] // 4, p2[0] // 4])
rr, cc, I = skimage.draw.line_aa(r0, c0, r1, c1)
if shrink:
if len(rr) <= 2:
return -1, -1
rr, cc, I = rr[1:-1], cc[1:-1], I[1:-1]
Ip = line_map[rr, cc]
Ip = Ip / np.maximum(I, Ip)
score = (I * Ip).sum() / I.sum()
Ip_sorted = np.sort(Ip)
median = Ip_sorted[max(min(2, len(Ip) - 1), len(Ip) // 7)]
return score, median
def extract_wireframe(prefix, image, result, plot=True, imshow=True):
jun, ijunc, ijund, line_map, jdep = parse_result(result)
line_map[line_map > 1] = 1
line_map = skimage.filters.gaussian(line_map, SIGMA) * SCALE
line_map[line_map > 1] = 1
if plot:
# plt.figure(), plt.imshow(jdep[0])
# plt.figure(), plt.imshow(jdep[1])
# plt.figure(), plt.title("Edge map"), plt.tight_layout()
# plt.imshow(line_map), plt.colorbar(fraction=0.046)
plt.figure(), plt.axis("off"), plt.tight_layout(), plt.axes([0, 0, 1, 1])
plt.xlim([-0.5, 127.5]), plt.ylim([127.5, -0.5])
plt.imshow(line_map, cmap="Purples")
# for i in ijunc:
# plt.scatter(jun[i][0] / 4, jun[i][1] / 4, color="red", zorder=100)
# for i in ijund:
# plt.scatter(jun[i][0] / 4, jun[i][1] / 4, color="blue", zorder=100)
plt.savefig(f"{prefix}_map.svg", bbox_inches=0)
plt.close()
# plt.figure(), plt.title("Initial Wireframe"), plt.tight_layout()
# plt.imshow(image), plt.colorbar(sm, fraction=0.046)
# for i in ijunc:
# plt.scatter(jun[i][0], jun[i][1], color="red", zorder=100)
edges = []
for i_, i in enumerate(ijunc):
for j in ijunc[:i_]:
p1, p2 = jun[i], jun[j]
score, median = line_score(p1, p2, line_map)
if median > MEDIAN and score > SCORE:
edges.append((p1, p2, score, i, j))
# plt.plot([p1[0], p2[0]], [p1[1], p2[1]], c=line_color(score))
edges.sort(key=lambda e: e[2])
edges = edge_pruning(jun, edges)
# plt.figure(), plt.title("Prunned Wireframe"), plt.tight_layout()
# plt.imshow(image), plt.colorbar(sm, fraction=0.046)
# for i in ijunc:
# plt.scatter(jun[i][0], jun[i][1], color="red", zorder=100)
# for p1, p2, score, *_ in edges:
# plt.plot([p1[0], p2[0]], [p1[1], p2[1]], c=line_color(score))
selected_juns = set(i for i in ijunc)
for i in ijund:
pi = jun[i][:2]
mind = 1e10
for e in edges:
dist = point2line(pi, e[0], e[1])
if dist < mind:
mind = dist
mine = e
if mind < MAX_T_DISTANCE:
pip = project(pi, mine[0], mine[1]) # reproject for nicer figure
jun[i][0], jun[i][1] = pip[0], pip[1]
best_score = -1e100
for j, pj in enumerate(jun):
if i == j or (j > len(ijunc) and j not in selected_juns):
continue
if min(angle(pi, pj, mine[0]), angle(pi, pj, mine[1])) < 0.2:
continue
if LA.norm(
|
np.array(pi[:2])
|
numpy.array
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 29 17:23:20 2019
Functions for smoothing and for generating the pdf and cdf.
@author: Yanlong
"""
from __future__ import unicode_literals
import numpy as np
#import matplotlib as mpl
#mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['xtick.direction'] = 'in'
matplotlib.rcParams['ytick.direction'] = 'in'
matplotlib.rcParams['xtick.top'] = True
matplotlib.rcParams['ytick.right'] = True
from scipy.optimize import curve_fit
import sys
import glob
import lmfit
from scipy import signal
from scipy import interpolate
from scipy import special
from scipy import optimize
from scipy import stats
def release_list(a):
del a[:]
del a
def func(x, rho, a, b, rc):
return np.log(np.exp(rho)/(np.exp(x)/np.exp(rc))**a / (1.+np.exp(x)/np.exp(rc))**(b-a))
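def _func_linear_space_sketch(r, rho0, a, b, rc_lin):
    # Illustrative sketch (not in the original module): func() above is the logarithm of this
    # double power-law profile, rho(r) = rho0 / ((r/rc)**a * (1 + r/rc)**(b-a)); both its input
    # and output are kept in log space so fits remain stable over many decades in radius.
    return rho0 / ((r / rc_lin) ** a * (1. + r / rc_lin) ** (b - a))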
def func_pw(x, rho, a, b, rc):
return np.log(np.exp(rho)/((np.exp(x)/np.exp(rc))**a + (np.exp(x)/np.exp(rc))**b ) )
def func_cdf(x, rho, a, b, rc):
rc = np.exp(rc)
rho = np.exp(rho)
x = np.exp(x)
x = x/rc
aa = 3-a
bb = 1+a-b
beta = x**aa / aa * special.hyp2f1(aa, 1-bb, aa+1, -x)
try:
    res = np.log(4*np.pi*rho*rc**3 * beta)
except Exception:
    res = -np.inf  # keep res defined when the argument of the log is invalid or overflows
return res
def func_cdf_inv(frac, rho, a, b, c):
m_h = func_cdf(np.log(1e8), rho, a, b, c) + np.log(frac)
rmin = np.log(1e-10)
rmax = np.log(1e10)
alpha = 0.5
while rmax - rmin > 1e-6:
rmid = alpha*rmax + (1.-alpha)*rmin
if func_cdf(rmid, rho, a, b, c)>m_h:
rmax = rmid
else:
rmin = rmid
rmid = alpha*rmax + (1.-alpha)*rmin
return np.exp(rmid)
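def _func_cdf_inv_example(rho=12.0, a=1.0, b=4.0, rc=-2.0):
    # Illustrative check (not in the original module): func_cdf_inv bisects in log-radius, so
    # evaluating the cumulative mass at the returned radius recovers the requested fraction of
    # the mass enclosed within r = 1e8 (the reference radius used above).
    r_half = func_cdf_inv(0.5, rho, a, b, rc)
    m_half = np.exp(func_cdf(np.log(r_half), rho, a, b, rc))
    m_ref = np.exp(func_cdf(np.log(1e8), rho, a, b, rc))
    return m_half / m_ref  # approximately 0.5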
def func_cdf_pw(x, rho, a, b, rc):
rc = np.exp(rc)
rho = np.exp(rho)
x = np.exp(x)
x = x/rc
#print(rho, x, rc)
ab = (a-3.) /(a-b)
#print(aa, bb)
hgf = x**(3.-a) / (3.-a) * special.hyp2f1(1, ab, ab+1, -x**(b-a))
return np.log(4*np.pi*rho*rc**3 * hgf)
def func_cdf_pw_inv(frac, rho, a, b, c):
m_h = func_cdf_pw(np.log(1e8), rho, a, b, c) + np.log(frac)
rmin = np.log(1e-10)
rmax = np.log(1e10)
alpha = 0.5
while rmax - rmin > 1e-6:
rmid = alpha*rmax + (1.-alpha)*rmin
if func_cdf_pw(rmid, rho, a, b, c)>m_h:
rmax = rmid
else:
rmin = rmid
rmid = alpha*rmax + (1.-alpha)*rmin
return np.exp(rmid)
def cdf_sample(r_m):
rmin = r_m[3,0] *1.01
rmax = r_m[-1,0] *1.01
n_points = 500
r = np.logspace(np.log10(rmin), np.log10(rmax), num=n_points)
mcum = np.zeros(n_points)
mt = 0.
i = j = 0
for i in range(n_points):
if j>=len(r_m):
mcum[i] = mt
continue
while r_m[j, 0]<r[i]:
mt += r_m[j, 1]
j += 1
if j >=len(r_m):
break
mcum[i] = mt
#print(r[i], r_m[j-1,0], mcum[i])
return np.array(list(zip(r, mcum)))
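def _cdf_sample_example():
    # Illustrative sketch (not in the original module): cdf_sample expects an (N, 2) array of
    # particle radii and masses sorted by radius and returns the cumulative mass profile M(<r)
    # sampled on a 500-point logarithmic grid.
    r_m = np.column_stack([np.linspace(0.1, 10.0, 100), np.full(100, 1.0)])
    cdf = cdf_sample(r_m)
    return cdf  # cdf[:, 0] are radii, cdf[:, 1] the (non-decreasing) enclosed mass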
def pdf_sample(r_m):
rmin = r_m[3,0] *1.01
rmax = r_m[-1,0] *1.01
n_points = 20
r = np.logspace(np.log10(rmin), np.log10(rmax), num=n_points-1)
eta = r[2]/r[1]
dmcum = np.zeros(n_points-1)
i = j = 0
for i in range(n_points-1):
while j < len(r_m):
if r_m[j, 0]<r[i+1]:
dmcum[i]+=r_m[j, 1]
j+=1
continue
else:
break
dmcum[i] /= ((r[i]*eta)**3 - r[i]**3)*4.*np.pi/3.
#print(r[i], r_m[j-1,0], mcum[i])
result = np.array(list(zip(r*np.sqrt(eta), dmcum)))
return result[np.all(result > 1e-3, axis=1)]
def cdf_smooth(raw_cdf):
n_points = 500
x = np.log(raw_cdf[:,0])
y = np.log(raw_cdf[:,1])
#tck = interpolate.splrep(x, y, s=0)
#tck, u = interpolate.splprep([x, y], s=0)
f = interpolate.interp1d(x, y, kind='linear')
xnew = np.linspace(x[0], x[-1], n_points)
#ynew = interpolate.splev(xnew, tck, der=0)
ynew = f(xnew)
#spl = interpolate.UnivariateSpline(xnew, ynew)
#spl.set_smoothing_factor(0.9)
#ynew = spl(xnew)
ynew = signal.savgol_filter(ynew, 349, 2)
return np.array(list(zip(np.exp(xnew), np.exp(ynew))))
def cdf_smooth_cheby(raw_cdf):
n_points = 500
x = np.log(raw_cdf[:,0])
y = np.log(raw_cdf[:,1])
#tck = interpolate.splrep(x, y, s=0)
#tck, u = interpolate.splprep([x, y], s=0)
f = interpolate.interp1d(x, y, kind='linear')
xnew = np.linspace(x[0], x[-1], n_points)
#ynew = interpolate.splev(xnew, tck, der=0)
ynew = f(xnew)
#spl = interpolate.UnivariateSpline(xnew, ynew)
#spl.set_smoothing_factor(0.9)
#ynew = spl(xnew)
#ynew = signal.savgol_filter(ynew, 349, 2)
cheby = np.polynomial.Chebyshev.fit(xnew, ynew, 4)
#y = signal.savgol_filter(y, len(x)//8*2+1, 3)
ynew = cheby(xnew)
return np.array(list(zip(np.exp(xnew), np.exp(ynew))))
def cdf_smooth_mono(raw_cdf):
return
def pdf_cal(cdf):
x = np.log(cdf[:,0])
y = np.log(cdf[:,1])
dydx_log = np.diff(y)/np.diff(x)
z = dydx_log * np.exp(y[:-1])/4./np.pi/(np.exp(x[:-1]))**3
return np.array(list(zip(np.exp(x[:-1]), z)))
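def _pdf_cal_example():
    # Illustrative check (not in the original module): pdf_cal recovers the density from the
    # logarithmic slope of the cumulative mass, rho(r) = M(r) * dlnM/dlnr / (4*pi*r**3); for a
    # uniform-density sphere with M(r) = r**3 it should return 3/(4*pi) everywhere.
    r = np.logspace(-1, 1, 200)
    cdf = np.column_stack([r, r ** 3])
    pdf = pdf_cal(cdf)
    return pdf  # pdf[:, 1] is approximately 3 / (4 * np.pi)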
def fit_pdf(pdf):
fmodel = lmfit.Model(func)
#fmodel = lmfit.Model(func_pw)
fmodel.set_param_hint('a', min=0)
x = np.log(pdf[:, 0])
y = np.log(pdf[:, 1])
result = fmodel.fit(y, x=x, rho = 12., a = 0., b=3., rc =-2.)
#print(result.fit_report())
params = list(result.best_values.values())
params[0] = np.exp(params[0])
params[-1] = np.exp(params[-1])
print(params)
return np.array(list(zip(np.exp(x), np.exp(result.best_fit)))), params
def fit_cdf(raw_cdf, r_h, m_tot):
for j in range(len(raw_cdf)):
if raw_cdf[j][1] > raw_cdf[-1, 1]/2.:
break
weights = np.ones(len(raw_cdf))
#weights[[j, -1]] = 50.
#print(raw_cdf[j, 0])
fmodel = lmfit.Model(func_cdf)
#print(m_tot, r_h)
#print((m_tot/2.0/(4*np.pi*np.exp(-2)**3)/((r_h/np.exp(-2))**(3-1)/(3-1) * special.hyp2f1(3-1, 4-1, 4-1, -r_h/np.exp(-2)))))
#fmodel.set_param_hint('rho', expr='log(%f/2.0/(4*pi*exp(rc)**3)/((%f/exp(rc))**(3-a)/(3-a) * special.hyp2f1(3-a, b-a, 4-a, -%f/exp(rc))))'%(m_tot, r_h, r_h), vary=True)
#fmodel.set_param_hint('rc', expr='a+b')
fmodel.set_param_hint('a', value=1, min=0)
#fmodel.set_param_hint('m_tot', expr='2* (4*pi*exp(rc)**3)*exp(rho)*((r_h/exp(rc))**(3-a)/(3-a) * special.hyp2f1(3-a, b-a, 4-a, -r_h/exp(rc)))')
#fmodel.set_param_hint('b',)
x = np.log(raw_cdf[:, 0])
y = np.log(raw_cdf[:, 1])
result = fmodel.fit(y, x=x, rho = 12., a = 1, b=4, rc =-2., method='least_squares', weights=weights)
#print(result.fit_report())
params = list(result.best_values.values())
params[0] = np.exp(params[0])
params[-1] = np.exp(params[-1])
print(params)
return np.array(list(zip(np.exp(x), np.exp(result.best_fit)))), params
def fit_cdf_pw(raw_cdf):
for j in range(len(raw_cdf)):
if raw_cdf[j][1] > raw_cdf[-1, 1]/2.:
break
weights = np.ones(len(raw_cdf))
weights[[j, -1]] = 50.
#print(raw_cdf[j, 0])
fmodel = lmfit.Model(func_cdf_pw)
fmodel.set_param_hint('a', value=1, min=0)
#fmodel.set_param_hint('b')
x = np.log(raw_cdf[:, 0])
y = np.log(raw_cdf[:, 1])
result = fmodel.fit(y, x=x, rho = 12., a = 1, b=4, rc =-2., method='least_squares', weights=weights)
#print(result.fit_report())
params = list(result.best_values.values())
params[0] = np.exp(params[0])
params[-1] = np.exp(params[-1])
print(params)
return np.array(list(zip(np.exp(x), np.exp(result.best_fit)))), params
def fit_cdf_chi2(x, r, m):
model = func_cdf(r, *x)
chi_sq = sum((model - m)**2)
return chi_sq
def fit_cdf_scipy(raw_cdf, r_h, m_h, r_max, m_tot):
r = np.log(raw_cdf[:,0])
m = np.log(raw_cdf[:,1])
fun_con = lambda x: func_cdf(np.log(r_h), *x) - np.log(m_h)
fun_con_tot = lambda x: func_cdf(np.log(r_max), *x) - np.log(m_tot)
delta = 0
cons = ({'type': 'eq', 'fun': fun_con},
{'type': 'eq', 'fun': fun_con_tot},
{'type': 'ineq', 'fun': lambda x: x[1]-delta},
{'type': 'ineq', 'fun': lambda x: 3.0-x[1]-delta},
{'type': 'ineq', 'fun': lambda x: x[2]-3.0-delta})
res = optimize.minimize(fit_cdf_chi2, (12, 1, 4, -1), args=(r, m), method='SLSQP', constraints=cons)
params = res.x
fits = np.array(func_cdf(r, *params))
chi_sq_test = stats.chisquare(m, f_exp=fits)
fits = np.exp(fits)
print(fun_con(params), fun_con_tot(params))
if res.success == False or chi_sq_test[1]<0.05 or fun_con(params)>1e-5 or fun_con_tot(params)>1e-5:
params[2] = -1.0
params[0] = np.exp(params[0])
params[1] = np.abs(params[1])
params[-1] = np.exp(params[-1])
r_h_fit = func_cdf_inv(0.5, np.log(params[0]), params[1], params[2], np.log(params[3]))
# if params[-1] > r_max or r_h_fit > r_max:
# params[2] = -1.0
# if np.log10(r_h_fit/r_h) > 0.1:
# params[2] = -1.0
return np.array(list(zip(np.exp(r), fits))), params
def fit_cdf_pw_chi2(x, r, m):
model = func_cdf_pw(r, *x)
chi_sq = sum((model - m)**2)
return chi_sq
def fit_cdf_pw_scipy(raw_cdf, r_h, m_h, r_max, m_tot):
r = np.log(raw_cdf[:,0])
m = np.log(raw_cdf[:,1])
fun_con = lambda x: func_cdf_pw(np.log(r_h), *x) - np.log(m_h)
fun_con_tot = lambda x: func_cdf_pw(np.log(r_max), *x) -
|
np.log(m_tot)
|
numpy.log
|
# -*- coding: utf-8 -*-
import os
import sys
import argparse
from glob import glob
from PIL import Image, ImageDraw
import numpy as np
import warnings
sys.path.insert(1, os.path.realpath(os.path.join(sys.path[0], os.pardir, os.pardir)))
from measurements.image_graph_parser import ImageGraphParser
from frequency_response import FrequencyResponse
DIR_PATH = os.path.abspath(os.path.join(__file__, os.pardir, os.pardir))
def parse_image(im, model):
"""Parses graph image downloaded from innerfidelity.com"""
# Crop by left and right edges
box = (69, 31, 550, 290)
im = im.crop(box)
px_a_max = 0
px_a_min = im.size[1]
#im.show()
# X axis
f_min = 20
f_max = 20000
f_step = (f_max / f_min) ** (1 / im.size[0])
f = [f_min]
for _ in range(1, im.size[0]):
f.append(f[-1] * f_step)
# Y axis
a_max = 150
a_min = 66
a_res = (a_max - a_min) / (px_a_min - px_a_max)
# Colors
#line_color = np.array([50, 155, 254])
line_color = np.array([0, 135, 0])
_im = im.copy()
inspection = _im.load()
amplitude = []
# Iterate each column
for x in range(im.size[0]):
pxs = [] # Graph pixels
# Iterate each row (pixel in column)
for y in range(im.size[1]):
# Convert read RGB pixel values and convert to HSV
rgba = im.getpixel((x, y))
# Graph pixels are colored
if np.mean(np.abs(line_color - rgba[:3])) < 15:
pxs.append(float(y))
else:
p = im.getpixel((x, y))
inspection[x, y] = (int(0.3 * p[0]), int(255 * 0.7 + 0.3 * p[1]), int(0 + 0.3 * p[2]))
if not pxs:
# No graph pixels found on this column
amplitude.append(None)
else:
# Mean of recorded pixels
v = np.mean(pxs)
# Convert to dB value
v = a_max - v * a_res
amplitude.append(v)
# Inspection image
draw = ImageDraw.Draw(_im)
x0 = np.log(30 / f_min) / np.log(f_step)
x1 =
|
np.log(10000 / f_min)
|
numpy.log
|
#!/usr/bin/env python
"""
Holds the IonoContainer class that contains the ionospheric parameters.
@author: <NAME>
"""
from six import string_types
import os
import glob
import inspect
import posixpath
import copy
from SimISR import Path
import numpy as np
import scipy.io as sio
import scipy.interpolate
import tables
import numbers
from datetime import datetime
# From my
from SimISR.utilFunctions import Chapmanfunc, TempProfile
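def _sphere_to_cart_sketch(r, az_deg, el_deg):
    # Illustrative sketch (not part of SimISR): the IonoContainer constructor below uses a
    # compass-style spherical convention, azimuth measured from +y toward +x and elevation from
    # the x-y plane, i.e. x = r*sin(az)*cos(el), y = r*cos(az)*cos(el), z = r*sin(el).
    az, el = np.radians(az_deg), np.radians(el_deg)
    return np.array([r * np.sin(az) * np.cos(el),
                     r * np.cos(az) * np.cos(el),
                     r * np.sin(el)])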
class IonoContainer(object):
"""
Holds the coordinates and parameters to create the ISR data. This class can hold plasma parameters,
spectra, or ACFs. If given plasma parameters it can call the ISR spectrum functions so that, for each
point in time and space, a spectrum is created.
"""
#%% Init function
def __init__(self,coordlist,paramlist,times = None,sensor_loc = np.zeros(3),ver =0,coordvecs =
None,paramnames=None,species=None,velocity=None):
""" This constructor function will use create an instance of the IonoContainer class
using either cartisian or spherical coordinates depending on which ever the user prefers.
Inputs:
coordlist - Nx3 Numpy array where N is the number of coordinates.
paramlist - NxTxP Numpy array where T is the number of times and P is the number of parameters
alternatively it could be NxP if there is only one time instance.
times - A T length numpy array where T is the number of times. This is
optional input, if not given then its just a numpy array of 0-T
sensor_loc - A numpy array of length 3 that gives the sensor location.
The default value is [0,0,0] in cartisian space.
ver - (Optional) If 0 the coordlist is in Cartisian coordinates if 1 then
coordlist is a spherical coordinates.
coordvecs - (Optional) A dictionary that holds the individual coordinate vectors.
if sphereical coordinates keys are 'r','theta','phi' if cartisian 'x','y','z'.
paramnames - This is a list or number numpy array of numbers for each parameter in the
"""
# Set up the size for the time vector if its not given.
Ndims = paramlist.ndim
psizetup = paramlist.shape
if times is None:
if Ndims==3:
times = np.arange(psizetup[1])
else:
times = np.arange(1)
if Ndims==2:
paramlist = paramlist[:,np.newaxis,:]
# Assume cartesian coordinates if ver == 0, spherical if ver == 1
if ver==0:
X_vec = coordlist[:,0]
Y_vec = coordlist[:,1]
Z_vec = coordlist[:,2]
R_vec = np.sqrt(X_vec**2+Y_vec**2+Z_vec**2)
Az_vec = np.degrees(np.arctan2(X_vec,Y_vec))
El_vec = np.degrees(np.arcsin(Z_vec/R_vec))
self.Cart_Coords = coordlist
self.Sphere_Coords = np.array([R_vec,Az_vec,El_vec]).transpose()
if coordvecs is not None:
if set(coordvecs)!={'x','y','z'}:
raise NameError("Keys for coordvecs need to be 'x','y','z' ")
else:
coordvecs = ['x','y','z']
elif ver==1:
R_vec = coordlist[:,0]
Az_vec = np.radians(coordlist[:,1])
El_vec = np.radians(coordlist[:,2])
xvecmult = np.sin(Az_vec)*np.cos(El_vec)
yvecmult = np.cos(Az_vec)*np.cos(El_vec)
zvecmult = np.sin(El_vec)
X_vec = R_vec*xvecmult
Y_vec = R_vec*yvecmult
Z_vec = R_vec*zvecmult
self.Cart_Coords = np.column_stack((X_vec,Y_vec,Z_vec))
self.Sphere_Coords = coordlist
if coordvecs is not None:
if set(coordvecs)!={'r','theta','phi'}:
raise NameError("Keys for coordvecs need to be 'r','theta','phi' ")
else:
coordvecs = ['r','theta','phi']
# used to deal with the change in the files
if type(coordvecs)==np.ndarray:
coordvecs = [str(ic) for ic in coordvecs]
self.Param_List = paramlist
self.Time_Vector = times
self.Coord_Vecs = coordvecs
self.Sensor_loc = sensor_loc
self.Species = species
(Nloc,Nt) = paramlist.shape[:2]
#set up a Velocity measurement
if velocity is None:
self.Velocity=np.zeros((Nloc,Nt,3))
else:
# if in spherical coordinates and you have a velocity
if velocity.ndim ==2 and ver==1:
veltup = (velocity*np.tile(xvecmult[:,np.newaxis],(1,Nt)),
velocity*np.tile(yvecmult[:,np.newaxis],(1,Nt)),
velocity*np.tile(zvecmult[:,np.newaxis],(1,Nt)))
self.Velocity= np.dstack(veltup)
else:
self.Velocity=velocity
# set up a params name
if paramnames is None:
partparam = paramlist.shape[2:]
if species is not None:
paramnames = [['Ni_'+isp,'Ti_'+isp] for isp in species[:-1]]
paramnames.append(['Ne','Te'])
self.Param_Names=np.array(paramnames,dtype=str)
else:
paramnums = np.arange(
|
np.product(partparam)
|
numpy.product
|
# -*- coding: utf-8 -*-
from nodes import Val,Var,VSF, Add,Concat,Dot,Transpose
import numpy as np
#import zmq
from gensim.models import Word2Vec
class Word(object):
#class Word():
__slots__= ["vec"]
def __init__(self, word, vec=None):
    # avoid a shared mutable default: draw a fresh random vector per instance
    self.vec = Var(word, vec if vec is not None else np.random.random((5, 1)))
def __unicode__(self):
return self.vec.name
def toBinaryTreeRep(self):
return self.vec.name
def __repr__(self):
return u"Word(%r)"%(self.vec.name)
#def __eq__(self)
def expression(self):
return 'w2v(%s)'%self.vec.name
class Phrase(object):
#class Phrase():
__slots__= ["left","right","vec"]
def __init__(self, left, right, vec):
self.left = left
self.right = right
self.vec = vec
def __unicode__(self):
return u'(%s,%s)'%(self.left,self.right)
def toBinaryTreeRep(self):
return u'(%s %s)'%(self.left.toBinaryTreeRep(),self.right.toBinaryTreeRep())
def __repr__(self):
return "Phrase(%r,%r)"%(self.left,self.right)
def expression(self):
return self.vec.expression()
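def _combine_two_vectors_sketch(left_vec, right_vec, W, bias):
    # Illustrative sketch (not part of the original module): numerically, RecursiveNN's
    # combineTwoNodes below computes the parent vector as tanh(W @ [left; right] + bias), the
    # standard recursive neural network composition, here written with plain numpy arrays.
    x = np.concatenate([left_vec, right_vec], 0)
    return np.tanh(W.dot(x) + bias)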
class RecursiveNN(object):
#class RecursiveNN():
__slots__= ["W","bias","u_score"]
def __init__(self, W_init, bias_init, u_score_init):
assert isinstance(W_init, Var), "W should be instance of `Var`"
assert isinstance(bias_init, Var), "bias should be instance of `Var`"
self.W=W_init
self.bias=bias_init
self.u_score=u_score_init
def combineTwoNodes(self, left,right):
#TODO: there can be too many vec... replace Var to Val?
vec = Var(u'(%s⊕%s)'%(left,right))
vec.val=np.concatenate([left.vec.val,right.vec.val],0)
#TODO: replace raw concat to Concat.
#vec = Concat(left.vec,right.vec)
Wxh=Dot(self.W, vec)
x=Add(Wxh, self.bias)
vec=VSF('tanh', x)
phrase=Phrase(left,right, vec)
return phrase
def score(self, phrase):
return Dot(Transpose(self.u_score),phrase.vec)
def combineToSentence(self, sentence_words):
nodes=list(sentence_words)
assert nodes[0] is sentence_words[0]
def mergeHighestPair(nodes,score):
if len(nodes)==1:
return nodes[0], score
phrases=[self.combineTwoNodes(x,y) for x,y in zip(nodes,nodes[1:])]
scores=[self.score(node).val for node in phrases]
idx_max=
|
np.argmax(scores)
|
numpy.argmax
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 5 11:52:00 2020
@author: <NAME>
This script reproduces Figure 2 from Pittman et al., 2021.
It produces a timeseries of new production and CO2 flux for each of the 6 eqpac moorings.
Includes SST and isotherm depth.
Calculates averages during El Nino, La Nina and neutral periods.
Also calculates the thermocline slope.
Requires:
datasets/tao/tao_physics/*
processed/combined_dataset/month_data_exports.nc
processed/flux/landsch_mooring_co2_flux.nc
processed/flux/npp.nc
Produces:
processed/results/means.csv
figs/Figure2_Co2fluxevents+ratio_name+.png
processed/results/enso_mooring_avg.csv
"""
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib.pyplot as plt
from carbon_math import *
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import matplotlib.patches as patches
from matplotlib.ticker import MultipleLocator,AutoMinorLocator
from scipy.stats import linregress
import seaborn as sns
from sklearn.metrics import mean_absolute_error, mean_absolute_percentage_error
def check_single_bias(truth,model):
bias=((model-truth)/truth)*100
abs_error=abs(bias)
logbias=10**(np.nanmean(np.log10(model)-np.log10(truth)))
medlogbias=10**(np.nanmedian(np.log10(model)-
|
np.log10(truth)
|
numpy.log10
|
#!/usr/bin/env python
# coding: utf-8
# # Water Resource Management Model
#
# **<NAME>, PhD**
#
# This demo is based on the original Matlab demo accompanying the <a href="https://mitpress.mit.edu/books/applied-computational-economics-and-finance">Computational Economics and Finance</a> 2001 textbook by <NAME> and <NAME>.
#
# Original (Matlab) CompEcon file: **demdp01.m**
#
# Running this file requires the Python version of CompEcon. This can be installed with pip by running
#
# !pip install compecon --upgrade
#
# <i>Last updated: 2021-Oct-01</i>
# <hr>
# ## About
#
# A public authority must decide how much water to release from a reservoir so as to maximize the benefits derived from agricultural and recreational uses.
#
#
# - States
# - s reservoir level at beginning of summer
# - Actions
# - x quantity of water released for irrigation
# - Parameters
# - a0,a1 -- producer benefit function parameters
# - b0,b1 -- recreational user benefit function parameters
# - $\mu$ -- mean rainfall
# - $\sigma$ -- rainfall volatility
# - $\delta$ -- discount factor
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
from compecon import BasisChebyshev, DPmodel, DPoptions, qnwlogn, demo
import seaborn as sns
import pandas as pd
# ### Model parameters
# In[2]:
a0, a1, b0, b1 = 1, -2, 2, -3
μ, σ, δ = 1.0, 0.2, 0.9
# ### Steady-state
#
# The deterministic steady-state values for this model are
# In[3]:
xstar = 1.0 # action
sstar = 1.0 + (a0*(1-δ)/b0)**(1/b1) # stock
# ### State space
# The state variable is s="Reservoir Level", which we restrict to $s\in[2, 8]$.
#
# Here, we represent it with a Chebyshev basis, with $n=15$ nodes.
# In[4]:
n, smin, smax = 15, 2, 8
basis = BasisChebyshev(n, smin, smax, labels=['Reservoir'])
# ### Continuous state shock distribution
# In[5]:
m = 3 #number of rainfall shocks
e, w = qnwlogn(m, np.log(μ)-σ**2/2,σ**2) # rainfall shocks and probabilities
# ### Action space
# The choice variable x="Irrigation" must be nonnegative.
# In[6]:
def bounds(s, i=None, j=None):
return np.zeros_like(s), 1.0*s
# ### Reward function
# The reward function is
# In[7]:
def reward(s, x, i=None, j=None):
sx = s-x
u = (a0/(1+a1))*x**(1+a1) + (b0/(1+b1))*sx**(1+b1)
ux = a0*x**a1 - b0*sx**b1
uxx = a0*a1*x**(a1-1) + b0*b1*sx**(b1-1)
return u, ux, uxx
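def reward_check(s=2.0, x=1.0, h=1e-6):
    # Illustrative finite-difference check (not part of the original demo): confirms that the
    # analytic derivative ux returned by reward() matches a numerical derivative of u in x.
    u0, ux, _ = reward(s, x)
    u1, _, _ = reward(s, x + h)
    return ux, (u1 - u0) / h  # the two values should agree to about 1e-5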
# ### State transition function
# Next period, reservoir level wealth will be equal to current level minus irrigation plus random rainfall:
# In[8]:
def transition(s, x, i=None, j=None, in_=None, e=None):
g = s - x + e
gx = -
|
np.ones_like(s)
|
numpy.ones_like
|
from __future__ import print_function, division
import time, os, sys
from datetime import datetime
from cryoio.imagestack import MRCImageStack, CombinedImageStack
from cryoio.ctfstack import CTFStack, CombinedCTFStack
from cryoio.dataset import CryoDataset
opj = os.path.join
from copy import copy, deepcopy
import numpy as np
from shutil import copyfile
from util import BackgroundWorker, Output, OutputStream, Params, format_timedelta, gitutil, FiniteRunningSum
import cryoem
import cryoops
from objectives import eval_objective, SumObjectives
from importancesampler.gaussian import FixedGaussianImportanceSampler
from importancesampler.fisher import FixedFisherImportanceSampler
try:
import cPickle as pickle # python 2
except ImportError:
import pickle # python 3
import socket
from threading import Thread
try:
from Queue import Queue # python 2
except ModuleNotFoundError:
from queue import Queue # python 3
from optimizers.sagd import SAGDStep
from optimizers.sgd import SGDMomentumStep
from cryoio.mrc import writeMRC, readMRC
from symmetry import get_symmetryop
import density
import geometry
# precond should ideally be set to inv(chol(H)) where H is the Hessian
def density2params(M,fM,xtype,grad_transform = False,precond = None):
if xtype == 'real':
if grad_transform:
x0 = M if precond is None else M * precond
else:
x0 = M if precond is None else M / precond
elif xtype == 'complex':
raise NotImplementedError()
if grad_transform:
x0 = fM if precond is None else fM * precond
else:
x0 = fM if precond is None else fM / precond
elif xtype == 'complex_coeff':
raise NotImplementedError()
if grad_transform:
pfM = fM if precond is None else fM * precond
else:
pfM = fM if precond is None else fM / precond
x0 = np.empty((2*fM.size,),dtype=density.real_t)
x0[0:fM.size] = pfM.real.reshape((-1,))
x0[fM.size:] = pfM.imag.reshape((-1,))
elif xtype == 'complex_herm_coeff':
raise NotImplementedError()
assert precond is None, 'Unimplemented'
N = fM.shape[0]
NC = int(N/2) + 1
startFreq = int(1-(N%2))
herm_freqs = fM[0:NC,:,:]
if startFreq:
herm_freqs += np.roll(np.roll(np.roll(fM[::-1, ::-1, ::-1], \
1, axis=0), \
1, axis=1), \
1, axis=2)[0:NC,:,:].conj()
else:
herm_freqs += fM[::-1, ::-1, ::-1][0:NC,:,:].conj()
if not grad_transform:
herm_freqs *= 0.5
x0 = np.empty((2*NC*N**2,),dtype=density.real_t)
x0[0:NC*N**2] = herm_freqs.real.reshape((-1,))
x0[NC*N**2:] = herm_freqs.imag.reshape((-1,))
return x0
def param2density(x,xtype,sz,precond = None):
if xtype == 'real':
M, fM = x.reshape(sz), None
if precond is not None:
M = M * precond
elif xtype == 'complex':
raise NotImplementedError()
M, fM = None, x.reshape(sz)
if precond is not None:
fM = fM * precond
elif xtype == 'complex_coeff':
raise NotImplementedError()
M, fM = None, density.empty_cplx(sz)
fM.real = x[0:fM.size].reshape(sz)
fM.imag = x[fM.size:].reshape(sz)
if precond is not None:
fM *= precond
elif xtype == 'complex_herm_coeff':
raise NotImplementedError()
assert precond is None, 'Unimplemented'
M, fM = None, density.empty_cplx(sz)
N = sz[0]
NC = int(N/2) + 1
startFreq = int(1-(N%2))
zeroFreq = int(N/2)
herm_freqs = np.empty((NC,N,N),dtype=density.complex_t)
herm_freqs.real = x[0:NC*N**2].reshape(herm_freqs.shape)
herm_freqs.imag = x[NC*N**2:].reshape(herm_freqs.shape)
fM[0:NC,:,:] = herm_freqs
if startFreq:
fM[NC:,:,:] = np.roll(np.roll(herm_freqs[startFreq:zeroFreq,:,:][::-1,::-1,::-1].conj(), \
1, axis=1), 1, axis=2)
else:
fM[NC:,:,:] = herm_freqs[startFreq:zeroFreq,:,:][::-1,::-1,::-1].conj()
return M,fM
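def _real_param_roundtrip_sketch(M):
    # Illustrative sketch (not part of the original module): for the 'real' parameterization
    # (the only branch above that does not raise NotImplementedError), density2params and
    # param2density are inverses of each other, so a round trip returns the original density.
    x = density2params(M, None, 'real')
    M2, fM2 = param2density(x, 'real', M.shape)
    return np.allclose(M, M2) and fM2 is None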
"""
This class is meant to wrap an objective function and deal with
reducing FFTs while allowing the optimizers to not need to know anything
about the real-space versus fourier space (or whatever) parameterizations.
"""
class ObjectiveWrapper:
def __init__(self,xtype,obj = None,arg_dict = None,precond = None):
self.arg_dict = arg_dict if arg_dict is not None else {}
self.objective = obj
self.xtype = xtype
self.precond = precond
assert xtype in ['real','complex','complex_coeff','complex_herm_coeff']
def require_fspace(self):
return self.xtype in ['complex','complex_coeff','complex_herm_coeff']
def set_objective(self,obj,arg_dict = None):
self.args = arg_dict if arg_dict is not None else {}
self.objective = obj
if self.require_fspace():
assert self.objective.fspace
else:
assert not self.objective.fspace
def get_parameter(self):
return self.x0
def convert_parameter(self,x,comp_real=False,comp_fspace=False):
is_x0 = x is self.x0
if is_x0:
M, fM = self.M0, self.fM0
else:
M, fM = param2density(x, self.xtype, self.M0.shape, \
precond=self.precond)
if comp_real and M is None:
# M = density.fspace_to_real(fM)
M = fM
if comp_fspace and fM is None:
# fM = density.real_to_fspace(M)
fM = M
return M, fM
def set_density(self,M0,fM0):
self.M0 = M0
self.fM0 = fM0
self.x0 = density2params(M0,fM0,self.xtype,precond=self.precond)
return self.x0
def eval_obj(self,x,**kwargs):
M, fM = self.convert_parameter(x)
cargs = copy(self.args)
cargs.update(kwargs)
if cargs.get('compute_gradient',True):
logP,dlogP,outputs = self.objective.eval(M=M, fM=fM,
**cargs)
else:
logP,outputs = self.objective.eval(M=M, fM=fM,
**cargs)
return logP,outputs
if self.xtype in ['complex_coeff','complex_herm_coeff'] :
raise NotImplementedError()
if cargs.get('all_grads',False):
new_dlogPs = []
for adlogP in outputs['all_dlogPs']:
new_dlogP = density2params(None,adlogP.reshape(fM.shape), \
self.xtype,grad_transform=True, \
precond=self.precond)
new_dlogPs.append(new_dlogP)
outputs['all_dlogPs'] = new_dlogPs
dlogP = density2params(None,dlogP.reshape(fM.shape),self.xtype, \
grad_transform=True,precond=self.precond)
return logP,dlogP.reshape(x.shape),outputs
class CryoOptimizer(BackgroundWorker):
def outputbatchinfo(self,batch,res,logP,prefix,name):
diag = {}
stat = {}
like = {}
N_M = batch['N_M']
cepoch = self.cryodata.get_epoch(frac=True)
epoch = self.cryodata.get_epoch()
num_data = self.cryodata.N_D_Train
# sigma = np.sqrt(np.mean(res['Evar_like']))
sigma = np.sqrt(max(0,
|
np.mean(res['Evar_like'])
|
numpy.mean
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import pytest
import numpy as np
from itertools import product
from celerite import terms
__all__ = ["test_product", "test_jacobian"]
def test_product(seed=42):
np.random.seed(seed)
t = np.sort(np.random.uniform(0, 5, 100))
tau = t[:, None] - t[None, :]
k1 = terms.RealTerm(log_a=0.1, log_c=0.5)
k2 = terms.ComplexTerm(0.2, -3.0, 0.5, 0.01)
k3 = terms.SHOTerm(1.0, 0.2, 3.0)
K1 = k1.get_value(tau)
K2 = k2.get_value(tau)
K3 = k3.get_value(tau)
assert np.allclose((k1 + k2).get_value(tau), K1 + K2)
assert np.allclose((k3 + k2).get_value(tau), K3 + K2)
assert np.allclose((k1 + k2 + k3).get_value(tau), K1 + K2 + K3)
for (a, b), (A, B) in zip(product((k1, k2, k3, k1+k2, k1+k3, k2+k3),
(k1, k2, k3)),
product((K1, K2, K3, K1+K2, K1+K3, K2+K3),
(K1, K2, K3))):
assert np.allclose((a * b).get_value(tau), A*B)
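def test_real_term_value(seed=42):
    # Illustrative sketch (not part of the original test module): a single RealTerm kernel
    # evaluated on the lag matrix is a * exp(-c * |tau|) with a = exp(log_a), c = exp(log_c),
    # which is what get_value is expected to return.
    np.random.seed(seed)
    t = np.sort(np.random.uniform(0, 5, 10))
    tau = t[:, None] - t[None, :]
    k = terms.RealTerm(log_a=0.1, log_c=0.5)
    expected = np.exp(0.1) * np.exp(-np.exp(0.5) * np.abs(tau))
    assert np.allclose(k.get_value(tau), expected)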
def test_bounds(seed=42):
bounds = [(-1.0, 0.3), (-2.0, 5.0)]
kernel = terms.RealTerm(log_a=0.1, log_c=0.5, bounds=bounds)
b0 = kernel.get_parameter_bounds()
assert all(np.allclose(a, b) for a, b in zip(b0, bounds))
kernel = terms.RealTerm(log_a=0.1, log_c=0.5,
bounds=dict(zip(["log_a", "log_c"], bounds)))
assert all(np.allclose(a, b)
for a, b in zip(b0, kernel.get_parameter_bounds()))
@pytest.mark.parametrize(
"k",
[
terms.RealTerm(log_a=0.1, log_c=0.5),
terms.RealTerm(log_a=0.1, log_c=0.5) +
terms.RealTerm(log_a=-0.1, log_c=0.7),
terms.ComplexTerm(log_a=0.1, log_c=0.5, log_d=0.1),
terms.ComplexTerm(log_a=0.1, log_b=-0.2, log_c=0.5, log_d=0.1),
terms.SHOTerm(log_S0=0.1, log_Q=-1, log_omega0=0.5),
terms.SHOTerm(log_S0=0.1, log_Q=1.0, log_omega0=0.5),
terms.SHOTerm(log_S0=0.1, log_Q=1.0, log_omega0=0.5) +
terms.RealTerm(log_a=0.1, log_c=0.4),
terms.SHOTerm(log_S0=0.1, log_Q=1.0, log_omega0=0.5) *
terms.RealTerm(log_a=0.1, log_c=0.4),
]
)
def test_jacobian(k, eps=1.34e-7):
if not terms.HAS_AUTOGRAD:
with pytest.raises(ImportError):
jac = k.get_coeffs_jacobian()
return
v = k.get_parameter_vector()
c =
|
np.concatenate(k.coefficients)
|
numpy.concatenate
|
"Unit tests of larry."
import datetime
import unittest
import numpy as np
nan = np.nan
from numpy.testing import (assert_, assert_almost_equal, assert_raises,
assert_equal)
import la
from la import larry
from la.util.testing import printfail, noreference
from la.util.testing import assert_larry_equal as ale
class Test_init(unittest.TestCase):
"Test init of larry class"
def setUp(self):
self.list = [[1,2],[3,4]]
self.tuple = ((1,2),(3,4))
self.matrix = np.matrix([[1,2],[3,4]])
self.array = np.array([[1,2],[3,4]])
self.label = [[0,1],[0,1]]
def test_init_list(self):
"larry.__init__list"
p = larry(self.list)
t = self.array
msg = printfail(t, p.x, 'x')
self.assert_((p.x == t).all(), msg)
self.assert_(self.label == p.label,
printfail(self.label, p.label, 'label'))
def test_init_tuple(self):
"larry.__init__tuple"
p = larry(self.tuple)
t = self.array
msg = printfail(t, p.x, 'x')
self.assert_((p.x == t).all(), msg)
self.assert_(self.label == p.label,
printfail(self.label, p.label, 'label'))
def test_init_matrix(self):
"larry.__init__matrix"
p = larry(self.matrix)
t = self.array
msg = printfail(t, p.x, 'x')
self.assert_((p.x == t).all(), msg)
self.assert_(self.label == p.label,
printfail(self.label, p.label, 'label'))
def test_init_array(self):
"larry.__init__array"
p = larry(self.array)
t = self.array
msg = printfail(t, p.x, 'x')
self.assert_((p.x == t).all(), msg)
self.assert_(self.label == p.label,
printfail(self.label, p.label, 'label'))
class Test_unary(unittest.TestCase):
"Test unary functions of larry class"
def setUp(self):
self.tol = 1e-8
self.nancode = -9999
self.x = np.array([[ 1.0, 1.0],
[ 1.0, 1.0],
[ 1.0, 1.0]])
self.l = larry(self.x)
self.x2 = np.array([1.0, 1.0])
self.l2 = larry(self.x2)
self.x3 = np.random.rand(2,3,4)
self.l3 = larry(self.x3)
self.l4 = larry([ nan, 0.0, np.inf, -10.0, -np.inf])
def test_log_1(self):
"larry.log_1"
d = larry([[ 0.0, 0.0],
[ 0.0, 0.0],
[ 0.0, 0.0]])
ale(self.l.log(), d, 'log_1', original=self.l)
def test_log_2(self):
"larry.log_2"
t = np.log(self.x3)
t = larry(t)
p = self.l3.log()
msg = printfail(t, p, 'larry')
t[np.isnan(t.x)] = self.nancode
p[np.isnan(p.x)] = self.nancode
self.assert_((abs(t - p) < self.tol).all(), msg)
self.assert_(noreference(p, self.l3), 'Reference found')
self.assert_(noreference(p, t), 'Reference found')
def test_log_3(self):
"larry.log_3"
t = np.array([ 0.0, 0.0])
p = self.l2.log()
msg = printfail(t, p.x, 'x')
t[np.isnan(t)] = self.nancode
p[np.isnan(p.x)] = self.nancode
self.assert_((abs(t - p) < self.tol).all(), msg)
label = [[0,1]]
self.assert_(label == p.label, printfail(label, p.label, 'label'))
self.assert_(noreference(p, self.l2), 'Reference found')
def test_exp(self):
"larry.exp"
t = np.array([[ 1.0, 1.0],
[ 1.0, 1.0],
[ 1.0, 1.0]])
t = np.e * t
p = self.l.exp()
msg = printfail(t, p.x, 'x')
t[np.isnan(t)] = self.nancode
p[np.isnan(p.x)] = self.nancode
self.assert_((abs(t - p) < self.tol).all(), msg)
label = [[0,1,2], [0,1]]
self.assert_(label == p.label, printfail(label, p.label, 'label'))
self.assert_(noreference(p, self.l), 'Reference found')
def test_sqrt(self):
"larry.sqrt"
t = np.array([[ 1.0, 1.0],
[ 1.0, 1.0],
[ 1.0, 1.0]])
p = self.l.sqrt()
msg = printfail(t, p.x, 'x')
t[np.isnan(t)] = self.nancode
p[np.isnan(p.x)] = self.nancode
self.assert_((abs(t - p) < self.tol).all(), msg)
label = [[0,1,2], [0,1]]
self.assert_(label == p.label, printfail(label, p.label, 'label'))
self.assert_(noreference(p, self.l), 'Reference found')
def test_sign(self):
"larry.sign"
t = np.array([[ 1.0, 1.0],
[ 1.0, 1.0],
[ 1.0, 1.0]])
p = self.l.sign()
msg = printfail(t, p.x, 'x')
t[np.isnan(t)] = self.nancode
p[np.isnan(p.x)] = self.nancode
self.assert_((abs(t - p) < self.tol).all(), msg)
label = [[0,1,2], [0,1]]
self.assert_(label == p.label, printfail(label, p.label, 'label'))
self.assert_(noreference(p, self.l), 'Reference found')
def test_power(self):
"larry.power"
t = np.array([[ 1.0, 1.0],
[ 1.0, 1.0],
[ 1.0, 1.0]])
p = self.l.power(2)
msg = printfail(t, p.x, 'x')
t[np.isnan(t)] = self.nancode
p[np.isnan(p.x)] = self.nancode
self.assert_((abs(t - p) < self.tol).all(), msg)
label = [[0,1,2], [0,1]]
self.assert_(label == p.label, printfail(label, p.label, 'label'))
self.assert_(noreference(p, self.l), 'Reference found')
def test___pow__(self):
"larry.__pow__"
t = np.array([[ 1.0, 1.0],
[ 1.0, 1.0],
[ 1.0, 1.0]])
p = self.l**2
msg = printfail(t, p.x, 'x')
t[np.isnan(t)] = self.nancode
p[np.isnan(p.x)] = self.nancode
self.assert_((abs(t - p) < self.tol).all(), msg)
label = [[0,1,2], [0,1]]
self.assert_(label == p.label, printfail(label, p.label, 'label'))
self.assert_(noreference(p, self.l), 'Reference found')
def test_cumsum_1(self):
"larry.cumsum_1"
t = np.array([[ 1.0, 1.0],
[ 2.0, 2.0],
[ 3.0, 3.0]])
p = self.l.cumsum(0)
msg = printfail(t, p.x, 'x')
t[np.isnan(t)] = self.nancode
p[np.isnan(p.x)] = self.nancode
self.assert_((abs(t - p) < self.tol).all(), msg)
label = [[0,1,2], [0,1]]
self.assert_(label == p.label, printfail(label, p.label, 'label'))
self.assert_(noreference(p, self.l), 'Reference found')
def test_cumsum_2(self):
"larry.cumsum_2"
t = np.array([[ 1.0, 2.0],
[ 1.0, 2.0],
[ 1.0, 2.0]])
p = self.l.cumsum(1)
msg = printfail(t, p.x, 'x')
t[np.isnan(t)] = self.nancode
p[np.isnan(p.x)] = self.nancode
self.assert_((abs(t - p) < self.tol).all(), msg)
label = [[0,1,2], [0,1]]
self.assert_(label == p.label, printfail(label, p.label, 'label'))
self.assert_(noreference(p, self.l), 'Reference found')
def test_cumsum_3(self):
"larry.cumsum_3"
t = np.cumsum(self.x3, 0)
t = larry(t)
p = self.l3.cumsum(0)
msg = printfail(t, p, 'larry')
t[np.isnan(t.x)] = self.nancode
p[np.isnan(p.x)] = self.nancode
self.assert_((abs(t - p) < self.tol).all(), msg)
self.assert_(noreference(p, self.l3), 'Reference found')
self.assert_(noreference(p, t), 'Reference found')
def test_cumsum_4(self):
"larry.cumsum_4"
t = np.array([ 1.0, 2.0])
p = self.l2.cumsum(0)
msg = printfail(t, p.x, 'x')
t[
|
np.isnan(t)
|
numpy.isnan
|
"""
.. moduleauthor:: <NAME> <<EMAIL>>
To place everything about estimating the parameters of an ode model
under square loss in one single module. Focus on the standard local
method which means obtaining the gradient and Hessian.
"""
#__all__ = [] # don't really want to export this
import copy
import functools
from numbers import Number
import numpy as np
import scipy.sparse
from scipy.interpolate import LSQUnivariateSpline
from scipy.optimize import minimize
from pygom.loss.loss_type import Square
from pygom.model import ode_utils
from pygom.model._model_errors import InputError
from pygom.model.ode_variable import ODEVariable
# Corrections to base_loss.py for initial condition fitting via the SquareLoss function using the target_state optional parameters
# Performed by <NAME> 5.6.2020 Chemelion Inc.
# The problem is an error in the target_state index construction, which could be corrected by using a list extend instead of an append operation
# The debugging and corrections are all commented with lines containing the string 'JSM'
JSMDEBUG = False # to debug the problem above with print statements
# JSMDEBUG = True
class BaseLoss(object):
"""
This contains the base that stores all the information of an ode.
Parameters
----------
theta: array like
input value of the parameters
ode: :class:`DeterministicOde`
the ode class in this package
x0: numeric
initial time
t0: numeric
initial value
t: array like
time points where observations were made
y: array like
observations
state_name: str
the state which the observations came from
state_weight: array like
weight for the observations
target_param: str or array like
parameters that are not fixed
target_state: str or array like
states that are not fixed, applicable only when the initial
values are also of interest
"""
def __init__(self, theta, ode,
x0, t0,
t, y,
state_name, state_weight=None,
target_param=None, target_state=None):
### Execute all the checks first
# conversion into np
t = ode_utils.check_array_type(t)
y = ode_utils.check_array_type(y)
if state_weight is None:
state_weight = 1.0
if len(y) == y.size:
y = y.flatten()
n, p = len(y), 1
else:
n, p = y.shape
if JSMDEBUG:
print('JSM debug init class base_loss')
assert len(t) == n, "Number of observations and time must be equal"
## TODO: think about whether this should be a copy
## there are pros and cons with referencing or copy
## if we copy, we isolate the ode so we can make a
## distributed/parallel estimate but it is easier to diagnose problems
## when we don't copy and also make use of the plotting methods
## because the parameters are now basically just a pointer that is
## continuously updated
self._ode = ode
# We are making a shitty check here because I screwed up (sort of)
# Should have been a base class where we do not have the target_param
# and target_state and another class extending it. The only problem of
# that is the lost of ability to make faster calculation, which is not
# even possible now because of how OperateOdeModel works. Ideally,
# OperateOdeModel will take in the target_param in a way so that the
# gradient information is only computed on those targeted instead of
# computing the full vector before extracting the relevant elements.
# Basically, it will require a lot of work to make things sync and
# that is too much effort and time which I do not have
if self._ode.parameters is None:
if self._ode.num_param != 0:
# note that this is necessary because we want to make sure that
# it is possible to only estimate part of the full parameter set
raise RuntimeError("Set the parameters of the ode first")
else:
try:
self._ode.initial_values = (x0, t0)
solution = self._ode.integrate2(t)
except Exception as e:
# print(e)
if t0 == t[1]:
raise InputError("First time point t[1] is equal to t0")
else:
raise InputError("ode not initialized properly or " +
"unable to integrate using the initial " +
"values provided")
# Information
self._num_param = self._ode.num_param
self._num_state = self._ode.num_state
### We wish to know whether we are dealing with a multiobjective problem
# decide whether we are working on a restricted set
# the target parameters
if target_param is None:
self._targetParam = None
else:
self._targetParam = ode_utils.str_or_list(target_param)
if target_state is None:
self._targetState = None
else:
self._targetState = ode_utils.str_or_list(target_state)
if JSMDEBUG:
print('JSM debug: _tartetState',self._targetState)
# check stuff
# if you are trying to go through this, I apologize
if state_name is None:
# then if
if solution.shape[1] == p:
state_name = [str(i) for i in self._ode._iterStateList()]
self._setWeight(n, p, state_weight)
else:
raise InputError("Expecting the name of states " +
"for the observations")
elif isinstance(state_name, (str, list, tuple)):
if isinstance(state_name, str):
state_name = [state_name]
assert p == len(state_name), "len(state_name) and len(y[0]) not equal"
self._setWeight(n, p, state_weight)
else:
raise InputError("State name should be str or of type list/tuple")
# if self._stateWeight is not None:
if np.any(self._stateWeight <= 0):
raise InputError("Weights should be strictly positive")
# finish ordering information
# now get the index of target states
self._stateName = state_name
self._stateIndex = self._ode.get_state_index(self._stateName)
# finish
### now we set the scene
# making sure that our arrays are actually arrays
# parameters
self._setParam(theta)
self._setX0(x0)
self._y = y
self._t0 = t0
# but the observed array t does not include the initial value
# so we first check the type
self._observeT = t.copy()
# and insert the initial value
self._t = np.insert(t, 0, t0)
# and length
self._numTime = len(self._t)
# interpolating information
self._interpolateTime = None
self._interpolateTimeIndex = None
# TODO: optimal weight in terms of Pareto front from a
# multiobjective optimization perspective
self._lossObj = self._setLossType()
# final check
if self._t is None or self._y is None or self._stateName is None:
raise InputError("Error without data currently not implemented")
def _get_model_str(self):
if isinstance(self._theta, dict):
_theta = list(self._theta.values())
else:
_theta = self._theta.tolist()
model_str = "(%s, %s, %s, %s, %s, %s, %s" % (_theta,
self._ode,
self._x0.tolist(),
self._t0,
self._observeT.tolist(),
self._y.tolist(),
self._stateName)
if self._stateWeight is not None:
model_str += ", %s" % self._stateWeight.tolist()
if self._targetParam is not None:
model_str += ", %s" % self._targetParam
if self._targetState is not None:
model_str += ", %s" % self._targetState
return model_str + ")"
############################################################
#
# Gradient operators
#
############################################################
def gradient(self, theta=None, full_output=False):
"""
Returns the gradient calculated by solving the forward sensitivity
equation. Identical to :meth:`sensitivity` without the choice of
integration method
See Also
--------
:meth:`sensitivity`
"""
return self.sensitivity(theta, full_output)
def adjoint(self, theta=None, full_output=False):
"""
Obtain the gradient given input parameters using the adjoint method.
Values of the state variables are found using a univariate spline
interpolation between observed time points, with the internal
knots explicitly defined.
Parameters
----------
theta: array like
input value of the parameters
full_output: bool
if True, also output the full set of adjoint values (over time)
Returns
-------
grad: :class:`numpy.ndarray`
array of gradient
infodict : dict, only returned if full_output=True
Dictionary containing additional output information
================= =================================================
key meaning
================= =================================================
'resid' residuals given theta
'diff_loss' derivative of the loss function
'gradVec' gradient vectors
'adjVec' adjoint vectors
'interpolateInfo' info from integration over the interpolating
points
'solInterpolate' solution from the integration over the
interpolating points
'tInterpolate' interpolating time points
================= =================================================
See also
--------
:meth:`sensitivity`
"""
if theta is not None:
self._setParam(theta)
self._ode.parameters = self._theta
if self._interpolateTime is None:
self._setupInterpolationTime()
# integrate forward using the extra time points
f = ode_utils.integrateFuncJac
s_and_i = f(self._ode.ode_T,
self._ode.jacobian_T,
self._x0,
self._interpolateTime[0],
self._interpolateTime[1::],
includeOrigin=True,
full_output=full_output,
method=self._ode._intName)
if full_output:
sol = s_and_i[0]
out = s_and_i[1]
else:
sol = s_and_i
# holder, assuming that the index/order is kept (and correct) in
# the list we perform our interpolation per state and only need
# the functional form
interpolate_list = list()
for j in range(self._num_state):
spl = LSQUnivariateSpline(self._interpolateTime.tolist(),
sol[:, j],
self._t[1:-1])
interpolate_list.append(copy.deepcopy(spl))
# find the derivative of the loss function. they act as events
# which are the correction to the gradient function through time
solution = sol[self._interpolateTimeIndex, :]
if full_output:
g, info_dict = self._adjointGivenInterpolation(solution,
interpolate_list,
self._ode._intName,
full_output)
info_dict['interpolateInfo'] = out
info_dict['solInterpolate'] = sol
return g, info_dict
else:
return self._adjointGivenInterpolation(solution, interpolate_list,
self._ode._intName,
full_output)
def _setupInterpolationTime(self):
"""
Increase the number of output time points by putting in equally
space points between two original time step
"""
interpolate_time = np.array([self._t[0]])
interpolate_index = list()
num_time = len(self._t)
for i in range(num_time - 1):
tTemp = np.linspace(self._t[i], self._t[i+1], 20)[1::]
interpolate_time = np.append(interpolate_time, tTemp)
interpolate_index += [len(interpolate_time) - 1]
self._interpolateTime = interpolate_time
self._interpolateTimeIndex = interpolate_index
def _adjointGivenInterpolation(self, solution, interpolateList,
method, full_output=False):
"""
Given an interpolation of the solution of an IVP (for each state).
Compute the gradient via the adjoint method by a backward integration
"""
# find the derivative of the loss function. they act as events
# which are the correction to the gradient function through time
diff_loss = self._lossObj.diff_loss(solution[:,self._stateIndex])
num_diff_loss = len(diff_loss)
# finding the step size in reverse time
diff_t = np.diff(self._t)
# holders. for in place insertion
lambda_temp = np.zeros(self._num_state)
grad_list = list()
ga = grad_list.append
# the last gradient value.
lambda_temp[self._stateIndex] += diff_loss[-1]
ga(np.dot(self._ode.grad(solution[-1], self._t[-1]).T,
-lambda_temp)*-diff_t[-1])
# holders if we want extra shit
if full_output:
adj_vec_list = list()
adj_vec_list.append(lambda_temp)
# integration in reverse time even though our index is going forward
f = ode_utils.integrateFuncJac
for i in range(1, num_diff_loss):
# integration between two intermediate part
# start and the end points in time
tTemp = [self._t[-i-1], self._t[-i]]
lambda_temp[:] = f(self._ode.adjoint_interpolate_T,
self._ode.adjoint_interpolate_jacobian_T,
lambda_temp, tTemp[1], tTemp[0],
args=(interpolateList,),
method=method).ravel()
# and correction due to the "event" i.e. observed value
lambda_temp[self._stateIndex] += diff_loss[-i-1]
# evaluate the gradient at the observed point after the correction
ga(np.dot(self._ode.grad(solution[-i-1], tTemp[0]).T,
-lambda_temp)*-diff_t[-i-1])
if full_output:
adj_vec_list.append(lambda_temp)
# the total gradient.
grad = np.array(grad_list).sum(0)
if full_output:
# binding the dictionaries together
infoDict = dict()
infoDict['resid'] = self._lossObj.residual(solution[:,self._stateIndex])
infoDict['diff_loss'] = diff_loss
infoDict['gradVec'] = np.array(grad_list)
infoDict['adjVec'] = np.array(adj_vec_list)
infoDict['tInterpolate'] = self._interpolateTime
return grad[self._getTargetParamIndex()], infoDict
else:
return grad[self._getTargetParamIndex()]
def sensitivity(self, theta=None, full_output=False, method=None):
"""
Obtain the gradient given input parameters using forward
sensitivity method.
Parameters
----------
theta: array like
input value of the parameters
full_output: bool
if additional output is required
method: str, optional
what method to use in the integrator
Returns
-------
grad: :class:`numpy.ndarray`
array of gradient
infodict : dict, only returned if full_output=True
Dictionary containing additional output information. Same output
as :meth:`jac`
Notes
-----
It calculates the gradient by calling :meth:`jac`
"""
if full_output:
_jac, output = self.jac(theta=theta, full_output=True, method=method)
sens = output['sens']
diff_loss = output['diff_loss']
grad = self._sensToGradWithoutIndex(sens, diff_loss)
output['JTJ'] = self._sensToJTJWithoutIndex(sens)
return grad, output
else:
_jac, sens = self.jac(theta=theta, sens_output=True, full_output=False, method=method)
i = self._stateIndex
diff_loss = self._lossObj.diff_loss(sens[:,i])
grad = self._sensToGradWithoutIndex(sens, diff_loss)
return grad
def jac(self, theta=None, sens_output=False, full_output=False, method=None):
"""
Obtain the Jacobian of the objective function given input parameters
using forward sensitivity method.
Parameters
----------
theta: array like, optional
input value of the parameters
sens_output: bool, optional
whether the full set of sensitivities is required; full_output overrides
this option when true
full_output: bool, optional
if additional output is required
method: str, optional
Choice between lsoda, vode and dopri5, the three integrators
provided by scipy. Defaults to lsoda.
Returns
-------
grad: :class:`numpy.ndarray`
Jacobian of the objective function
infodict : dict, only returned if full_output=True
Dictionary containing additional output information
=========== =======================================================
key meaning
=========== =======================================================
'sens' intermediate values over the original ode and all the
sensitivities, by state, parameters
'resid' residuals given theta
'diff_loss' derivative of the loss function
=========== =======================================================
See also
--------
:meth:`sensitivity`
"""
if theta is not None:
self._setParam(theta)
self._ode.parameters = self._theta
if method is None:
method = self._ode._intName
# first we want to find out the number of sensitivities required
# add them to the initial values
num_sens = self._num_state*self._num_param
init_state_sens = np.append(self._x0, np.zeros(num_sens))
f = ode_utils.integrateFuncJac
index_out = self._getTargetParamSensIndex()
if full_output:
s_sens = f(self._ode.ode_and_sensitivity_T,
self._ode.ode_and_sensitivity_jacobian_T,
init_state_sens,
self._t[0], self._t[1::],
full_output=full_output,
method=method)
sol_sens = s_sens[0]
sol_out = s_sens[1]
output = dict()
i = self._stateIndex
output['resid'] = self._lossObj.residual(sol_sens[:, i])
output['diff_loss'] = self._lossObj.diff_loss(sol_sens[:, i])
output['sens'] = sol_sens
for i in sol_out:
output[i] = sol_out[i]
return sol_sens[:,index_out], output
else:
sol_sens = f(self._ode.ode_and_sensitivity_T,
self._ode.ode_and_sensitivity_jacobian_T,
init_state_sens,
self._t[0], self._t[1::],
method=method)
if sens_output:
return sol_sens[:, index_out], sol_sens
else:
return sol_sens[:,index_out]
############################################################
#
# Operators for Gradient with initial value
#
############################################################
def sensitivityIV(self, theta=None, full_output=False, method=None):
"""
Obtain the gradient given input parameters (which include the current
guess of the initial conditions) using forward sensitivity method.
Parameters
----------
theta: array like, optional
input value of the parameters
full_output: bool, optional
if additional output is required
method: str, optional
what method to use in the integrator
Returns
-------
grad: :class:`numpy.ndarray`
array of gradient
infodict : dict, only returned if full_output=True
Dictionary containing additional output information
======= ============================================================
key meaning
======= ============================================================
'sens' intermediate values over the original ode and all the
sensitivities, by state, parameters
'resid' residuals given theta
'info' output from the integration
======= ============================================================
Notes
-----
It calculates the gradient by calling :meth:`jacIV`
"""
if full_output:
_jac_iv, output_iv = self.jacIV(theta=theta,
full_output=True,
method=method)
# the most important information! and in fact all the information
# we need to calculate the gradient
diff_loss = output_iv['diff_loss']
sens = output_iv['sens']
grad = self._sensToGradWithoutIndex(sens, diff_loss)
grad_iv = self._sensToGradIVWithoutIndex(sens, diff_loss)
grad = np.append(grad, grad_iv)
return grad, output_iv
else:
_sol_iv, sens = self.jacIV(theta=theta,
sens_output=True,
full_output=False,
method=method)
i = self._stateIndex
diff_loss = self._lossObj.diff_loss(sens[:, i])
# grad for parameters and the initial values. Then join the two
grad = self._sensToGradWithoutIndex(sens, diff_loss)
grad_iv = self._sensToGradIVWithoutIndex(sens, diff_loss)
grad = np.append(grad, grad_iv)
return grad
def jacIV(self, theta=None, sens_output=False, full_output=False, method=None):
"""
Obtain the Jacobian of the objective function given input parameters
which include the current guess of the initial value using forward
sensitivity method.
Parameters
----------
theta: array like, optional
input value of the parameters
sens_output: bool, optional
whether the full set of sensitivities is required; full_output
overrides this option when True
full_output: bool, optional
if additional output is required
method: str, optional
Choice between lsoda, vode and dopri5, the three integrators
provided by scipy. Defaults to lsoda
Returns
-------
grad: :class:`numpy.ndarray`
Jacobian of the objective function
infodict : dict, only returned if full_output=True
Dictionary containing additional output information
======= ============================================================
key meaning
======= ============================================================
'sens' intermediate values over the original ode and all the
sensitivities, by state, parameters
'resid' residuals given theta
'info' output from the integration
======= ============================================================
See also
--------
:meth:`sensitivityIV`
"""
if theta is not None:
self._setParamStateInput(theta)
self._ode.parameters = self._theta
if method is None:
method = self._ode._intName
# first we want to find out the number of sensitivities required
num_sens = self._num_state*self._num_param
# add them to the initial values
initial_state_sens = np.append(np.append(self._x0, np.zeros(num_sens)),
np.eye(self._num_state).flatten())
f = ode_utils.integrateFuncJac
# build the indexes to locate the correct parameters
index1 = self._getTargetParamSensIndex()
index2 = self._getTargetStateSensIndex()
index_out = index1 + index2
if full_output:
s_iv = f(self._ode.ode_and_sensitivityIV_T,
self._ode.ode_and_sensitivityIV_jacobian_T,
initial_state_sens,
self._t[0], self._t[1::],
full_output=full_output,
method=method)
sol_iv = s_iv[0]
output_iv = s_iv[1]
output = dict()
i = self._stateIndex
output['resid'] = self._lossObj.residual(sol_iv[:, i])
output['diff_loss'] = self._lossObj.diff_loss(sol_iv[:, i])
output['sens'] = sol_iv
for i in output_iv:
output[i] = output_iv[i]
return sol_iv[:, index_out], output
else:
sol_iv = f(self._ode.ode_and_sensitivityIV_T,
self._ode.ode_and_sensitivityIV_jacobian_T,
initial_state_sens,
self._t[0], self._t[1::],
method=method)
if sens_output:
return sol_iv[:, index_out], sol_iv
else:
return sol_iv[:, index_out]
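# A minimal sketch of the augmented initial condition built in jacIV above,
# again assuming 2 states and 3 parameters (illustrative values only):
#
#   x0 = np.array([10.0, 1.0])
#   initial_state_sens = np.append(np.append(x0, np.zeros(2*3)),
#                                  np.eye(2).flatten())
#   # length = 2 + 6 + 4: the states, the parameter sensitivities (zero at t0),
#   # and the initial-value sensitivities, which start as the identity because
#   # d x_i(t0) / d x_j(t0) = 1 if i == j else 0.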
############################################################
#
# Operators for Hessian from ode
#
############################################################
def hessian(self, theta=None, full_output=False, method=None):
"""
Obtain the Hessian using the forward-forward sensitivities.
Parameters
----------
theta: array like
input value of the parameters
full_output: bool
if additional output is required
method: str, optional
what method to use in the integrator
Returns
-------
Hessian: :class:`numpy.ndarray`
Hessian of the objective function
infodict : dict, only returned if full_output=True
Dictionary containing additional output information
======= ============================================================
key meaning
======= ============================================================
'state' intermediate values for the state (original ode)
'sens' intermediate values for the sensitivities by state,
parameters, i.e. :math:`x_{(i-1)p + j}` is the element for
state :math:`i` and parameter :math:`j` with a total of
:math:`p` parameters
'hess' intermediate values for the hessian by state, parameter,
parameter, i.e. :math:`x_{(i-1)p^{2} + j + k}` is the
element for state :math:`i`, parameter :math:`j` and
parameter :math:`k`
'resid' residuals given theta
'info' output from the integration
======= ============================================================
See also
--------
:meth:`sensitivity`
"""
if theta is not None:
self._setParam(theta)
self._ode.parameters = self._theta
if method is None:
method = self._ode._intName
nS = self._num_state
nP = self._num_param
num_time = len(self._t)
# first we want to find out the number of initial values required to
# fill the initial conditions
num_sens = nS*nP
num_ff = nS*nP*nP
initial_state_sens = np.append(self._x0, np.zeros(num_sens + num_ff))
f = ode_utils.integrateFuncJac
s_out_all = f(self._ode.ode_and_forwardforward_T,
self._ode.ode_and_forwardforward_jacobian_T,
initial_state_sens,
self._t[0], self._t[1::],
full_output=full_output,
method=method)
if full_output:
solution_all = s_out_all[0]
solution_output = s_out_all[1]
else:
solution_all = s_out_all
# the starting index for which the forward forward sensitivities
# are stored
base_index_hess = nS + nS*nP
diff_loss = self._lossObj.diff_loss(solution_all[:,self._stateIndex])
H = np.zeros((nP, nP))
for i in range(num_time - 1):
FF = ode_utils.vecToMatFF(solution_all[i,base_index_hess::], nS, nP)
E = np.zeros(nS)
E[self._stateIndex] += -diff_loss[i]
H += scipy.sparse.kron(E, scipy.sparse.eye(nP)).dot(FF)
# just the J^{\top}J part of the Hessian (which is guaranteed to be PSD)
# full Hessian with the outer product gradient
param_idx = self._getTargetParamIndex()
HJTJ = H[param_idx][:, param_idx].copy()
JTJ = self._sensToJTJWithoutIndex(solution_all)
HJTJ += 2*JTJ
if full_output:
sens_idx = self._getTargetParamSensIndex()
output = dict()
i = self._stateIndex
output['resid'] = self._lossObj.residual(solution_all[:, i])
output['grad'] = self._sensToGradWithoutIndex(solution_all, diff_loss)
output['state'] = solution_all[:, nS:(nS*(nP+1))]
output['sens'] = solution_all[:, sens_idx]
output['hess'] = solution_all[:, base_index_hess::]
output['info'] = solution_output
output['H'] = H
output['JTJ'] = JTJ
return HJTJ, output
else:
return HJTJ
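# A minimal sketch of how the returned matrix above is assembled, assuming H
# and JTJ have already been restricted to the target parameters:
#
#   HJTJ = H[param_idx][:, param_idx] + 2*JTJ
#
# i.e. the forward-forward (second-order) term plus twice the Gauss-Newton
# term J^T J, which on its own is the PSD approximation returned by jtj().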
def jtj(self, theta=None, full_output=False, method=None):
"""
Obtain the approximation to the Hessian using the inner
product of the Jacobian.
Parameters
----------
theta: array like
input value of the parameters
full_output: bool
if additional output is required
method: str, optional
what method to use in the integrator
Returns
-------
jtj: :class:`numpy.ndarray`
:math:`J^{\\top}J` of the objective function
infodict : dict, only returned if full_output=True
Dictionary containing additional output information
======= ============================================================
key meaning
======= ============================================================
'state' intermediate values for the state (original ode)
'sens' intermediate values for the sensitivities by state,
parameters, i.e. :math:`x_{(i-1)p + j}` is the element for
state :math:`i` and parameter :math:`j` with a total of
:math:`p` parameters
'resid' residuals given theta
'info' output from the integration
======= ============================================================
See also
--------
:meth:`sensitivity`
"""
_jac, output = self.jac(theta=theta, full_output=True, method=method)
sens = output['sens']
JTJ = self._sensToJTJWithoutIndex(sens)
if full_output:
diff_loss = output['diff_loss']
output['grad'] = self._sensToGradWithoutIndex(sens, diff_loss)
return JTJ, output
else:
return JTJ
def fisher_information(self, theta=None, full_output=False, method=None):
"""
Obtain the Fisher information
Parameters
----------
theta: array like
input value of the parameters
full_output: bool
if additional output is required
method: str, optional
what method to use in the integrator
Returns
-------
I: :class:`numpy.ndarray`
:math:`I(\\theta)` of the objective function
infodict : dict, only returned if full_output=True
Dictionary containing additional output information
======= ============================================================
key meaning
======= ============================================================
'state' intermediate values for the state (original ode)
'sens' intermediate values for the sensitivities by state,
parameters, i.e. :math:`x_{(i-1)p + j}` is the element for
state :math:`i` and parameter :math:`j` with a total of
:math:`p` parameters
'resid' residuals given theta
'info' output from the integration
======= ============================================================
See also
--------
:meth:`sensitivity`, :meth:`jtj`
"""
_jac, output = self.jac(theta=theta, full_output=True, method=method)
sens = output['sens']
JTJ = self._sensToJTJWithoutIndex(sens, output['resid'])
if full_output:
sens = output['sens']
diffLoss = output['diff_loss']
output['grad'] = self._sensToGradWithoutIndex(sens, diffLoss)
return JTJ, output
else:
return JTJ
############################################################
#
# Other stuff related to the objective function
#
############################################################
def cost(self, theta=None):
"""
Find the cost/loss given time points and the corresponding
observations.
Parameters
----------
theta: array like
input value of the parameters
Returns
-------
numeric
sum of the residuals squared
Notes
-----
Only works with a single target (state)
See also
--------
:meth:`diff_loss`
"""
yhat = self._getSolution(theta)
c = self._lossObj.loss(yhat)
return np.nan_to_num(c) if c == np.inf else c
def diff_loss(self, theta=None):
"""
Find the derivative of the loss function given time points
and the corresponding observations, with initial conditions
Parameters
----------
theta: array like
input value of the parameters
Returns
-------
:class:`numpy.ndarray`
an array containing the derivative of the loss function at each time point
See also
--------
:meth:`cost`
"""
try:
# the solution does not include the origin
solution = self._getSolution(theta)
return self._lossObj.diff_loss(solution)
except Exception as e:
# print(e)
# print("parameters = " +str(theta))
return np.nan_to_num((np.ones(self._y.shape)*np.inf))
def residual(self, theta=None):
"""
Find the residuals given time points and the corresponding
observations, with initial conditions
Parameters
----------
theta: array like
input value of the parameters
Returns
-------
:class:`numpy.ndarray`
an array of residuals
Notes
-----
Makes a direct call to initialized loss object which has a
method called residual
See also
--------
:meth:`cost`
"""
try:
# the solution does not include the origin
solution = self._getSolution(theta)
return self._lossObj.residual(solution)
except Exception as e:
# print(e)
return np.nan_to_num((np.ones(self._y.shape)*np.inf))
############################################################
#
# Other operators where initial values are also parameters
#
############################################################
def costIV(self, theta=None):
"""
Find the cost/loss given the parameters. The input theta
here is assumed to include both the parameters as well as the
initial values
Parameters
----------
theta: array like
parameters and guess of initial values of the states
Returns
-------
numeric
sum of the residuals squared
See also
--------
:meth:`residualIV`
"""
if theta is not None:
self._setParamStateInput(theta)
solution = self._getSolution()
return self._lossObj.loss(solution)
def diff_lossIV(self, theta=None):
"""
Find the derivative of the loss function w.r.t. the parameters
given time points and the corresponding observations, with
initial conditions.
Parameters
----------
theta: array like
parameters and initial values of the states
Returns
-------
:class:`numpy.ndarray`
an array containing the derivative of the loss function at each time point
See also
--------
:meth:`costIV`, :meth:`diff_loss`
"""
if theta is not None:
self._setParamStateInput(theta)
try:
# the solution does not include the origin
solution = self._getSolution()
return self._lossObj.diff_loss(solution)
except Exception as e:
# print(e)
# print("parameters = " + str(theta))
return np.nan_to_num((np.ones(self._y.shape)*np.inf))
def residualIV(self, theta=None):
"""
Find the residuals given time points and the corresponding
observations, with initial conditions.
Parameters
----------
theta: array like
parameters and initial values of the states
Returns
-------
:class:`numpy.ndarray`
an array of residuals
Notes
-----
Makes a direct call to :meth:`residual` using the
initialized information
See also
--------
:meth:`costIV`, :meth:`residual`
"""
if theta is not None:
self._setParamStateInput(theta)
try:
# the solution does not include the origin
solution = self._getSolution()
return self._lossObj.residual(solution)
except Exception as e:
# print(e)
return np.nan_to_num((np.ones(self._y.shape)*np.inf))
############################################################
#
# Commonly used routines in our code that are now functions
#
############################################################
def sens_to_grad(self, sens, diff_loss):
"""
Forward sensitivities to the gradient.
Parameters
----------
sens: :class:`numpy.ndarray`
forward sensitivities
diff_loss: array like
derivative of the loss function
Returns
-------
g: :class:`numpy.ndarray`
gradient of the loss function
"""
# the number of states which we will have residuals for
num_s = len(self._stateName)
assert isinstance(sens, np.ndarray), "Expecting an np.ndarray"
n, p = sens.shape
assert n == len(diff_loss), ("Length of the sensitivities must equal " +
"the length of the derivative of the loss function")
# Divide through to obtain the number of parameters we are inferring
num_out = int(p/num_s) # number of out parameters
sens = np.reshape(sens, (n, num_s, num_out), 'F')
for j in range(num_out):
sens[:, :, j] *= self._stateWeight
grad = functools.reduce(np.add,map(np.dot, diff_loss, sens)).ravel()
return grad
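# A minimal, self-contained numpy sketch of the reduction used above, with
# made-up shapes (n time points, num_s states, num_out parameters):
#
#   import numpy as np
#   n, num_s, num_out = 4, 2, 3
#   sens = np.arange(n*num_s*num_out, dtype=float).reshape(n, num_s, num_out)
#   diff_loss = np.ones((n, num_s))
#   grad = sum(np.dot(d, s) for d, s in zip(diff_loss, sens)).ravel()
#   # grad.shape == (num_out,): entry j is sum_t diff_loss[t] . sens[t, :, j]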
def sens_to_jtj(self, sens, resid=None):
"""
Forward sensitivities to :math:`J^{\\top}J` where :math:`J` is the
Jacobian. The approximation to the Hessian.
Parameters
----------
sens: :class:`numpy.ndarray`
forward sensitivities
resid: :class:`numpy.ndarray`, optional
the residuals corresponding to the input sens
Returns
-------
JTJ: :class:`numpy.ndarray`
An approximation to the Hessian using the inner product
of the Jacobian
"""
assert isinstance(sens, np.ndarray), "Expecting an np.ndarray"
# the number of states which we will have residuals for
num_s = len(self._stateName)
n, p = sens.shape
# obviously divide through to find out the number of parameters
# we are inferring
num_out = int(p/num_s)
# define our holder accordingly
J = np.zeros((num_out, num_out))
# s = np.zeros((numS, numOut))
sens = np.reshape(sens, (n, num_s, num_out), 'F')
for j in range(num_out):
sens[:,:,j] *= self._stateWeight
for i, s in enumerate(sens):
if resid is None:
J += np.dot(s.T, s)
else:
s1 = s*resid[i].T
J += np.dot(s1.T, s1)
return J
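# A minimal numpy sketch of the Gauss-Newton accumulation above, again with
# made-up shapes; J^T J is built one time point at a time:
#
#   import numpy as np
#   n, num_s, num_out = 4, 2, 3
#   sens = np.random.rand(n, num_s, num_out)
#   JTJ = np.zeros((num_out, num_out))
#   for s in sens:          # s is the (num_s, num_out) Jacobian slice at time t
#       JTJ += s.T.dot(s)
#   # JTJ is symmetric positive semi-definite by construction.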
def plot(self):
"""
Plots the solution of all the states and the observed y values
"""
solution = self._getSolution(all_solution=True)
ode_utils.plot_det(solution, self._observeT, self._ode._stateList,
self._y, self._stateName)
def fit(self, x, lb=None, ub=None, A=None, b=None,
disp=False, full_output=False):
"""
Find the estimates given the data and an initial guess :math:`x`.
Note that there is no guarantee that the estimation procedure is
successful. It is recommended to at least supply box constraints,
i.e. lower and upper bounds
Parameters
----------
x: array like
an initial guess
lb: array like
the lower bound elementwise :math:`lb_{i} <= x_{i}`
ub: array like
upper bound elementwise :math:`x_{i} <= ub_{i}`
A: array like
matrix :math:`A` for the inequality :math:`Ax<=b`
b: array like
vector :math:`b` for the inequality :math:`Ax<=b`
Returns
-------
xhat: :class:`numpy.ndarray`
estimated value
"""
if lb is None or ub is None:
if ub is None:
ub = np.array([None]*len(x))
if lb is None:
lb = np.array([None]*len(x))
else:
if len(lb) != len(ub):
raise InputError("Number of lower and upper bounds " +
"needs to be equal")
if len(lb) != len(x):
raise InputError("Number of box constraints must equal to " +
"the number of variables")
box_bounds = np.reshape(np.append(lb, ub), (len(lb), 2), 'F')
con_list = list()
if A is None:
method = 'L-BFGS-B'
else:
if not isinstance(A, np.ndarray):
A = np.asarray(A)
n,p = A.shape
if n != len(b):
raise InputError("Number of rows in A needs to be equal to " +
"the length of b in the inequality Ax<=b")
if p != len(x):
raise InputError("Number of columns in A must equal " +
"the number of variables")
def F(a, b_i):
# scipy 'ineq' constraints require fun(x) >= 0, so each row of
# Ax <= b becomes b_i - a.dot(x) >= 0
def func(x):
return b_i - a.dot(x)
return func
for a, b_i in zip(A, b): # a is a row vector of A
con_list.append({'type': 'ineq', 'fun': F(a, b_i)})
method = 'SLSQP'
if disp:
callback = self.thetaCallBack
else:
callback = None
res = minimize(fun=self.cost,
jac=self.sensitivity,
x0=x,
bounds=box_bounds,
constraints=con_list,
method=method,
callback=callback)
if full_output:
return res['x'], res
else:
return res['x']
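# A hypothetical usage sketch of fit(), assuming `loss_obj` is an instance of
# this class that has already been initialised with an ODE and data:
#
#   theta_hat = loss_obj.fit(x=[0.5, 0.3], lb=[0.0, 0.0], ub=[2.0, 2.0])
#
# With only lb/ub supplied the optimiser is L-BFGS-B; supplying A and b for
# the inequality Ax <= b switches the method to SLSQP with 'ineq' constraints.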
############################################################
#
# These are "private"
#
############################################################
def _getSolution(self, theta=None, all_solution=False):
"""
Find the residuals given time points and the corresponding
observations, with initial conditions
"""
if theta is not None:
self._setParam(theta)
self._ode.parameters = self._theta
# TODO: is this the correct approach to jacobian_T?
# What should be returned if the integration fails?
# Note that the solution does not include the origin. This is
# because they do not contribute when the initial conditions are
# given and we assume that they are accurate
solution = ode_utils.integrateFuncJac(self._ode.ode_T,
self._ode.jacobian_T,
self._x0, self._t0,
self._observeT,
full_output=False,
method=self._ode._intName)
if all_solution:
return solution
else:
return solution[:, self._stateIndex]
def _sensToGradWithoutIndex(self, sens, diffLoss):
"""
Forward sensitivities to g, where g is the gradient.
Indicies obtained using information defined here
"""
index_out = self._getTargetParamSensIndex()
return self.sens_to_grad(sens[:, index_out], diffLoss)
def _sensToGradIVWithoutIndex(self, sens, diffLoss):
"""
Same as sensToGradWithoutIndex above but now we also include the
initial conditions.
"""
index_out = self._getTargetStateSensIndex()
return self.sens_to_grad(sens[:, index_out], diffLoss)
def _sensToJTJWithoutIndex(self, sens, diffLoss=None):
"""
Forward sensitivities to :math:`J^{\\top}J` where :math:`J` is
the Jacobian. The approximation to the Hessian.
"""
index_out = self._getTargetParamSensIndex()
return self.sens_to_jtj(sens[:, index_out], diffLoss)
def _sensToJTJIVWithoutIndex(self, sens, diffLoss=None):
"""
Same as _sensToJTJWithoutIndex above but now we also include the
initial conditions.
"""
index_out = self._getTargetStateSensIndex()
return self.sens_to_jtj(sens[:, index_out], diffLoss)
############################################################
#
# Obtain the correct index
#
############################################################
def _getTargetParamSensIndex(self):
# as usual, locate the index of the state
state_index = self._ode.get_state_index(self._stateName)
# build the indexes to locate the correct parameters
index_out = list()
# locate the target indexes
index_list = self._getTargetParamIndex()
if isinstance(state_index, list):
for j in state_index:
for i in index_list:
# always ignore the first numState because they are
# outputs from the actual ode and not the sensitivities.
# Hence the +1
index_out.append(j + (i + 1) * self._num_state)
else:
# else, happy times!
for i in index_list:
index_out.append(state_index + (i + 1) * self._num_state)
return np.sort(np.array(index_out)).tolist()
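# A small worked example of the index arithmetic above, assuming 2 states and
# 3 parameters, with the target state at position 0 and all parameters
# targeted (illustrative only):
#
#   num_state = 2; index_list = [0, 1, 2]
#   # columns 0-1 of the solution are the states themselves, so the
#   # sensitivity of state 0 w.r.t. parameter i lives at 0 + (i + 1)*2,
#   # giving index_out == [2, 4, 6].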
def _getTargetParamIndex(self):
"""
Get the indices of the targeted parameters
"""
# we assume that all the parameters are targets
if self._targetParam is None:
index_list = range(0, self._num_param)
else:
# only select from the list
index_list = list()
# note that "i" is a string here
for i in self._targetParam:
index_list.append(self._ode.get_param_index(i))
return index_list
def _getTargetStateSensIndex(self):
# as usual, locate the index of the state
state_index = self._ode.get_state_index(self._stateName)
# build the indexes to locate the correct parameters
index_out = list()
# locate the target indexes
index_list = self._getTargetStateIndex()
## Note to self. We do not use list comprehension here because it will
## exceed the 80 character limit
n_s = self._num_state
n_p = self._num_param
if isinstance(state_index, list):
if JSMDEBUG:
print('JSM debug 1 index_list,state_index,index_out,sorted',index_list,state_index,index_out,np.sort(np.array(index_out)).tolist())
for j in state_index:
for i in index_list:
# always ignore the first numState because they are outputs
# from the actual ode and not the sensitivities
index_out.append(j + (i + 1 + n_p)*n_s)
else:
if JSMDEBUG:
print('JSM debug 2 index_list,state_index,index_out,sorted',index_list,state_index,index_out,np.sort(np.array(index_out)).tolist())
# else, happy times!
for i in index_list:
index_out.append(state_index + (i + 1 + n_p)*n_s)
if JSMDEBUG:
print('JSM debug 3 index_list,state_index,index_out,sorted',index_list,state_index,index_out,np.sort(np.array(index_out)).tolist())
return np.sort(np.array(index_out)).tolist()
def _getTargetStateIndex(self):
"""
Get the indices of our targeted states
"""
if self._targetState is None:
index_list = range(self._num_state)
if JSMDEBUG:
print('JSM debug _getTargetStateIndex __targetState None index_list',index_list)
else:
# index_list = [self._ode.get_state_index(i) for i in self._targetState] # original version
index_list = list() # JSM
for i in self._targetState: # JSM
index_list.extend(self._ode.get_state_index(i)) # JSM
if JSMDEBUG:
print('JSM debug _getTargetStateIndex _targetState index_list',self._targetState,index_list)
return index_list
def _setParamInput(self, theta):
if JSMDEBUG:
print('JSM debug _setParamInput theta',theta)
if self._targetParam is None:
if len(theta) != self._num_param:
raise InputError("Expecting input to all the parameters")
else: # happy, standard case
self._setParam(theta)
else:
if len(theta) == len(self._targetParam):
self._unrollParam(theta)
else:
raise InputError("Expecting input theta to be of length " +
str(len(self._targetParam)))
def _setParamStateInput(self, theta):
"""
Set both the parameters and initial condition :math:`x_{0}`
"""
if JSMDEBUG:
print('JSM debug _setParamStateInput theta',theta)
if self._targetParam is None and self._targetState is None:
# we are expecting the standard case here
if len(theta) != (self._num_state + self._num_param):
raise InputError("Expecting a guess of the initial value, " +
"use diff_loss() " +
"instead for just parameter estimation")
else:
self._setX0(theta[-self._num_state:])
self._setParam(theta[:self._num_param])
else:
if self._targetParam is None:
# this mean all the parameters or without the parameters
if len(theta) == len(self._targetState):
# without parameters
self._unrollState(theta)
elif len(theta) == (self._num_param + len(self._targetState)):
# the parameters first
self._setParam(theta[:self._num_param])
# then the states
# x0 = theta[-len(self._targetState):]
self._unrollState(theta[-len(self._targetState):])
else:
raise InputError("Expecting input to all the parameters " +
"and to the states with length %s" %
len(self._targetState))
elif self._targetState is None:
# this mean all the state or without the states
if len(theta) == self._num_param:
# without the states, obviously using the wrong function
# call
raise InputError("Input has the same length as the " +
"number of parameters. If the initial " +
"conditions for the states are not " +
"required, use diff_loss() instead")
elif len(theta) == (self._num_state + self._num_param):
# all the states
# begin setting the information
self._setParam(theta[:self._num_param])
# then the states
# x0 = theta[-self._num_state:]
self._setX0(theta[-self._num_state:])
elif len(theta) == (self._num_state + len(self._targetParam)):
# again we have all the states
self._unrollParam(theta[:len(self._targetParam)])
# x0 = theta[-self._num_state:]
self._setX0(theta[-self._num_state:])
else:
raise InputError("The number of inputs is just plain " +
"wrong. Cannot help you further.")
else:
# we have both set of input
l1, l2 = len(self._targetParam), len(self._targetState)
if len(theta) == (l1 + l2):
# WOOT "reached correct place"
x0 = theta[-l2:]
theta = theta[:l1]
self._unrollState(x0)
self._unrollParam(theta)
if JSMDEBUG:
print('JSM debug at l 1515: x0,theta',x0,theta)
else:
raise InputError("Input of length " + str(len(theta)) +
": Expecting input to the parameters " +
"of length " + str(l1) +
" and to the states of length " + str(l2))
def _setParam(self, theta):
"""
Set the parameters
"""
if self._num_param == 0:
self._theta = None
else:
if self._targetParam is not None:
theta = ode_utils.check_array_type(theta)
thetaDict = dict()
l1, l2 = len(theta), len(self._targetParam)
if len(self._targetParam) > 1:
if len(theta) != len(self._targetParam):
raise InputError("Input length = %s but we expect %s" %\
(l1, l2))
# begin to construct our dictionary
for i in range(l1):
thetaDict[self._targetParam[i]] = theta[i]
else:
if isinstance(theta, Number):
thetaDict[self._targetParam[0]] = theta
elif len(theta) > 1:
raise InputError("Input length = " +str(l1) +
" but we only have one parameter")
else:
if isinstance(self._targetParam[0], ODEVariable):
thetaDict[str(self._targetParam[0])] = theta[0]
else:
thetaDict[self._targetParam[0]] = theta[0]
self._theta = thetaDict
else:
# convert to something sensible
theta = ode_utils.check_array_type(theta)
self._theta = np.copy(theta)
def _setWeight(self, n, p, w):
# note that we NEVER scale the weights
# also note that we can use the weights as a control
# with normalized input
w = ode_utils.check_array_type(w)
if len(w) == w.size:
m, q = len(w), 1
else:
m, q = w.shape
if p == q:
if n == m:
self._stateWeight = w
elif m == 1:
self._stateWeight = np.ones((n, p))*w
else:
raise InputError("Number of input weights is not equal " +
"to the number of observations")
elif p == m:
if q == 1:
self._stateWeight = np.ones((n, p))*w
else:
raise InputError("Number of input weights is not equal " +
"to number of states")
else:
if q == 1 and m == 1:
self._stateWeight = np.ones((n, p))*w
else:
raise InputError("Number of input weights differs from " +
"the number of observations")
def _setX0(self, x0):
"""
Set the initial value, pretty much only used when we are
dealing with estimating the initial value as well
"""
x0 = ode_utils.check_array_type(x0)
self._x0 =
|
np.copy(x0)
|
numpy.copy
|
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2014 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Authors
# <NAME> (http://herve.niderb.fr)
"""Generic dynamic time warping (DTW) algorithm"""
from __future__ import unicode_literals
import numpy as np
# vertical item starts before/after horizontal item does
STARTS_BEFORE = 1
STARTS_AFTER = 2
# items start simultaneously
STARTS_WITH = STARTS_BEFORE | STARTS_AFTER
# vertical item ends before/after horizontal item does
ENDS_BEFORE = 4
ENDS_AFTER = 8
# items end simultaneously
ENDS_WITH = ENDS_BEFORE | ENDS_AFTER
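# A small sketch of how these bit flags combine: STARTS_WITH == 3 is the union
# of STARTS_BEFORE (1) and STARTS_AFTER (2), so a relationship r can be tested
# with bitwise operations, e.g.
#
#   r = STARTS_BEFORE | ENDS_AFTER
#   bool(r & STARTS_BEFORE)   # True
#   bool(r & ENDS_BEFORE)     # False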
class DynamicTimeWarping(object):
"""Dynamic time warping
Implements standard dynamic time warping between two (vertical and
horizontal) sequences of length V and H respectively.
* ────────────────> horizontal
│ * sequence
│ *
│ * * *
│ *
│ *
V *
vertical
sequence
Parameters
----------
distance_func : func, optional
Distance function taking two arguments `vitem` (any item from the
vertical sequence) and `hitem` (any item from the horizontal sequence)
and returning their distance as float.
`distance_func` must be provided when a pre-computed `distance` matrix
is not available.
mask_func : func, optional
Mask function taking two required arguments (`v`, `h`) and two optional
arguments (`vitem`, `hitem`) and returning True when aligning them is
permitted and False otherwise. Defaults to all True.
vcost, hcost, dcost : float, optional
Extra cost added to each vertical, horizontal and diagonal move.
For instance, a positive `vcost` will encourage horizontal and diagonal
paths. All three values default to 0.
no_vertical, no_horizontal : boolean, optional
Constrain dynamic time warping to contain only non-vertical (resp.
non-horizontal) moves. Defaults to False (i.e. no constraint).
"""
def __init__(self, distance_func=None, mask_func=None,
vcost=0., hcost=0., dcost=0.,
no_vertical=False, no_horizontal=False):
super(DynamicTimeWarping, self).__init__()
# extra cost for each elementary move
self.vcost = vcost # vertical
self.hcost = hcost # horizontal
self.dcost = dcost # diagonal
# no vertical move
self.no_vertical = no_vertical
# no horizontal move
self.no_horizontal = no_horizontal
self.distance_func = distance_func
self.mask_func = mask_func
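# A hypothetical construction sketch; the absolute-difference distance
# function is an assumption, not something this module provides:
#
#   dtw = DynamicTimeWarping(
#       distance_func=lambda vitem, hitem: abs(vitem - hitem),
#       vcost=0., hcost=0., dcost=0.)
#
# The distance matrix itself is only filled in lazily by _get_distance below.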
def _get_distance(self, v, h):
"""Get distance between vth and hth items"""
# if distance is not computed already
# do it once and for all
if np.isnan(self._distance[v, h]):
vitem = self._vsequence[v]
hitem = self._hsequence[h]
self._distance[v, h] = self.distance_func(vitem, hitem)
return self._distance[v, h]
def _get_mask(self, v, h):
"""Get mask at position (v, h)"""
# if mask is not computed already
# do it once and for all
if np.isnan(self._mask[v, h]):
vitem = self._vsequence[v]
hitem = self._hsequence[h]
self._mask[v, h] = self.mask_func(v, vitem, h, hitem)
return self._mask[v, h]
def _initialize(self, vsequence, hsequence, distance, mask):
self._vsequence = vsequence
self._hsequence = hsequence
V = len(self._vsequence)
H = len(self._hsequence)
# ~~~ distance matrix ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# precomputed distance matrix
if distance is not None:
assert distance.shape == (V, H)
self._distance = distance
# on-the-fly distance computation
elif self.distance_func is not None:
self._distance =
|
np.empty((V, H))
|
numpy.empty
|
import numpy as np
import warnings
from discretize.utils import mkvc, ndgrid
from discretize.utils.code_utils import deprecate_method
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
from matplotlib.collections import PolyCollection, LineCollection
from matplotlib import rc_params
from mpl_toolkits.mplot3d.art3d import Line3DCollection
import discretize
class InterfaceMPL(object):
"""Class for plotting ``discretize`` meshes with matplotlib.
This interface adds three plotting methods to all ``discretize`` meshes.
:py:attr:`~InterfaceMPL.plot_grid` will plot gridded points for 2D and 3D meshes.
:py:attr:`~InterfaceMPL.plot_image` is used for plotting models, scalars and vectors
defined on a given mesh. And :py:attr:`~InterfaceMPL.plot_slice` is used for plotting
models, scalars and vectors on a 2D slice through a 3D mesh.
"""
def plot_grid(
self,
ax=None,
nodes=False,
faces=False,
centers=False,
edges=False,
lines=True,
show_it=False,
**kwargs,
):
"""Plot the grid for nodal, cell-centered and staggered grids.
For 2D and 3D meshes, this method plots the mesh grid. Additionally,
the user can choose to denote edge, face, node and cell center locations.
This function is built upon the ``matplotlib.pyplot.plot`` function
and will accept associated keyword arguments.
Parameters
----------
ax : matplotlib.axes.Axes or None, optional
The axes to draw on. *None* produces a new axes.
nodes, faces, centers, edges, lines : bool, optional
Whether to plot the corresponding item
show_it : bool, optional
whether to call plt.show()
color : Color or str, optional
If lines=True, defines the color of the grid lines.
linewidth : float, optional
If lines=True, defines the thickness of the grid lines.
Returns
-------
matplotlib.axes.Axes
Axes handle for the plot
Other Parameters
----------------
edges_x, edges_y, edges_z, faces_x, faces_y, faces_z : bool, optional
When plotting a ``TreeMesh``, these are also options to plot the
individual component items.
cell_line : bool, optional
When plotting a ``TreeMesh``, you can also plot a line through the
cell centers in order.
slice : {'both', 'theta', 'z'}
When plotting a ``CylindricalMesh``, which dimension to slice over.
Notes
-----
Excess arguments are passed on to `plot`
Examples
--------
Plotting a 2D TensorMesh grid
>>> from matplotlib import pyplot as plt
>>> import discretize
>>> import numpy as np
>>> h1 = np.linspace(.1, .5, 3)
>>> h2 = np.linspace(.1, .5, 5)
>>> mesh = discretize.TensorMesh([h1, h2])
>>> mesh.plot_grid(nodes=True, faces=True, centers=True, lines=True)
>>> plt.show()
Plotting a 3D TensorMesh grid
>>> from matplotlib import pyplot as plt
>>> import discretize
>>> import numpy as np
>>> h1 = np.linspace(.1, .5, 3)
>>> h2 = np.linspace(.1, .5, 5)
>>> h3 = np.linspace(.1, .5, 3)
>>> mesh = discretize.TensorMesh([h1, h2, h3])
>>> mesh.plot_grid(nodes=True, faces=True, centers=True, lines=True)
>>> plt.show()
Plotting a 2D CurvilinearMesh
>>> from matplotlib import pyplot as plt
>>> import discretize
>>> X, Y = discretize.utils.exampleLrmGrid([10, 10], 'rotate')
>>> M = discretize.CurvilinearMesh([X, Y])
>>> M.plot_grid()
>>> plt.show()
Plotting a 3D CurvilinearMesh
>>> from matplotlib import pyplot as plt
>>> import discretize
>>> X, Y, Z = discretize.utils.exampleLrmGrid([5, 5, 5], 'rotate')
>>> M = discretize.CurvilinearMesh([X, Y, Z])
>>> M.plot_grid()
>>> plt.show()
Plotting a 2D TreeMesh
>>> from matplotlib import pyplot as plt
>>> import discretize
>>> M = discretize.TreeMesh([32, 32])
>>> M.insert_cells([[0.25, 0.25]], [4])
>>> M.plot_grid()
>>> plt.show()
Plotting a 3D TreeMesh
>>> from matplotlib import pyplot as plt
>>> import discretize
>>> M = discretize.TreeMesh([32, 32, 32])
>>> M.insert_cells([[0.3, 0.75, 0.22]], [4])
>>> M.plot_grid()
>>> plt.show()
"""
mesh_type = self._meshType.lower()
plotters = {
"tree": self.__plot_grid_tree,
"tensor": self.__plot_grid_tensor,
"curv": self.__plot_grid_curv,
"cyl": self.__plot_grid_cyl,
}
try:
plotter = plotters[mesh_type]
except KeyError:
raise NotImplementedError(
"Mesh type `{}` does not have a plot_grid implementation.".format(
type(self).__name__
)
)
if "showIt" in kwargs:
show_it = kwargs.pop("showIt")
warnings.warn(
"showIt has been deprecated, please use show_it", DeprecationWarning
)
if ax is not None:
ax_test = ax
if not isinstance(ax, (list, tuple, np.ndarray)):
ax_test = (ax,)
for a in ax_test:
if not isinstance(a, matplotlib.axes.Axes):
raise TypeError("ax must be an matplotlib.axes.Axes")
elif mesh_type != "cyl":
axOpts = {"projection": "3d"} if self.dim == 3 else {}
plt.figure()
ax = plt.subplot(111, **axOpts)
rcParams = rc_params()
if lines:
kwargs["color"] = kwargs.get("color", rcParams["lines.color"])
kwargs["linewidth"] = kwargs.get("linewidth", rcParams["lines.linewidth"])
out = plotter(
ax=ax,
nodes=nodes,
faces=faces,
centers=centers,
edges=edges,
lines=lines,
**kwargs,
)
if show_it:
plt.show()
return out
def plot_image(
self,
v,
v_type="CC",
grid=False,
view="real",
ax=None,
clim=None,
show_it=False,
pcolor_opts=None,
stream_opts=None,
grid_opts=None,
range_x=None,
range_y=None,
sample_grid=None,
stream_thickness=None,
stream_threshold=None,
**kwargs,
):
"""Plots quantities defined on a given mesh.
This method is primarily used to plot models, scalar quantities and vector
quantities defined on 2D meshes. For 3D :class:`discretize.TensorMesh` however,
this method will plot the quantity for every slice of the 3D mesh.
Parameters
----------
v : numpy.ndarray
Gridded values being plotted. The length of the array depends on the quantity being
plotted; e.g. if the quantity is a scalar value defined on mesh nodes, the
length must be equal to the number of mesh nodes.
v_type : {'CC','CCV', 'N', 'F', 'Fx', 'Fy', 'Fz', 'E', 'Ex', 'Ey', 'Ez'}
Defines the input parameter *v*.
view : {'real', 'imag', 'abs', 'vec'}
For complex scalar quantities, options are included to image the real, imaginary or
absolute value. For vector quantities, *view* must be set to 'vec'.
ax : matplotlib.axes.Axes, optional
The axes to draw on. *None* produces a new Axes.
clim : tuple of float, optional
length 2 tuple of (vmin, vmax) for the color limits
range_x, range_y : tuple of float, optional
length 2 tuple of (min, max) for the bounds of the plot axes.
pcolor_opts : dict, optional
Arguments passed on to ``pcolormesh``
grid : bool, optional
Whether to plot the edges of the mesh cells.
grid_opts : dict, optional
If ``grid`` is true, arguments passed on to ``plot`` for grid
sample_grid : tuple of numpy.ndarray, optional
If ``view`` == 'vec', mesh cell widths (hx, hy) to interpolate onto for vector plotting
stream_opts : dict, optional
If ``view`` == 'vec', arguments passed on to ``streamplot``
stream_thickness : float, optional
If ``view`` == 'vec', linewidth for ``streamplot``
stream_threshold : float, optional
If ``view`` == 'vec', only plots vectors with magnitude above this threshold
show_it : bool, optional
Whether to call plt.show()
numbering : bool, optional
For 3D :class:`~discretize.TensorMesh` only, show the numbering of the slices
annotation_color : Color or str, optional
For 3D :class:`~discretize.TensorMesh` only, color of the annotation
Examples
--------
2D ``TensorMesh`` plotting
>>> from matplotlib import pyplot as plt
>>> import discretize
>>> import numpy as np
>>> M = discretize.TensorMesh([20, 20])
>>> v = np.sin(M.gridCC[:, 0]*2*np.pi)*np.sin(M.gridCC[:, 1]*2*np.pi)
>>> M.plot_image(v)
>>> plt.show()
3D ``TensorMesh`` plotting
>>> import discretize
>>> import numpy as np
>>> M = discretize.TensorMesh([20, 20, 20])
>>> v = np.sin(M.gridCC[:, 0]*2*np.pi)*np.sin(M.gridCC[:, 1]*2*np.pi)*np.sin(M.gridCC[:, 2]*2*np.pi)
>>> M.plot_image(v, annotation_color='k')
>>> plt.show()
"""
mesh_type = self._meshType.lower()
plotters = {
"tree": self.__plot_image_tree,
"tensor": self.__plot_image_tensor,
"curv": self.__plot_image_curv,
"cyl": self.__plot_image_cyl,
}
try:
plotter = plotters[mesh_type]
except KeyError:
raise NotImplementedError(
"Mesh type `{}` does not have a plot_image implementation.".format(
type(self).__name__
)
)
if "pcolorOpts" in kwargs:
pcolor_opts = kwargs.pop("pcolorOpts")
warnings.warn(
"pcolorOpts has been deprecated, please use pcolor_opts",
DeprecationWarning,
)
if "streamOpts" in kwargs:
stream_opts = kwargs.pop("streamOpts")
warnings.warn(
"streamOpts has been deprecated, please use stream_opts",
DeprecationWarning,
)
if "gridOpts" in kwargs:
grid_opts = kwargs.pop("gridOpts")
warnings.warn(
"gridOpts has been deprecated, please use grid_opts", DeprecationWarning
)
if "showIt" in kwargs:
show_it = kwargs.pop("showIt")
warnings.warn(
"showIt has been deprecated, please use show_it", DeprecationWarning
)
if "vType" in kwargs:
v_type = kwargs.pop("vType")
warnings.warn(
"vType has been deprecated, please use v_type", DeprecationWarning
)
# Some Error checking and common defaults
if pcolor_opts is None:
pcolor_opts = {}
if stream_opts is None:
stream_opts = {"color": "k"}
if grid_opts is None:
if grid:
grid_opts = {"color": "k"}
else:
grid_opts = {}
v_typeOptsCC = ["N", "CC", "Fx", "Fy", "Ex", "Ey"]
v_typeOptsV = ["CCv", "F", "E"]
v_typeOpts = v_typeOptsCC + v_typeOptsV
if view == "vec":
if v_type not in v_typeOptsV:
raise ValueError(
"v_type must be in ['{0!s}'] when view='vec'".format(
"', '".join(v_typeOptsV)
)
)
if v_type not in v_typeOpts:
raise ValueError(
"v_type must be in ['{0!s}']".format("', '".join(v_typeOpts))
)
viewOpts = ["real", "imag", "abs", "vec"]
if view not in viewOpts:
raise ValueError("view must be in ['{0!s}']".format("', '".join(viewOpts)))
if v.dtype == complex and view == "vec":
raise NotImplementedError("Can not plot a complex vector.")
if ax is None:
fig = plt.figure()
ax = plt.subplot(111)
else:
if not isinstance(ax, matplotlib.axes.Axes):
raise TypeError("ax must be an Axes!")
fig = ax.figure
if clim is not None:
pcolor_opts["vmin"] = clim[0]
pcolor_opts["vmax"] = clim[1]
out = plotter(
v,
v_type=v_type,
view=view,
ax=ax,
range_x=range_x,
range_y=range_y,
pcolor_opts=pcolor_opts,
grid=grid,
grid_opts=grid_opts,
sample_grid=sample_grid,
stream_opts=stream_opts,
stream_threshold=stream_threshold,
stream_thickness=stream_thickness,
**kwargs,
)
if show_it:
plt.show()
return out
def plot_slice(
self,
v,
v_type="CC",
normal="Z",
ind=None,
slice_loc=None,
grid=False,
view="real",
ax=None,
clim=None,
show_it=False,
pcolor_opts=None,
stream_opts=None,
grid_opts=None,
range_x=None,
range_y=None,
sample_grid=None,
stream_threshold=None,
stream_thickness=None,
**kwargs,
):
"""Plots slice of fields on the given 3D mesh.
Parameters
----------
v : numpy.ndarray
values to plot
v_type : {'CC','CCV', 'N', 'F', 'Fx', 'Fy', 'Fz', 'E', 'Ex', 'Ey', 'Ez'}, or tuple of these options
Where the values of v are defined.
normal : {'Z', 'X', 'Y'}
Normal direction of slicing plane.
ind : None, optional
index along dimension of slice. Defaults to the center index.
slice_loc : None, optional
Value along dimension of slice. Defaults to the center of the mesh.
view : {'real', 'imag', 'abs', 'vec'}
How to view the array.
ax : matplotlib.axes.Axes, optional
The axes to draw on. None produces a new Axes. Must be None if ``v_type`` is a tuple.
clim : tuple of float, optional
length 2 tuple of (vmin, vmax) for the color limits
range_x, range_y : tuple of float, optional
length 2 tuple of (min, max) for the bounds of the plot axes.
pcolor_opts : dict, optional
Arguments passed on to ``pcolormesh``
grid : bool, optional
Whether to plot the edges of the mesh cells.
grid_opts : dict, optional
If ``grid`` is true, arguments passed on to ``plot`` for the edges
sample_grid : tuple of numpy.ndarray, optional
If ``view`` == 'vec', mesh cell widths (hx, hy) to interpolate onto for vector plotting
stream_opts : dict, optional
If ``view`` == 'vec', arguments passed on to ``streamplot``
stream_thickness : float, optional
If ``view`` == 'vec', linewidth for ``streamplot``
stream_threshold : float, optional
If ``view`` == 'vec', only plots vectors with magnitude above this threshold
show_it : bool, optional
Whether to call plt.show()
Examples
--------
Plot a slice of a 3D `TensorMesh` solution to Laplace's equation.
First build the mesh:
>>> from matplotlib import pyplot as plt
>>> import discretize
>>> from pymatsolver import Solver
>>> hx = [(5, 2, -1.3), (2, 4), (5, 2, 1.3)]
>>> hy = [(2, 2, -1.3), (2, 6), (2, 2, 1.3)]
>>> hz = [(2, 2, -1.3), (2, 6), (2, 2, 1.3)]
>>> M = discretize.TensorMesh([hx, hy, hz])
then build the necessary parts of the PDE:
>>> q = np.zeros(M.vnC)
>>> q[[4, 4], [4, 4], [2, 6]]=[-1, 1]
>>> q = discretize.utils.mkvc(q)
>>> A = M.face_divergence * M.cell_gradient
>>> b = Solver(A) * (q)
and finally, plot the vector values of the result, which are defined on faces
>>> M.plot_slice(M.cell_gradient*b, 'F', view='vec', grid=True, pcolor_opts={'alpha':0.8})
>>> plt.show()
We can use the `slice_loc` kwarg to tell `plot_slice` where to slice the mesh.
Let's create a mesh with a random model and plot slice of it. The `slice_loc`
kwarg automatically determines the indices for slicing the mesh along a plane with
the given normal.
>>> M = discretize.TensorMesh([32, 32, 32])
>>> v = discretize.utils.random_model(M.vnC, seed=789).reshape(-1, order='F')
>>> x_slice, y_slice, z_slice = 0.75, 0.25, 0.9
>>> plt.figure(figsize=(7.5, 3))
>>> ax = plt.subplot(131)
>>> M.plot_slice(v, normal='X', slice_loc=x_slice, ax=ax)
>>> ax = plt.subplot(132)
>>> M.plot_slice(v, normal='Y', slice_loc=y_slice, ax=ax)
>>> ax = plt.subplot(133)
>>> M.plot_slice(v, normal='Z', slice_loc=z_slice, ax=ax)
>>> plt.tight_layout()
>>> plt.show()
This also works for `TreeMesh`. We create a mesh here that is refined within three
boxes, along with a base level of refinement.
>>> TM = discretize.TreeMesh([32, 32, 32])
>>> TM.refine(3, finalize=False)
>>> BSW = [[0.25, 0.25, 0.25], [0.15, 0.15, 0.15], [0.1, 0.1, 0.1]]
>>> TNE = [[0.75, 0.75, 0.75], [0.85, 0.85, 0.85], [0.9, 0.9, 0.9]]
>>> levels = [6, 5, 4]
>>> TM.refine_box(BSW, TNE, levels)
>>> v_TM = discretize.utils.volume_average(M, TM, v)
>>> plt.figure(figsize=(7.5, 3))
>>> ax = plt.subplot(131)
>>> TM.plot_slice(v_TM, normal='X', slice_loc=x_slice, ax=ax)
>>> ax = plt.subplot(132)
>>> TM.plot_slice(v_TM, normal='Y', slice_loc=y_slice, ax=ax)
>>> ax = plt.subplot(133)
>>> TM.plot_slice(v_TM, normal='Z', slice_loc=z_slice, ax=ax)
>>> plt.tight_layout()
>>> plt.show()
"""
mesh_type = self._meshType.lower()
plotters = {
"tree": self.__plot_slice_tree,
"tensor": self.__plot_slice_tensor,
# 'curv': self.__plot_slice_curv,
# 'cyl': self.__plot_slice_cyl,
}
try:
plotter = plotters[mesh_type]
except KeyError:
raise NotImplementedError(
"Mesh type `{}` does not have a plot_slice implementation.".format(
type(self).__name__
)
)
normal = normal.upper()
if "pcolorOpts" in kwargs:
pcolor_opts = kwargs["pcolorOpts"]
warnings.warn(
"pcolorOpts has been deprecated, please use pcolor_opts",
DeprecationWarning,
)
if "streamOpts" in kwargs:
stream_opts = kwargs["streamOpts"]
warnings.warn(
"streamOpts has been deprecated, please use stream_opts",
DeprecationWarning,
)
if "gridOpts" in kwargs:
grid_opts = kwargs["gridOpts"]
warnings.warn(
"gridOpts has been deprecated, please use grid_opts", DeprecationWarning
)
if "showIt" in kwargs:
show_it = kwargs["showIt"]
warnings.warn(
"showIt has been deprecated, please use show_it", DeprecationWarning
)
if "vType" in kwargs:
v_type = kwargs["vType"]
warnings.warn(
"vType has been deprecated, please use v_type", DeprecationWarning
)
if pcolor_opts is None:
pcolor_opts = {}
if stream_opts is None:
stream_opts = {"color": "k"}
if grid_opts is None:
if grid:
grid_opts = {"color": "k"}
else:
grid_opts = {}
if type(v_type) in [list, tuple]:
if ax is not None:
raise TypeError("cannot specify an axis to plot on with this function.")
fig, axs = plt.subplots(1, len(v_type))
out = []
for v_typeI, ax in zip(v_type, axs):
out += [
self.plot_slice(
v,
v_type=v_typeI,
normal=normal,
ind=ind,
slice_loc=slice_loc,
grid=grid,
view=view,
ax=ax,
clim=clim,
show_it=False,
pcolor_opts=pcolor_opts,
stream_opts=stream_opts,
grid_opts=grid_opts,
stream_threshold=stream_threshold,
stream_thickness=stream_thickness,
)
]
return out
viewOpts = ["real", "imag", "abs", "vec"]
normalOpts = ["X", "Y", "Z"]
v_typeOpts = [
"CC",
"CCv",
"N",
"F",
"E",
"Fx",
"Fy",
"Fz",
"E",
"Ex",
"Ey",
"Ez",
]
# Some user error checking
if v_type not in v_typeOpts:
raise ValueError(
"v_type must be in ['{0!s}']".format("', '".join(v_typeOpts))
)
if not self.dim == 3:
raise TypeError("Must be a 3D mesh. Use plotImage.")
if view not in viewOpts:
raise ValueError("view must be in ['{0!s}']".format("', '".join(viewOpts)))
if normal not in normalOpts:
raise ValueError(
"normal must be in ['{0!s}']".format("', '".join(normalOpts))
)
if not isinstance(grid, bool):
raise TypeError("grid must be a boolean")
if v.dtype == complex and view == "vec":
raise NotImplementedError("Can not plot a complex vector.")
if self.dim == 2:
raise NotImplementedError("Must be a 3D mesh. Use plotImage.")
# slice_loc errors
if (ind is not None) and (slice_loc is not None):
raise Warning("Both ind and slice_loc are defined. Behavior undefined.")
# slice_loc implement
if slice_loc is not None:
if normal == "X":
ind = int(np.argmin(np.abs(self.cell_centers_x - slice_loc)))
if normal == "Y":
ind = int(np.argmin(np.abs(self.cell_centers_y - slice_loc)))
if normal == "Z":
ind = int(np.argmin(np.abs(self.cell_centers_z - slice_loc)))
if ax is None:
plt.figure()
ax = plt.subplot(111)
else:
if not isinstance(ax, matplotlib.axes.Axes):
raise TypeError("ax must be an matplotlib.axes.Axes")
if clim is not None:
pcolor_opts["vmin"] = clim[0]
pcolor_opts["vmax"] = clim[1]
out = plotter(
v,
v_type=v_type,
normal=normal,
ind=ind,
grid=grid,
view=view,
ax=ax,
pcolor_opts=pcolor_opts,
stream_opts=stream_opts,
grid_opts=grid_opts,
range_x=range_x,
range_y=range_y,
sample_grid=sample_grid,
stream_threshold=stream_threshold,
stream_thickness=stream_thickness,
**kwargs,
)
if show_it:
plt.show()
return out
def plot_3d_slicer(
self,
v,
xslice=None,
yslice=None,
zslice=None,
v_type="CC",
view="real",
axis="xy",
transparent=None,
clim=None,
xlim=None,
ylim=None,
zlim=None,
aspect="auto",
grid=[2, 2, 1],
pcolor_opts=None,
fig=None,
**kwargs,
):
"""Plot slices of a 3D volume, interactively (scroll wheel).
If called from a notebook, make sure to set
%matplotlib notebook
See the class `discretize.View.Slicer` for more information.
It returns nothing. However, if you need the different figure handles
you can get it via
`fig = plt.gcf()`
and subsequently its children via
`fig.get_children()`
and recursively deeper, e.g.,
`fig.get_children()[0].get_children()`.
One can also provide an existing figure instance, which can be useful
for interactive widgets in Notebooks. The provided figure is cleared
first.
"""
mesh_type = self._meshType.lower()
if mesh_type != "tensor":
raise NotImplementedError(
"plot_3d_slicer has only been implemented for a TensorMesh"
)
# Initiate figure
if fig is None:
fig = plt.figure()
else:
fig.clf()
if "pcolorOpts" in kwargs:
pcolor_opts = kwargs["pcolorOpts"]
warnings.warn(
"pcolorOpts has been deprecated, please use pcolor_opts",
DeprecationWarning,
)
# Populate figure
tracker = Slicer(
self,
v,
xslice,
yslice,
zslice,
v_type,
view,
axis,
transparent,
clim,
xlim,
ylim,
zlim,
aspect,
grid,
pcolor_opts,
)
# Connect figure to scrolling
fig.canvas.mpl_connect("scroll_event", tracker.onscroll)
# Show figure
plt.show()
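# A hypothetical usage sketch (illustrative mesh and model only):
#
#   import numpy as np, discretize
#   mesh = discretize.TensorMesh([20, 20, 20])
#   model = np.random.rand(mesh.nC)
#   mesh.plot_3d_slicer(model, clim=(0, 1))
#
# Scrolling the mouse wheel over the figure then moves the active slice.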
# TensorMesh plotting
def __plot_grid_tensor(
self,
ax=None,
nodes=False,
faces=False,
centers=False,
edges=False,
lines=True,
color="C0",
linewidth=1.0,
**kwargs,
):
if self.dim == 1:
if nodes:
ax.plot(
self.gridN, np.ones(self.nN), color="C0", marker="s", linestyle=""
)
if centers:
ax.plot(
self.gridCC, np.ones(self.nC), color="C1", marker="o", linestyle=""
)
if lines:
ax.plot(self.gridN, np.ones(self.nN), color="C0", linestyle="-")
ax.set_xlabel("x1")
elif self.dim == 2:
if nodes:
ax.plot(
self.gridN[:, 0],
self.gridN[:, 1],
color="C0",
marker="s",
linestyle="",
)
if centers:
ax.plot(
self.gridCC[:, 0],
self.gridCC[:, 1],
color="C1",
marker="o",
linestyle="",
)
if faces:
ax.plot(
self.gridFx[:, 0],
self.gridFx[:, 1],
color="C2",
marker=">",
linestyle="",
)
ax.plot(
self.gridFy[:, 0],
self.gridFy[:, 1],
color="C2",
marker="^",
linestyle="",
)
if edges:
ax.plot(
self.gridEx[:, 0],
self.gridEx[:, 1],
color="C3",
marker=">",
linestyle="",
)
ax.plot(
self.gridEy[:, 0],
self.gridEy[:, 1],
color="C3",
marker="^",
linestyle="",
)
# Plot the grid lines
if lines:
NN = self.reshape(self.gridN, "N", "N", "M")
nCx, nCy = self.shape_cells
X1 = np.c_[
mkvc(NN[0][0, :]), mkvc(NN[0][nCx, :]), mkvc(NN[0][0, :]) * np.nan
].flatten()
Y1 = np.c_[
mkvc(NN[1][0, :]), mkvc(NN[1][nCx, :]), mkvc(NN[1][0, :]) * np.nan
].flatten()
X2 = np.c_[
mkvc(NN[0][:, 0]), mkvc(NN[0][:, nCy]), mkvc(NN[0][:, 0]) * np.nan
].flatten()
Y2 = np.c_[
mkvc(NN[1][:, 0]), mkvc(NN[1][:, nCy]), mkvc(NN[1][:, 0]) * np.nan
].flatten()
X = np.r_[X1, X2]
Y = np.r_[Y1, Y2]
ax.plot(X, Y, color=color, linestyle="-", lw=linewidth)
ax.set_xlabel("x1")
ax.set_ylabel("x2")
elif self.dim == 3:
if nodes:
ax.plot(
self.gridN[:, 0],
self.gridN[:, 1],
color="C0",
marker="s",
linestyle="",
zs=self.gridN[:, 2],
)
if centers:
ax.plot(
self.gridCC[:, 0],
self.gridCC[:, 1],
color="C1",
marker="o",
linestyle="",
zs=self.gridCC[:, 2],
)
if faces:
ax.plot(
self.gridFx[:, 0],
self.gridFx[:, 1],
color="C2",
marker=">",
linestyle="",
zs=self.gridFx[:, 2],
)
ax.plot(
self.gridFy[:, 0],
self.gridFy[:, 1],
color="C2",
marker="<",
linestyle="",
zs=self.gridFy[:, 2],
)
ax.plot(
self.gridFz[:, 0],
self.gridFz[:, 1],
color="C2",
marker="^",
linestyle="",
zs=self.gridFz[:, 2],
)
if edges:
ax.plot(
self.gridEx[:, 0],
self.gridEx[:, 1],
color="C3",
marker=">",
linestyle="",
zs=self.gridEx[:, 2],
)
ax.plot(
self.gridEy[:, 0],
self.gridEy[:, 1],
color="C3",
marker="<",
linestyle="",
zs=self.gridEy[:, 2],
)
ax.plot(
self.gridEz[:, 0],
self.gridEz[:, 1],
color="C3",
marker="^",
linestyle="",
zs=self.gridEz[:, 2],
)
# Plot the grid lines
if lines:
nCx, nCy, nCz = self.shape_cells
NN = self.reshape(self.gridN, "N", "N", "M")
X1 = np.c_[
mkvc(NN[0][0, :, :]),
mkvc(NN[0][nCx, :, :]),
mkvc(NN[0][0, :, :]) * np.nan,
].flatten()
Y1 = np.c_[
mkvc(NN[1][0, :, :]),
mkvc(NN[1][nCx, :, :]),
mkvc(NN[1][0, :, :]) * np.nan,
].flatten()
Z1 = np.c_[
mkvc(NN[2][0, :, :]),
mkvc(NN[2][nCx, :, :]),
mkvc(NN[2][0, :, :]) * np.nan,
].flatten()
X2 = np.c_[
mkvc(NN[0][:, 0, :]),
mkvc(NN[0][:, nCy, :]),
mkvc(NN[0][:, 0, :]) * np.nan,
].flatten()
Y2 = np.c_[
mkvc(NN[1][:, 0, :]),
mkvc(NN[1][:, nCy, :]),
mkvc(NN[1][:, 0, :]) * np.nan,
].flatten()
Z2 = np.c_[
mkvc(NN[2][:, 0, :]),
mkvc(NN[2][:, nCy, :]),
mkvc(NN[2][:, 0, :]) * np.nan,
].flatten()
X3 = np.c_[
mkvc(NN[0][:, :, 0]),
mkvc(NN[0][:, :, nCz]),
mkvc(NN[0][:, :, 0]) * np.nan,
].flatten()
Y3 = np.c_[
mkvc(NN[1][:, :, 0]),
mkvc(NN[1][:, :, nCz]),
mkvc(NN[1][:, :, 0]) * np.nan,
].flatten()
Z3 = np.c_[
mkvc(NN[2][:, :, 0]),
mkvc(NN[2][:, :, nCz]),
mkvc(NN[2][:, :, 0]) * np.nan,
].flatten()
X = np.r_[X1, X2, X3]
Y = np.r_[Y1, Y2, Y3]
Z = np.r_[Z1, Z2, Z3]
ax.plot(X, Y, color=color, linestyle="-", lw=linewidth, zs=Z)
ax.set_xlabel("x1")
ax.set_ylabel("x2")
ax.set_zlabel("x3")
ax.grid(True)
return ax
def __plot_image_tensor(
self,
v,
v_type="CC",
grid=False,
view="real",
ax=None,
pcolor_opts=None,
stream_opts=None,
grid_opts=None,
numbering=True,
annotation_color="w",
range_x=None,
range_y=None,
sample_grid=None,
stream_threshold=None,
**kwargs,
):
if "annotationColor" in kwargs:
annotation_color = kwargs.pop("annotationColor")
warnings.warn(
"annotationColor has been deprecated, please use annotation_color",
DeprecationWarning,
)
if self.dim == 1:
if v_type == "CC":
ph = ax.plot(
self.cell_centers_x, v, linestyle="-", color="C1", marker="o"
)
elif v_type == "N":
ph = ax.plot(self.nodes_x, v, linestyle="-", color="C0", marker="s")
ax.set_xlabel("x")
ax.axis("tight")
elif self.dim == 2:
return self.__plot_image_tensor2D(
v,
v_type=v_type,
grid=grid,
view=view,
ax=ax,
pcolor_opts=pcolor_opts,
stream_opts=stream_opts,
grid_opts=grid_opts,
range_x=range_x,
range_y=range_y,
sample_grid=sample_grid,
stream_threshold=stream_threshold,
)
elif self.dim == 3:
# get copy of image and average to cell-centers if necessary
if v_type == "CC":
vc = v.reshape(self.vnC, order="F")
elif v_type == "N":
vc = (self.aveN2CC * v).reshape(self.vnC, order="F")
elif v_type in ["Fx", "Fy", "Fz", "Ex", "Ey", "Ez"]:
aveOp = "ave" + v_type[0] + "2CCV"
# n = getattr(self, 'vn'+v_type[0])
# if 'x' in v_type: v = np.r_[v, np.zeros(n[1]), np.zeros(n[2])]
# if 'y' in v_type: v = np.r_[np.zeros(n[0]), v, np.zeros(n[2])]
# if 'z' in v_type: v = np.r_[np.zeros(n[0]), np.zeros(n[1]), v]
v = getattr(self, aveOp) * v # average to cell centers
ind_xyz = {"x": 0, "y": 1, "z": 2}[v_type[1]]
vc = self.reshape(v.reshape((self.nC, -1), order="F"), "CC", "CC", "M")[
ind_xyz
]
nCx, nCy, nCz = self.shape_cells
# determine number of slices in x and y dimensions
nX = int(np.ceil(np.sqrt(nCz)))
nY = int(np.ceil(nCz / nX))
# allocate space for montage
C = np.zeros((nX * nCx, nY * nCy))
for iy in range(int(nY)):
for ix in range(int(nX)):
iz = ix + iy * nX
if iz < nCz:
C[ix * nCx : (ix + 1) * nCx, iy * nCy : (iy + 1) * nCy] = vc[
:, :, iz
]
else:
C[ix * nCx : (ix + 1) * nCx, iy * nCy : (iy + 1) * nCy] = np.nan
C = np.ma.masked_where(np.isnan(C), C)
xx = np.r_[0, np.cumsum(np.kron(np.ones((nX, 1)), self.h[0]).ravel())]
yy = np.r_[0, np.cumsum(np.kron(np.ones((nY, 1)), self.h[1]).ravel())]
# Plot the mesh
ph = ax.pcolormesh(xx, yy, C.T, **pcolor_opts)
# Plot the lines
gx = np.arange(nX + 1) * (self.nodes_x[-1] - self.origin[0])
gy = np.arange(nY + 1) * (self.nodes_y[-1] - self.origin[1])
# Repeat and separate with NaN
gxX = np.c_[gx, gx, gx + np.nan].ravel()
gxY = np.kron(
np.ones((nX + 1, 1)), np.array([0, sum(self.h[1]) * nY, np.nan])
).ravel()
gyX = np.kron(
np.ones((nY + 1, 1)), np.array([0, sum(self.h[0]) * nX, np.nan])
).ravel()
gyY = np.c_[gy, gy, gy + np.nan].ravel()
ax.plot(gxX, gxY, annotation_color + "-", linewidth=2)
ax.plot(gyX, gyY, annotation_color + "-", linewidth=2)
ax.axis("tight")
if numbering:
pad = np.sum(self.h[0]) * 0.04
for iy in range(int(nY)):
for ix in range(int(nX)):
iz = ix + iy * nX
if iz < nCz:
ax.text(
(ix + 1) * (self.nodes_x[-1] - self.origin[0]) - pad,
(iy) * (self.nodes_y[-1] - self.origin[1]) + pad,
"#{0:.0f}".format(iz),
color=annotation_color,
verticalalignment="bottom",
horizontalalignment="right",
size="x-large",
)
ax.set_title(v_type)
return (ph,)
def __plot_image_tensor2D(
self,
v,
v_type="CC",
grid=False,
view="real",
ax=None,
pcolor_opts=None,
stream_opts=None,
grid_opts=None,
range_x=None,
range_y=None,
sample_grid=None,
stream_threshold=None,
stream_thickness=None,
):
"""Common function for plotting an image of a TensorMesh"""
if ax is None:
plt.figure()
ax = plt.subplot(111)
else:
if not isinstance(ax, matplotlib.axes.Axes):
                raise AssertionError("ax must be a matplotlib.axes.Axes")
# Reshape to a cell centered variable
if v_type == "CC":
pass
elif v_type == "CCv":
if view != "vec":
raise AssertionError("Other types for CCv not supported")
elif v_type in ["F", "E", "N"]:
aveOp = "ave" + v_type + ("2CCV" if view == "vec" else "2CC")
v = getattr(self, aveOp) * v # average to cell centers (might be a vector)
elif v_type in ["Fx", "Fy", "Ex", "Ey"]:
aveOp = "ave" + v_type[0] + "2CCV"
v = getattr(self, aveOp) * v # average to cell centers (might be a vector)
xORy = {"x": 0, "y": 1}[v_type[1]]
v = v.reshape((self.nC, -1), order="F")[:, xORy]
out = ()
if view in ["real", "imag", "abs"]:
v = self.reshape(v, "CC", "CC", "M")
v = getattr(np, view)(v) # e.g. np.real(v)
v = np.ma.masked_where(np.isnan(v), v)
out += (
ax.pcolormesh(
self.nodes_x,
self.nodes_y,
v.T,
**{**pcolor_opts, **grid_opts},
),
)
elif view in ["vec"]:
# Matplotlib seems to not support irregular
# spaced vectors at the moment. So we will
# Interpolate down to a regular mesh at the
# smallest mesh size in this 2D slice.
if sample_grid is not None:
hxmin = sample_grid[0]
hymin = sample_grid[1]
else:
hxmin = self.h[0].min()
hymin = self.h[1].min()
if range_x is not None:
dx = range_x[1] - range_x[0]
nxi = int(dx / hxmin)
hx = np.ones(nxi) * dx / nxi
origin_x = range_x[0]
else:
nxi = int(self.h[0].sum() / hxmin)
hx = np.ones(nxi) * self.h[0].sum() / nxi
origin_x = self.origin[0]
if range_y is not None:
dy = range_y[1] - range_y[0]
nyi = int(dy / hymin)
hy = np.ones(nyi) * dy / nyi
origin_y = range_y[0]
else:
nyi = int(self.h[1].sum() / hymin)
hy = np.ones(nyi) * self.h[1].sum() / nyi
origin_y = self.origin[1]
U, V = self.reshape(v.reshape((self.nC, -1), order="F"), "CC", "CC", "M")
tMi = self.__class__(h=[hx, hy], origin=np.r_[origin_x, origin_y])
P = self.get_interpolation_matrix(tMi.gridCC, "CC", zerosOutside=True)
Ui = tMi.reshape(P * mkvc(U), "CC", "CC", "M")
Vi = tMi.reshape(P * mkvc(V), "CC", "CC", "M")
# End Interpolation
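            # pcolormesh uses the original nodes unless a plotting range was
            # requested, in which case the regridded values are used instead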
x = self.nodes_x
y = self.nodes_y
if range_x is not None:
x = tMi.nodes_x
if range_y is not None:
y = tMi.nodes_y
if range_x is not None or range_y is not None: # use interpolated values
U = Ui
V = Vi
if stream_threshold is not None:
mask_me = np.sqrt(Ui ** 2 + Vi ** 2) <= stream_threshold
Ui = np.ma.masked_where(mask_me, Ui)
Vi = np.ma.masked_where(mask_me, Vi)
if stream_thickness is not None:
scaleFact = np.copy(stream_thickness)
# Calculate vector amplitude
vecAmp = np.sqrt(U ** 2 + V ** 2).T
# Form bounds to knockout the top and bottom 10%
vecAmp_sort = np.sort(vecAmp.ravel())
nVecAmp = vecAmp.size
tenPercInd = int(np.ceil(0.1 * nVecAmp))
lowerBound = vecAmp_sort[tenPercInd]
upperBound = vecAmp_sort[-tenPercInd]
lowInds = np.where(vecAmp < lowerBound)
vecAmp[lowInds] = lowerBound
highInds = np.where(vecAmp > upperBound)
vecAmp[highInds] = upperBound
# Normalize amplitudes 0-1
norm_thickness = vecAmp / vecAmp.max()
# Scale by user defined thickness factor
stream_thickness = scaleFact * norm_thickness
# Add linewidth to stream_opts
stream_opts.update({"linewidth": stream_thickness})
out += (
ax.pcolormesh(
x,
y,
np.sqrt(U ** 2 + V ** 2).T,
**{**pcolor_opts, **grid_opts},
),
)
out += (
ax.streamplot(
tMi.cell_centers_x,
tMi.cell_centers_y,
Ui.T,
Vi.T,
**stream_opts,
),
)
ax.set_xlabel("x")
ax.set_ylabel("y")
if range_x is not None:
ax.set_xlim(*range_x)
else:
ax.set_xlim(*self.nodes_x[[0, -1]])
if range_y is not None:
ax.set_ylim(*range_y)
else:
ax.set_ylim(*self.nodes_y[[0, -1]])
return out
def __plot_slice_tensor(
self,
v,
v_type="CC",
normal="z",
ind=None,
grid=False,
view="real",
ax=None,
pcolor_opts=None,
stream_opts=None,
grid_opts=None,
range_x=None,
range_y=None,
sample_grid=None,
stream_threshold=None,
stream_thickness=None,
**kwargs,
):
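        """Plot a 2D slice of a 3D tensor-mesh model: the data is averaged to
        cell centers if needed, the slice along `normal` is extracted, and
        plotting is delegated to __plot_image_tensor2D on a temporary 2D mesh.
        """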
        normal = normal.upper()  # accept lower- or upper-case normal axes
        dim_ind = {"X": 0, "Y": 1, "Z": 2}[normal]
szSliceDim = self.shape_cells[dim_ind] #: Size of the sliced dimension
if ind is None:
ind = szSliceDim // 2
if not isinstance(ind, int):
raise TypeError("ind must be an integer")
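        # getIndSlice pulls the requested slab out of a 3D array; doSlice
        # first averages v to cell centers and, for vector views, keeps only
        # the two in-plane components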
def getIndSlice(v):
if normal == "X":
v = v[ind, :, :]
elif normal == "Y":
v = v[:, ind, :]
elif normal == "Z":
v = v[:, :, ind]
return v
def doSlice(v):
if v_type == "CC":
return getIndSlice(self.reshape(v, "CC", "CC", "M"))
elif v_type == "CCv":
if view != "vec":
raise AssertionError("Other types for CCv not supported")
else:
# Now just deal with 'F' and 'E' (x, y, z, maybe...)
aveOp = "ave" + v_type + ("2CCV" if view == "vec" else "2CC")
Av = getattr(self, aveOp)
if v.size == Av.shape[1]:
v = Av * v
else:
v = self.reshape(v, v_type[0], v_type) # get specific component
v = Av * v
# we should now be averaged to cell centers (might be a vector)
v = self.reshape(v.reshape((self.nC, -1), order="F"), "CC", "CC", "M")
if view == "vec":
outSlice = []
if "X" not in normal:
outSlice.append(getIndSlice(v[0]))
if "Y" not in normal:
outSlice.append(getIndSlice(v[1]))
if "Z" not in normal:
outSlice.append(getIndSlice(v[2]))
return np.r_[mkvc(outSlice[0]), mkvc(outSlice[1])]
else:
return getIndSlice(self.reshape(v, "CC", "CC", "M"))
h2d = []
x2d = []
if "X" not in normal:
h2d.append(self.h[0])
x2d.append(self.origin[0])
if "Y" not in normal:
h2d.append(self.h[1])
x2d.append(self.origin[1])
if "Z" not in normal:
h2d.append(self.h[2])
x2d.append(self.origin[2])
tM = self.__class__(h=h2d, origin=x2d) #: Temp Mesh
v2d = doSlice(v)
out = tM.__plot_image_tensor2D(
v2d,
v_type=("CCv" if view == "vec" else "CC"),
grid=grid,
view=view,
ax=ax,
pcolor_opts=pcolor_opts,
stream_opts=stream_opts,
grid_opts=grid_opts,
range_x=range_x,
range_y=range_y,
sample_grid=sample_grid,
stream_threshold=stream_threshold,
stream_thickness=stream_thickness,
)
ax.set_xlabel("y" if normal == "X" else "x")
ax.set_ylabel("y" if normal == "Z" else "z")
ax.set_title("Slice {0:.0f}".format(ind))
return out
# CylindricalMesh plotting
def __plotCylTensorMesh(self, plotType, *args, **kwargs):
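        """Shared helper for plotting on a symmetric CylindricalMesh: face or
        edge vector data is averaged to cell centers and, if requested, the
        data is mirrored about the symmetry axis before plotting.
        """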
if not self.is_symmetric:
raise NotImplementedError("We have not yet implemented this type of view.")
if plotType not in ["plot_image", "plot_grid"]:
raise TypeError("plotType must be either 'plot_grid' or 'plot_image'.")
if len(args) > 0:
val = args[0]
v_type = kwargs.get("v_type", None)
mirror = kwargs.pop("mirror", None)
mirror_data = kwargs.pop("mirror_data", None)
if mirror_data is not None and mirror is None:
mirror = True
if v_type is not None:
if v_type.upper() != "CCV":
if v_type.upper() == "F":
val = mkvc(self.aveF2CCV * val)
if mirror_data is not None:
mirror_data = mkvc(self.aveF2CCV * mirror_data)
kwargs["v_type"] = "CCv" # now the vector is cell centered
if v_type.upper() == "E":
val = mkvc(self.aveE2CCV * val)
if mirror_data is not None:
mirror_data = mkvc(self.aveE2CCV * mirror_data)
args = (val,) + args[1:]
if mirror:
# create a mirrored mesh
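            # the radial cells are reflected about the symmetry axis; scalar
            # data is simply flipped, while the radial component of vector
            # data also changes sign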
hx = np.hstack([np.flipud(self.h[0]), self.h[0]])
origin0 = self.origin[0] - self.h[0].sum()
M = discretize.TensorMesh([hx, self.h[2]], origin=[origin0, self.origin[2]])
if mirror_data is None:
mirror_data = val
if len(val) == self.nC: # only a single value at cell centers
val = val.reshape(self.vnC[0], self.vnC[2], order="F")
mirror_val = mirror_data.reshape(self.vnC[0], self.vnC[2], order="F")
val = mkvc(np.vstack([np.flipud(mirror_val), val]))
elif len(val) == 2 * self.nC:
val_x = val[: self.nC].reshape(self.vnC[0], self.vnC[2], order="F")
val_z = val[self.nC :].reshape(self.vnC[0], self.vnC[2], order="F")
mirror_x = mirror_data[: self.nC].reshape(
self.vnC[0], self.vnC[2], order="F"
)
mirror_z = mirror_data[self.nC :].reshape(
self.vnC[0], self.vnC[2], order="F"
)
                val_x = mkvc(np.vstack([-1.0 * np.flipud(mirror_x), val_x]))
from __future__ import print_function, division, absolute_import
import functools
import sys
import warnings
# unittest.TestCase.subTest() was only added in Python 3.4
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug import random as iarandom
from imgaug.testutils import (array_equal_lists, keypoints_equal, reseed,
runtest_pickleable_uint8_img)
import imgaug.augmenters.arithmetic as arithmetic_lib
import imgaug.augmenters.contrast as contrast_lib
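# Tests for imgaug.augmenters.arithmetic, covering (among others) the
# cutout()/cutout_() helpers, the private _fill_rectangle_* fill functions
# and the Add / AddElementwise / AdditiveGaussianNoise augmenters.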
class Test_cutout(unittest.TestCase):
@mock.patch("imgaug.augmenters.arithmetic.cutout_")
def test_mocked(self, mock_inplace):
image = np.mod(np.arange(100*100*3), 255).astype(np.uint8).reshape(
(100, 100, 3))
mock_inplace.return_value = "foo"
rng = iarandom.RNG(0)
image_aug = iaa.cutout(image,
x1=10,
y1=20,
x2=30,
y2=40,
fill_mode="gaussian",
cval=1,
fill_per_channel=0.5,
seed=rng)
assert mock_inplace.call_count == 1
assert image_aug == "foo"
args = mock_inplace.call_args_list[0][0]
assert args[0] is not image
assert np.array_equal(args[0], image)
assert np.isclose(args[1], 10)
assert np.isclose(args[2], 20)
assert np.isclose(args[3], 30)
assert np.isclose(args[4], 40)
assert args[5] == "gaussian"
assert args[6] == 1
assert np.isclose(args[7], 0.5)
assert args[8] is rng
class Test_cutout_(unittest.TestCase):
def test_with_simple_image(self):
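        # cutout_ is the in-place variant: it is expected to return the very
        # same array object, with the requested rectangle filled with cval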
image = np.mod(np.arange(100*100*3), 255).astype(np.uint8).reshape(
(100, 100, 3))
image = 1 + image
image_aug = iaa.cutout_(image,
x1=10,
y1=20,
x2=30,
y2=40,
fill_mode="constant",
cval=0,
fill_per_channel=False,
seed=None)
mask = np.zeros(image.shape, dtype=bool)
mask[20:40, 10:30, :] = True
overlap_inside = np.sum(image_aug[mask] == 0) / np.sum(mask)
overlap_outside = np.sum(image_aug[~mask] > 0) / np.sum(~mask)
assert image_aug is image
assert overlap_inside >= 1.0 - 1e-4
assert overlap_outside >= 1.0 - 1e-4
@mock.patch("imgaug.augmenters.arithmetic._fill_rectangle_constant_")
def test_fill_mode_constant_mocked(self, mock_fill):
self._test_with_fill_mode_mocked("constant", mock_fill)
@mock.patch("imgaug.augmenters.arithmetic._fill_rectangle_gaussian_")
def test_fill_mode_gaussian_mocked(self, mock_fill):
self._test_with_fill_mode_mocked("gaussian", mock_fill)
@classmethod
def _test_with_fill_mode_mocked(cls, fill_mode, mock_fill):
image = np.mod(np.arange(100*100*3), 256).astype(np.uint8).reshape(
(100, 100, 3))
mock_fill.return_value = image
seed = iarandom.RNG(0)
image_aug = iaa.cutout_(image,
x1=10,
y1=20,
x2=30,
y2=40,
fill_mode=fill_mode,
cval=0,
fill_per_channel=False,
seed=seed)
assert mock_fill.call_count == 1
args = mock_fill.call_args_list[0][0]
kwargs = mock_fill.call_args_list[0][1]
assert image_aug is image
assert args[0] is image
assert kwargs["x1"] == 10
assert kwargs["y1"] == 20
assert kwargs["x2"] == 30
assert kwargs["y2"] == 40
assert kwargs["cval"] == 0
assert kwargs["per_channel"] is False
assert kwargs["random_state"] is seed
def test_zero_height(self):
image = np.mod(np.arange(100*100*3), 255).astype(np.uint8).reshape(
(100, 100, 3))
image = 1 + image
image_cp = np.copy(image)
image_aug = iaa.cutout_(image,
x1=10,
y1=20,
x2=30,
y2=20,
fill_mode="constant",
cval=0,
fill_per_channel=False,
seed=None)
assert np.array_equal(image_aug, image_cp)
def test_zero_height_width(self):
image = np.mod(np.arange(100*100*3), 255).astype(np.uint8).reshape(
(100, 100, 3))
image = 1 + image
image_cp = np.copy(image)
image_aug = iaa.cutout_(image,
x1=10,
y1=20,
x2=10,
y2=40,
fill_mode="constant",
cval=0,
fill_per_channel=False,
seed=None)
assert np.array_equal(image_aug, image_cp)
def test_position_outside_of_image_rect_fully_outside(self):
image = np.mod(np.arange(100*100*3), 255).astype(np.uint8).reshape(
(100, 100, 3))
image = 1 + image
image_cp = np.copy(image)
image_aug = iaa.cutout_(image,
x1=-50,
y1=150,
x2=-1,
y2=200,
fill_mode="constant",
cval=0,
fill_per_channel=False,
seed=None)
assert np.array_equal(image_aug, image_cp)
def test_position_outside_of_image_rect_partially_inside(self):
image = np.mod(np.arange(100*100*3), 255).astype(np.uint8).reshape(
(100, 100, 3))
image = 1 + image
image_aug = iaa.cutout_(image,
x1=-25,
y1=-25,
x2=25,
y2=25,
fill_mode="constant",
cval=0,
fill_per_channel=False,
seed=None)
assert np.all(image_aug[0:25, 0:25] == 0)
assert np.all(image_aug[0:25, 25:] > 0)
assert np.all(image_aug[25:, :] > 0)
def test_zero_sized_axes(self):
shapes = [(0, 0, 0),
(1, 0, 0),
(0, 1, 0),
(0, 1, 1),
(1, 1, 0),
(1, 0, 1),
(1, 0),
(0, 1),
(0, 0)]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
image_cp = np.copy(image)
image_aug = iaa.cutout_(image,
x1=-5,
y1=-5,
x2=5,
y2=5,
fill_mode="constant",
cval=0)
assert np.array_equal(image_aug, image_cp)
class Test_fill_rectangle_gaussian_(unittest.TestCase):
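    # _fill_rectangle_gaussian_ fills the rectangle with gaussian noise
    # centered on the dtype's midpoint; the tests below check the empirical
    # mean/std of the noise and the per_channel behavior.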
def test_simple_image(self):
image = np.mod(np.arange(100*100*3), 256).astype(np.uint8).reshape(
(100, 100, 3))
image_cp = np.copy(image)
rng = iarandom.RNG(0)
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
image,
x1=10,
y1=20,
x2=60,
y2=70,
cval=0,
per_channel=False,
random_state=rng)
assert np.array_equal(image_aug[:20, :],
image_cp[:20, :])
assert not np.array_equal(image_aug[20:70, 10:60],
image_cp[20:70, 10:60])
assert np.isclose(np.average(image_aug[20:70, 10:60]), 127.5,
rtol=0, atol=5.0)
assert np.isclose(np.std(image_aug[20:70, 10:60]), 255.0/2.0/3.0,
rtol=0, atol=2.5)
def test_per_channel(self):
image = np.uint8([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
image = np.tile(image.reshape((1, 10, 1)), (1, 1, 3))
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
per_channel=False,
random_state=iarandom.RNG(0))
image_aug_pc = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
per_channel=True,
random_state=iarandom.RNG(0))
diff11 = (image_aug[..., 0] != image_aug[..., 1])
diff12 = (image_aug[..., 0] != image_aug[..., 2])
diff21 = (image_aug_pc[..., 0] != image_aug_pc[..., 1])
diff22 = (image_aug_pc[..., 0] != image_aug_pc[..., 2])
assert not np.any(diff11)
assert not np.any(diff12)
assert np.any(diff21)
assert np.any(diff22)
def test_deterministic_with_same_seed(self):
image = np.uint8([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
image = np.tile(image.reshape((1, 10, 1)), (1, 1, 3))
image_aug_pc1 = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
per_channel=True,
random_state=iarandom.RNG(0))
image_aug_pc2 = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
per_channel=True,
random_state=iarandom.RNG(0))
image_aug_pc3 = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
per_channel=True,
random_state=iarandom.RNG(1))
assert np.array_equal(image_aug_pc1, image_aug_pc2)
assert not np.array_equal(image_aug_pc2, image_aug_pc3)
def test_no_channels(self):
for per_channel in [False, True]:
with self.subTest(per_channel=per_channel):
image = np.uint8([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
image = image.reshape((1, 10))
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
per_channel=per_channel,
random_state=iarandom.RNG(0))
assert not np.array_equal(image_aug, image)
def test_unusual_channel_numbers(self):
for nb_channels in [1, 2, 3, 4, 5, 511, 512, 513]:
for per_channel in [False, True]:
with self.subTest(nb_channels=nb_channels,
per_channel=per_channel):
image = np.uint8([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
image = np.tile(image.reshape((1, 10, 1)),
(1, 1, nb_channels))
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
                        per_channel=per_channel,
random_state=iarandom.RNG(0))
assert not np.array_equal(image_aug, image)
def test_other_dtypes_bool(self):
for per_channel in [False, True]:
with self.subTest(per_channel=per_channel):
image = np.array([0, 1], dtype=bool)
image = np.tile(image, (int(3*300*300/2),))
image = image.reshape((300, 300, 3))
image_cp = np.copy(image)
rng = iarandom.RNG(0)
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
image,
x1=10,
y1=10,
x2=300-10,
y2=300-10,
cval=0,
per_channel=per_channel,
random_state=rng)
rect = image_aug[10:-10, 10:-10]
p_true = np.sum(rect) / rect.size
assert np.array_equal(image_aug[:10, :], image_cp[:10, :])
assert not np.array_equal(rect, image_cp[10:-10, 10:-10])
assert np.isclose(p_true, 0.5, rtol=0, atol=0.1)
if per_channel:
for c in np.arange(1, image.shape[2]):
assert not np.array_equal(image_aug[..., 0],
image_aug[..., c])
def test_other_dtypes_int_uint(self):
dtypes = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dtype in dtypes:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
dynamic_range = int(max_value) - int(min_value)
gaussian_min = iarandom.RNG(0).normal(min_value, 0.0001,
size=(1,))
gaussian_max = iarandom.RNG(0).normal(max_value, 0.0001,
size=(1,))
assert min_value - 1.0 <= gaussian_min <= min_value + 1.0
assert max_value - 1.0 <= gaussian_max <= max_value + 1.0
for per_channel in [False, True]:
with self.subTest(dtype=dtype, per_channel=per_channel):
                    # don't generate the image from choice() here; that seems
                    # to not support uint64 (max value not in result)
image = np.array([min_value, min_value+1,
int(center_value),
max_value-1, max_value], dtype=dtype)
image = np.tile(image, (int(3*300*300/5),))
image = image.reshape((300, 300, 3))
assert min_value in image
assert max_value in image
image_cp = np.copy(image)
rng = iarandom.RNG(0)
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
image, x1=10, y1=10, x2=300-10, y2=300-10,
cval=0, per_channel=per_channel, random_state=rng)
rect = image_aug[10:-10, 10:-10]
mean = np.average(np.float128(rect))
std = np.std(np.float128(rect) - center_value)
assert np.array_equal(image_aug[:10, :], image_cp[:10, :])
assert not np.array_equal(rect,
image_cp[10:-10, 10:-10])
assert np.isclose(mean, center_value, rtol=0,
atol=0.05*dynamic_range)
assert np.isclose(std, dynamic_range/2.0/3.0, rtol=0,
atol=0.05*dynamic_range/2.0/3.0)
assert np.min(rect) < min_value + 0.2 * dynamic_range
assert np.max(rect) > max_value - 0.2 * dynamic_range
if per_channel:
for c in np.arange(1, image.shape[2]):
assert not np.array_equal(image_aug[..., 0],
image_aug[..., c])
def test_other_dtypes_float(self):
dtypes = ["float16", "float32", "float64", "float128"]
for dtype in dtypes:
min_value = 0.0
center_value = 0.5
max_value = 1.0
dynamic_range = np.float128(max_value) - np.float128(min_value)
gaussian_min = iarandom.RNG(0).normal(min_value, 0.0001,
size=(1,))
gaussian_max = iarandom.RNG(0).normal(max_value, 0.0001,
size=(1,))
assert min_value - 1.0 <= gaussian_min <= min_value + 1.0
assert max_value - 1.0 <= gaussian_max <= max_value + 1.0
for per_channel in [False, True]:
with self.subTest(dtype=dtype, per_channel=per_channel):
                    # don't generate the image from choice() here; that seems
                    # to not support uint64 (max value not in result)
image = np.array([min_value, min_value+1,
int(center_value),
max_value-1, max_value], dtype=dtype)
image = np.tile(image, (int(3*300*300/5),))
image = image.reshape((300, 300, 3))
assert np.any(np.isclose(image, min_value,
rtol=0, atol=1e-4))
assert np.any(np.isclose(image, max_value,
rtol=0, atol=1e-4))
image_cp = np.copy(image)
rng = iarandom.RNG(0)
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
image, x1=10, y1=10, x2=300-10, y2=300-10,
cval=0, per_channel=per_channel, random_state=rng)
rect = image_aug[10:-10, 10:-10]
mean = np.average(np.float128(rect))
std = np.std(np.float128(rect) - center_value)
assert np.allclose(image_aug[:10, :], image_cp[:10, :],
rtol=0, atol=1e-4)
assert not np.allclose(rect, image_cp[10:-10, 10:-10],
rtol=0, atol=1e-4)
assert np.isclose(mean, center_value, rtol=0,
atol=0.05*dynamic_range)
assert np.isclose(std, dynamic_range/2.0/3.0, rtol=0,
atol=0.05*dynamic_range/2.0/3.0)
assert np.min(rect) < min_value + 0.2 * dynamic_range
assert np.max(rect) > max_value - 0.2 * dynamic_range
if per_channel:
for c in np.arange(1, image.shape[2]):
assert not np.allclose(image_aug[..., 0],
image_aug[..., c],
rtol=0, atol=1e-4)
class Test_fill_rectangle_constant_(unittest.TestCase):
def test_simple_image(self):
image = np.mod(np.arange(100*100*3), 256).astype(np.uint8).reshape(
(100, 100, 3))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=17, per_channel=False, random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60] == 17)
def test_iterable_cval_but_per_channel_is_false(self):
image = np.mod(np.arange(100*100*3), 256).astype(np.uint8).reshape(
(100, 100, 3))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=[17, 21, 25], per_channel=False, random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60] == 17)
def test_iterable_cval_with_per_channel_is_true(self):
image = np.mod(np.arange(100*100*3), 256).astype(np.uint8).reshape(
(100, 100, 3))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=[17, 21, 25], per_channel=True, random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60, 0] == 17)
assert np.all(image_aug[20:70, 10:60, 1] == 21)
assert np.all(image_aug[20:70, 10:60, 2] == 25)
def test_iterable_cval_with_per_channel_is_true_channel_mismatch(self):
image = np.mod(np.arange(100*100*5), 256).astype(np.uint8).reshape(
(100, 100, 5))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=[17, 21], per_channel=True, random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60, 0] == 17)
assert np.all(image_aug[20:70, 10:60, 1] == 21)
assert np.all(image_aug[20:70, 10:60, 2] == 17)
assert np.all(image_aug[20:70, 10:60, 3] == 21)
assert np.all(image_aug[20:70, 10:60, 4] == 17)
def test_single_cval_with_per_channel_is_true(self):
image = np.mod(np.arange(100*100*3), 256).astype(np.uint8).reshape(
(100, 100, 3))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=17, per_channel=True, random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60, 0] == 17)
assert np.all(image_aug[20:70, 10:60, 1] == 17)
assert np.all(image_aug[20:70, 10:60, 2] == 17)
def test_no_channels_single_cval(self):
for per_channel in [False, True]:
with self.subTest(per_channel=per_channel):
image = np.mod(
np.arange(100*100), 256
).astype(np.uint8).reshape((100, 100))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=17, per_channel=per_channel, random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60] == 17)
def test_no_channels_iterable_cval(self):
for per_channel in [False, True]:
with self.subTest(per_channel=per_channel):
image = np.mod(
np.arange(100*100), 256
).astype(np.uint8).reshape((100, 100))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=[17, 21, 25], per_channel=per_channel,
random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60] == 17)
def test_unusual_channel_numbers(self):
for nb_channels in [1, 2, 4, 5, 511, 512, 513]:
for per_channel in [False, True]:
with self.subTest(per_channel=per_channel):
image = np.mod(
np.arange(100*100*nb_channels), 256
).astype(np.uint8).reshape((100, 100, nb_channels))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=[17, 21], per_channel=per_channel,
random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
if per_channel:
for c in np.arange(nb_channels):
val = 17 if c % 2 == 0 else 21
assert np.all(image_aug[20:70, 10:60, c] == val)
else:
assert np.all(image_aug[20:70, 10:60, :] == 17)
def test_other_dtypes_bool(self):
for per_channel in [False, True]:
with self.subTest(per_channel=per_channel):
image = np.array([0, 1], dtype=bool)
image = np.tile(image, (int(3*300*300/2),))
image = image.reshape((300, 300, 3))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=10, x2=300-10, y2=300-10,
cval=[0, 1], per_channel=per_channel,
random_state=None)
rect = image_aug[10:-10, 10:-10]
assert np.array_equal(image_aug[:10, :], image_cp[:10, :])
if per_channel:
assert np.all(image_aug[10:-10, 10:-10, 0] == 0)
assert np.all(image_aug[10:-10, 10:-10, 1] == 1)
assert np.all(image_aug[10:-10, 10:-10, 2] == 0)
else:
assert np.all(image_aug[20:70, 10:60] == 0)
def test_other_dtypes_uint_int(self):
dtypes = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dtype in dtypes:
for per_channel in [False, True]:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
with self.subTest(dtype=dtype, per_channel=per_channel):
image = np.array([min_value, min_value+1,
int(center_value),
max_value-1, max_value], dtype=dtype)
image = np.tile(image, (int(3*300*300/5),))
image = image.reshape((300, 300, 3))
assert min_value in image
assert max_value in image
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=10, x2=300-10, y2=300-10,
cval=[min_value, 10, max_value],
per_channel=per_channel,
random_state=None)
assert np.array_equal(image_aug[:10, :], image_cp[:10, :])
if per_channel:
assert np.all(image_aug[10:-10, 10:-10, 0]
== min_value)
assert np.all(image_aug[10:-10, 10:-10, 1]
== 10)
assert np.all(image_aug[10:-10, 10:-10, 2]
== max_value)
else:
                        assert np.all(image_aug[10:-10, 10:-10] == min_value)
def test_other_dtypes_float(self):
dtypes = ["float16", "float32", "float64", "float128"]
for dtype in dtypes:
for per_channel in [False, True]:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
with self.subTest(dtype=dtype, per_channel=per_channel):
image = np.array([min_value, min_value+1,
int(center_value),
max_value-1, max_value], dtype=dtype)
image = np.tile(image, (int(3*300*300/5),))
image = image.reshape((300, 300, 3))
# Use this here instead of any(isclose(...)) because
# the latter one leads to overflow warnings.
assert image.flat[0] <= np.float128(min_value) + 1.0
assert image.flat[4] >= np.float128(max_value) - 1.0
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=10, x2=300-10, y2=300-10,
cval=[min_value, 10, max_value],
per_channel=per_channel,
random_state=None)
assert image_aug.dtype.name == dtype
assert np.allclose(image_aug[:10, :], image_cp[:10, :],
rtol=0, atol=1e-4)
if per_channel:
assert np.allclose(image_aug[10:-10, 10:-10, 0],
np.float128(min_value),
rtol=0, atol=1e-4)
assert np.allclose(image_aug[10:-10, 10:-10, 1],
np.float128(10),
rtol=0, atol=1e-4)
assert np.allclose(image_aug[10:-10, 10:-10, 2],
np.float128(max_value),
rtol=0, atol=1e-4)
else:
                        assert np.allclose(image_aug[10:-10, 10:-10],
np.float128(min_value),
rtol=0, atol=1e-4)
class TestAdd(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.Add(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.Add(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
        # no add, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [float, int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
aug = iaa.Add(value=value_type(value), per_channel=per_channel)
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
def test_add_floats(self):
# specific tests with floats
aug = iaa.Add(value=0.75)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
aug = iaa.Add(value=0.45)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
aug = iaa.Add(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
def test_keypoints_dont_change(self):
        # keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Add(value=1)
aug_det = iaa.Add(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_value(self):
# varying values
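        # repeatedly augmenting the same input with a random value should
        # change the output most of the time, while the to_deterministic()
        # copy must always return the same result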
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
def test_per_channel(self):
# test channelwise
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((1, 1, 100), dtype=np.uint8))
uq = np.unique(observed)
assert observed.shape == (1, 1, 100)
assert 0 in uq
assert 1 in uq
assert len(uq) == 2
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((1, 1, 20), dtype=np.uint8))
assert observed.shape == (1, 1, 20)
uq = np.unique(observed)
per_channel = (len(uq) == 2)
if per_channel:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Add(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Add(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.Add(value=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps(self):
# test heatmaps (not affected by augmenter)
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
aug = iaa.Add(value=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
image = np.zeros((3, 3), dtype=bool)
aug = iaa.Add(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 20, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
image = np.full((1, 1, 3), 20, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
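            # float16 only offers roughly 3 decimal digits of precision, so
            # use a correspondingly looser absolute tolerance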
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
def test_pickleable(self):
aug = iaa.Add((0, 50), per_channel=True, seed=1)
runtest_pickleable_uint8_img(aug, iterations=10)
class TestAddElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.AddElementwise(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.AddElementwise(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
        # no add, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_add_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
aug = iaa.AddElementwise(value=value_type(value), per_channel=per_channel)
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 + 1
assert np.max(observed) <= 100 + 10
aug = iaa.AddElementwise(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 + 1
assert np.max(observed) <= 100 + 10
aug = iaa.AddElementwise(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 - 3
assert np.max(observed) <= 100 + 3
aug = iaa.AddElementwise(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 - 3
assert np.max(observed) <= 100 + 3
def test_keypoints_dont_change(self):
        # keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.AddElementwise(value=1)
aug_det = iaa.AddElementwise(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_value(self):
# varying values
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
def test_samples_change_by_spatial_location(self):
# values should change between pixels
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=(-50, 50))
nb_same = 0
nb_different = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_flat = observed_aug.flatten()
last = None
for j in sm.xrange(observed_aug_flat.size):
if last is not None:
v = observed_aug_flat[j]
if v - 0.0001 <= last <= v + 0.0001:
nb_same += 1
else:
nb_different += 1
last = observed_aug_flat[j]
assert nb_different > 0.9 * (nb_different + nb_same)
def test_per_channel(self):
# test channelwise
aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((20, 20, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.AddElementwise(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.AddElementwise(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.AddElementwise(value=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.AddElementwise(value=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.AddElementwise(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
for _ in sm.xrange(10):
image = np.full((5, 5, 3), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
image = np.full((5, 5, 3), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
def test_pickleable(self):
aug = iaa.AddElementwise((0, 50), per_channel=True, seed=1)
runtest_pickleable_uint8_img(aug, iterations=2)
class AdditiveGaussianNoise(unittest.TestCase):
def setUp(self):
reseed()
def test_loc_zero_scale_zero(self):
# no noise, shouldn't change anything
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
images = np.array([base_img])
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
def test_loc_zero_scale_nonzero(self):
# zero-centered noise
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert not np.array_equal(observed, images)
observed = aug_det.augment_images(images)
assert not np.array_equal(observed, images)
observed = aug.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
observed = aug_det.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
def test_std_dev_of_added_noise_matches_scale(self):
# std correct?
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
nb_iterations = 1000
values = []
for i in sm.xrange(nb_iterations):
images_aug = aug.augment_images(images)
values.append(images_aug[0, 0, 0, 0])
values = np.array(values)
assert np.min(values) == 0
assert 0.1 < np.std(values) / 255.0 < 0.4
def test_nonzero_loc(self):
# non-zero loc
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0.25 * 255, scale=0.01 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
nb_iterations = 1000
values = []
for i in sm.xrange(nb_iterations):
images_aug = aug.augment_images(images)
values.append(images_aug[0, 0, 0, 0] - 128)
values = np.array(values)
assert 54 < np.average(values) < 74 # loc=0.25 should be around 255*0.25=64 average
def test_tuple_as_loc(self):
# varying locs
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=(0, 0.5 * 255), scale=0.0001 * 255)
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_stochastic_parameter_as_loc(self):
# varying locs by stochastic param
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=iap.Choice([-20, 20]), scale=0.0001 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
seen = [0, 0]
for i in sm.xrange(200):
observed = aug.augment_images(images)
mean = np.mean(observed)
diff_m20 = abs(mean - (128-20))
diff_p20 = abs(mean - (128+20))
if diff_m20 <= 1:
seen[0] += 1
elif diff_p20 <= 1:
seen[1] += 1
else:
assert False
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_tuple_as_scale(self):
# varying stds
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=(0.01 * 255, 0.2 * 255))
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_stochastic_parameter_as_scale(self):
# varying stds by stochastic param
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=iap.Choice([1, 20]))
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 128
seen = [0, 0, 0]
for i in sm.xrange(200):
observed = aug.augment_images(images)
std = np.std(observed.astype(np.int32) - 128)
diff_1 = abs(std - 1)
diff_20 = abs(std - 20)
if diff_1 <= 2:
seen[0] += 1
elif diff_20 <= 5:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 5
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.AdditiveGaussianNoise(loc="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.AdditiveGaussianNoise(scale="test")
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0.5, scale=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.AdditiveGaussianNoise(scale=(0.1, 10), per_channel=True,
seed=1)
runtest_pickleable_uint8_img(aug, iterations=2)
class TestCutout(unittest.TestCase):
def setUp(self):
reseed()
def test___init___defaults(self):
aug = iaa.Cutout()
assert aug.nb_iterations.value == 1
assert isinstance(aug.position[0], iap.Uniform)
assert isinstance(aug.position[1], iap.Uniform)
assert np.isclose(aug.size.value, 0.2)
assert aug.squared.value == 1
assert aug.fill_mode.value == "constant"
assert aug.cval.value == 128
assert aug.fill_per_channel.value == 0
def test___init___custom(self):
aug = iaa.Cutout(
nb_iterations=1,
position=(0.5, 0.5),
size=0.1,
squared=0.6,
fill_mode=["gaussian", "constant"],
cval=(0, 255),
fill_per_channel=0.5
)
assert aug.nb_iterations.value == 1
assert np.isclose(aug.position[0].value, 0.5)
assert np.isclose(aug.position[1].value, 0.5)
assert np.isclose(aug.size.value, 0.1)
assert np.isclose(aug.squared.p.value, 0.6)
assert aug.fill_mode.a == ["gaussian", "constant"]
assert np.isclose(aug.cval.a.value, 0)
assert np.isclose(aug.cval.b.value, 255)
assert np.isclose(aug.fill_per_channel.p.value, 0.5)
def test___init___fill_mode_is_stochastic_param(self):
param = iap.Deterministic("constant")
aug = iaa.Cutout(fill_mode=param)
assert aug.fill_mode is param
@mock.patch("imgaug.augmenters.arithmetic.cutout_")
def test_mocked__squared_false(self, mock_apply):
aug = iaa.Cutout(nb_iterations=2,
position=(0.5, 0.6),
size=iap.DeterministicList([0.1, 0.2]),
squared=False,
fill_mode="gaussian",
cval=1,
fill_per_channel=True)
image = np.zeros((10, 30, 3), dtype=np.uint8)
# don't return the image itself, otherwise the loop below would fail
# at its second iteration, as the method is expected to operate
# internally on a copy of the image and not on the image itself
mock_apply.return_value = np.copy(image)
_ = aug(image=image)
assert mock_apply.call_count == 2
for call_idx in np.arange(2):
args = mock_apply.call_args_list[call_idx][0]
kwargs = mock_apply.call_args_list[call_idx][1]
assert args[0] is not image
assert np.array_equal(args[0], image)
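# expected rect: centre at (0.5*W, 0.6*H); with squared=False the size
# samples [0.1, 0.2] give separate extents, 0.1*H vertically and
# 0.2*W horizontally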
assert np.isclose(kwargs["x1"], 0.5*30 - 0.5 * (0.2*30))
assert np.isclose(kwargs["y1"], 0.6*10 - 0.5 * (0.1*10))
assert np.isclose(kwargs["x2"], 0.5*30 + 0.5 * (0.2*30))
assert np.isclose(kwargs["y2"], 0.6*10 + 0.5 * (0.1*10))
assert kwargs["fill_mode"] == "gaussian"
assert np.array_equal(kwargs["cval"], [1, 1, 1])
assert np.isclose(kwargs["fill_per_channel"], 1.0)
assert isinstance(kwargs["seed"], iarandom.RNG)
@mock.patch("imgaug.augmenters.arithmetic.cutout_")
def test_mocked__squared_true(self, mock_apply):
aug = iaa.Cutout(nb_iterations=2,
position=(0.5, 0.6),
size=iap.DeterministicList([0.1, 0.2]),
squared=True,
fill_mode="gaussian",
cval=1,
fill_per_channel=True)
image = np.zeros((10, 30, 3), dtype=np.uint8)
# don't return the image itself, otherwise the loop below would fail
# at its second iteration, as the method is expected to operate
# internally on a copy of the image and not on the image itself
mock_apply.return_value = np.copy(image)
_ = aug(image=image)
assert mock_apply.call_count == 2
for call_idx in np.arange(2):
args = mock_apply.call_args_list[call_idx][0]
kwargs = mock_apply.call_args_list[call_idx][1]
assert args[0] is not image
assert np.array_equal(args[0], image)
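# with squared=True the height-based size sample (0.1) defines both the
# x- and y-extent (0.1*H = 1 px), so the cut patch is square in
# absolute pixels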
assert np.isclose(kwargs["x1"], 0.5*30 - 0.5 * (0.1*10))
assert np.isclose(kwargs["y1"], 0.6*10 - 0.5 * (0.1*10))
assert np.isclose(kwargs["x2"], 0.5*30 + 0.5 * (0.1*10))
assert np.isclose(kwargs["y2"], 0.6*10 + 0.5 * (0.1*10))
assert kwargs["fill_mode"] == "gaussian"
assert np.array_equal(kwargs["cval"], [1, 1, 1])
assert np.isclose(kwargs["fill_per_channel"], 1.0)
assert isinstance(kwargs["seed"], iarandom.RNG)
def test_simple_image(self):
aug = iaa.Cutout(nb_iterations=2,
position=(
iap.DeterministicList([0.2, 0.8]),
iap.DeterministicList([0.2, 0.8])
),
size=0.2,
fill_mode="constant",
cval=iap.DeterministicList([0, 0, 0, 1, 1, 1]))
image = np.full((100, 100, 3), 255, dtype=np.uint8)
for _ in np.arange(3):
images_aug = aug(images=[image, image])
for image_aug in images_aug:
values = np.unique(image_aug)
assert len(values) == 3
assert 0 in values
assert 1 in values
assert 255 in values
def test_batch_contains_only_non_image_data(self):
aug = iaa.Cutout()
segmap_arr = np.ones((3, 3, 1), dtype=np.int32)
segmap = ia.SegmentationMapsOnImage(segmap_arr, shape=(3, 3, 3))
segmap_aug = aug.augment_segmentation_maps(segmap)
assert np.array_equal(segmap.get_arr(), segmap_aug.get_arr())
def test_sampling_when_position_is_stochastic_parameter(self):
# sampling of position works slightly differently when it is a single
# parameter instead of a tuple (paramX, paramY), so we have an extra
# test for that situation here
param = iap.DeterministicList([0.5, 0.6])
aug = iaa.Cutout(position=param)
samples = aug._draw_samples([
np.zeros((3, 3, 3), dtype=np.uint8),
np.zeros((3, 3, 3), dtype=np.uint8)
], iarandom.RNG(0))
assert np.allclose(samples.pos_x, [0.5, 0.5])
assert np.allclose(samples.pos_y, [0.6, 0.6])
def test_by_comparison_to_official_implementation(self):
image = np.ones((10, 8, 2), dtype=np.uint8)
aug = iaa.Cutout(1, position="uniform", size=0.2, squared=True,
cval=0)
aug_official = _CutoutOfficial(n_holes=1, length=int(10*0.2))
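# length=int(10*0.2) = 2 px matches size=0.2 with squared=True on the
# 10 px high test image, so both implementations cut comparable patches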
dropped = np.zeros((10, 8, 2), dtype=np.int32)
dropped_official = np.copy(dropped)
height = np.zeros((10, 8, 2), dtype=np.int32)
width = np.copy(height)
height_official = np.copy(height)
width_official = np.copy(width)
nb_iterations = 3 * 1000
images_aug = aug(images=[image] * nb_iterations)
for image_aug in images_aug:
image_aug_off = aug_official(image)
mask = (image_aug == 0)
mask_off = (image_aug_off == 0)
dropped += mask
dropped_official += mask_off
ydrop = np.max(mask, axis=(2, 1))
xdrop = np.max(mask, axis=(2, 0))
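# ydrop/xdrop flag rows/columns containing at least one dropped pixel;
# the first and last flagged indices recover the patch's bounding box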
wx = np.where(xdrop)
wy = np.where(ydrop)
x1 = wx[0][0]
x2 = wx[0][-1]
y1 = wy[0][0]
y2 = wy[0][-1]
ydrop_off = np.max(mask_off, axis=(2, 1))
xdrop_off = np.max(mask_off, axis=(2, 0))
wx_off = np.where(xdrop_off)
wy_off = np.where(ydrop_off)
x1_off = wx_off[0][0]
x2_off = wx_off[0][-1]
y1_off = wy_off[0][0]
y2_off = wy_off[0][-1]
height += (
np.full(height.shape, 1 + (y2 - y1), dtype=np.int32)
* mask)
width += (
np.full(width.shape, 1 + (x2 - x1), dtype=np.int32)
* mask)
height_official += (
np.full(height_official.shape, 1 + (y2_off - y1_off),
dtype=np.int32)
* mask_off)
width_official += (
np.full(width_official.shape, 1 + (x2_off - x1_off),
dtype=np.int32)
* mask_off)
dropped_prob = dropped / nb_iterations
dropped_prob_off = dropped_official / nb_iterations
height_avg = height / (dropped + 1e-4)
height_avg_off = height_official / (dropped_official + 1e-4)
width_avg = width / (dropped + 1e-4)
width_avg_off = width_official / (dropped_official + 1e-4)
prob_max_diff = np.max(np.abs(dropped_prob - dropped_prob_off))
height_avg_max_diff = np.max(np.abs(height_avg - height_avg_off))
width_avg_max_diff = np.max(np.abs(width_avg - width_avg_off))
assert prob_max_diff < 0.04
assert height_avg_max_diff < 0.3
assert width_avg_max_diff < 0.3
def test_determinism(self):
aug = iaa.Cutout(nb_iterations=(1, 3),
size=(0.1, 0.2),
fill_mode=["gaussian", "constant"],
cval=(0, 255))
image = np.mod(
np.arange(100*100*3), 256
).reshape((100, 100, 3)).astype(np.uint8)
sums = []
for _ in np.arange(10):
aug_det = aug.to_deterministic()
image_aug1 = aug_det(image=image)
image_aug2 = aug_det(image=image)
assert np.array_equal(image_aug1, image_aug2)
sums.append(np.sum(image_aug1))
assert len(np.unique(sums)) > 1
def test_get_parameters(self):
aug = iaa.Cutout(
nb_iterations=1,
position=(0.5, 0.5),
size=0.1,
squared=0.6,
fill_mode=["gaussian", "constant"],
cval=(0, 255),
fill_per_channel=0.5
)
params = aug.get_parameters()
assert params[0] is aug.nb_iterations
assert params[1] is aug.position
assert params[2] is aug.size
assert params[3] is aug.squared
assert params[4] is aug.fill_mode
assert params[5] is aug.cval
assert params[6] is aug.fill_per_channel
def test_pickleable(self):
aug = iaa.Cutout(
nb_iterations=1,
position=(0.5, 0.5),
size=0.1,
squared=0.6,
fill_mode=["gaussian", "constant"],
cval=(0, 255),
fill_per_channel=0.5
)
runtest_pickleable_uint8_img(aug)
# This is mostly copy-pasted Cutout code from
# https://github.com/uoguelph-mlrg/Cutout/blob/master/util/cutout.py.
# We use it to compare our implementation against the official one;
# some PyTorch calls were changed to their numpy equivalents.
class _CutoutOfficial(object):
"""Randomly mask out one or more patches from an image.
Args:
n_holes (int): Number of patches to cut out of each image.
length (int): The length (in pixels) of each square patch.
"""
def __init__(self, n_holes, length):
self.n_holes = n_holes
self.length = length
def __call__(self, img):
"""
Args:
img (Tensor): Tensor image of size (C, H, W).
Returns:
Tensor: Image with n_holes of dimension length x length cut out of
it.
"""
# h = img.size(1)
# w = img.size(2)
h = img.shape[0]
w = img.shape[1]
mask = np.ones((h, w), np.float32)
for n in range(self.n_holes):
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
# note that in the paper they normalize to 0-mean,
# i.e. 0 here is actually not black but grayish pixels
mask[y1: y2, x1: x2] = 0
# mask = torch.from_numpy(mask)
# mask = mask.expand_as(img)
if img.ndim != 2:
mask = np.tile(mask[:, :, np.newaxis], (1, 1, img.shape[-1]))
img = img * mask
return img
class TestDropout(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_zero(self):
# no dropout, shouldn't change anything
base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Dropout(p=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_p_is_one(self):
# 100% dropout, should drop everything
base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Dropout(p=1.0)
observed = aug.augment_images(images)
expected = np.zeros((1, 512, 512, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [np.zeros((512, 512, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
def test_p_is_50_percent(self):
# 50% dropout
base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Dropout(p=0.5)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert not np.array_equal(observed, images)
percent_nonzero = len(observed.flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug_det.augment_images(images)
assert not np.array_equal(observed, images)
percent_nonzero = len(observed.flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
percent_nonzero = len(observed[0].flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug_det.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
percent_nonzero = len(observed[0].flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
def test_tuple_as_p(self):
# varying p
aug = iaa.Dropout(p=(0.0, 1.0))
aug_det = aug.to_deterministic()
images = np.ones((1, 8, 8, 1), dtype=np.uint8) * 255
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_list_as_p(self):
aug = iaa.Dropout(p=[0.0, 0.5, 1.0])
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 255
nb_seen = [0, 0, 0, 0]
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
n_dropped = np.sum(observed_aug == 0)
p_observed = n_dropped / observed_aug.size
if 0 <= p_observed <= 0.01:
nb_seen[0] += 1
elif 0.5 - 0.05 <= p_observed <= 0.5 + 0.05:
nb_seen[1] += 1
elif 1.0-0.01 <= p_observed <= 1.0:
nb_seen[2] += 1
else:
nb_seen[3] += 1
assert np.allclose(nb_seen[0:3], nb_iterations*0.33, rtol=0, atol=75)
assert nb_seen[3] < 30
def test_stochastic_parameter_as_p(self):
# varying p by stochastic parameter
aug = iaa.Dropout(p=iap.Binomial(1-iap.Choice([0.0, 0.5])))
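# Binomial(1 - p_drop) here effectively acts as the per-pixel keep mask:
# Choice([0.0, 0.5]) should yield either ~0% or ~50% dropped pixels,
# which is what the two bins below count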
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 255
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_images(images)
p = np.mean(observed == 0)
if 0.4 < p < 0.6:
seen[0] += 1
elif p < 0.1:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exception for wrong parameter datatype
got_exception = False
try:
_aug = iaa.Dropout(p="test")
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.Dropout(p=1.0)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.Dropout(p=0.5, per_channel=True, seed=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarseDropout(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_zero(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0, size_px=4, size_percent=None, per_channel=False, min_size=4)
observed = aug.augment_image(base_img)
expected = base_img
assert np.array_equal(observed, expected)
def test_p_is_one(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=1.0, size_px=4, size_percent=None, per_channel=False, min_size=4)
observed = aug.augment_image(base_img)
expected = np.zeros_like(base_img)
assert np.array_equal(observed, expected)
def test_p_is_50_percent(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0.5, size_px=1, size_percent=None, per_channel=False, min_size=1)
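# size_px=1 means the dropout mask is sampled at 1x1 and upscaled to
# 16x16, so each image is either dropped entirely (average 0) or kept
# entirely (average 100)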
averages = []
for _ in sm.xrange(50):
observed = aug.augment_image(base_img)
averages.append(np.average(observed))
assert all([v in [0, 100] for v in averages])
assert 50 - 20 < np.average(averages) < 50 + 20
def test_size_percent(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0.5, size_px=None, size_percent=0.001, per_channel=False, min_size=1)
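# size_percent=0.001 of a 16 px image is below one pixel, so (with
# min_size=1) the coarse mask again collapses to a single cell and each
# image is dropped or kept as a whole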
averages = []
for _ in sm.xrange(50):
observed = aug.augment_image(base_img)
averages.append(np.average(observed))
assert all([v in [0, 100] for v in averages])
assert 50 - 20 < np.average(averages) < 50 + 20
def test_per_channel(self):
aug = iaa.CoarseDropout(p=0.5, size_px=1, size_percent=None, per_channel=True, min_size=1)
base_img = np.ones((4, 4, 3), dtype=np.uint8) * 100
found = False
for _ in sm.xrange(100):
observed = aug.augment_image(base_img)
avgs = np.average(observed, axis=(0, 1))
if len(set(avgs)) >= 2:
found = True
break
assert found
def test_stochastic_parameter_as_p(self):
# varying p by stochastic parameter
aug = iaa.CoarseDropout(p=iap.Binomial(1-iap.Choice([0.0, 0.5])), size_px=50)
images = np.ones((1, 100, 100, 1), dtype=np.uint8) * 255
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_images(images)
p = np.mean(observed == 0)
if 0.4 < p < 0.6:
seen[0] += 1
elif p < 0.1:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exception for bad parameters
got_exception = False
try:
_ = iaa.CoarseDropout(p="test")
except Exception:
got_exception = True
assert got_exception
def test___init___size_px_and_size_percent_both_none(self):
got_exception = False
try:
_ = iaa.CoarseDropout(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarseDropout(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarseDropout(p=0.5, size_px=10, per_channel=True,
seed=1)
runtest_pickleable_uint8_img(aug, iterations=10, shape=(40, 40, 3))
class TestDropout2d(unittest.TestCase):
def setUp(self):
reseed()
def test___init___defaults(self):
aug = iaa.Dropout2d(p=0)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
assert aug.nb_keep_channels == 1
def test___init___p_is_float(self):
aug = iaa.Dropout2d(p=0.7)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 0.3)
assert aug.nb_keep_channels == 1
def test___init___nb_keep_channels_is_int(self):
aug = iaa.Dropout2d(p=0, nb_keep_channels=2)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
assert aug.nb_keep_channels == 2
def test_no_images_in_batch(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
heatmaps = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=heatmaps)
assert np.allclose(heatmaps_aug.arr_0to1, heatmaps.arr_0to1)
def test_p_is_1(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_heatmaps(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, 0.0)
def test_p_is_1_segmentation_maps(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, 0.0)
def test_p_is_1_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert cbaoi_aug.items == []
def test_p_is_1_heatmaps__keep_one_channel(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_1_segmentation_maps__keep_one_channel(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_1_cbaois__keep_one_channel(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_0(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.array_equal(image_aug, image)
def test_p_is_0_heatmaps(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_0_segmentation_maps(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_0_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_075(self):
image = np.full((1, 1, 3000), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=0.75, nb_keep_channels=0)
image_aug = aug(image=image)
nb_kept = np.sum(image_aug == 255)
nb_dropped = image.shape[2] - nb_kept
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.isclose(nb_dropped, image.shape[2]*0.75, atol=75)
def test_force_nb_keep_channels(self):
image = np.full((1, 1, 3), 255, dtype=np.uint8)
images = np.array([image] * 1000)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
images_aug = aug(images=images)
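# with p=1.0 and nb_keep_channels=1 exactly one channel survives per
# image; record which channel index that was for every image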
ids_kept = [np.nonzero(image[0, 0, :]) for image in images_aug]
ids_kept_uq = np.unique(ids_kept)
nb_kept = np.sum(images_aug == 255)
nb_dropped = (len(images) * images.shape[3]) - nb_kept
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
# on average, keep 1 of 3 channels
# due to p=1.0 we expect to get exactly 2/3 dropped
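# 1000 images * 3 channels = 3000 channel planes, 1000 kept -> 2000 dropped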
assert np.isclose(nb_dropped,
(len(images)*images.shape[3])*(2/3), atol=1)
# every channel dropped at least once, i.e. which one is kept is random
assert sorted(ids_kept_uq.tolist()) == [0, 1, 2]
def test_some_images_below_nb_keep_channels(self):
image_2c = np.full((1, 1, 2), 255, dtype=np.uint8)
image_3c = np.full((1, 1, 3), 255, dtype=np.uint8)
images = [image_2c if i % 2 == 0 else image_3c
for i in sm.xrange(100)]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=2)
images_aug = aug(images=images)
for i, image_aug in enumerate(images_aug):
assert np.sum(image_aug == 255) == 2
if i % 2 == 0:
assert np.sum(image_aug == 0) == 0
else:
assert np.sum(image_aug == 0) == 1
def test_all_images_below_nb_keep_channels(self):
image = np.full((1, 1, 2), 255, dtype=np.uint8)
images = np.array([image] * 100)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
images_aug = aug(images=images)
nb_kept = np.sum(images_aug == 255)
nb_dropped = (len(images) * images.shape[3]) - nb_kept
assert nb_dropped == 0
def test_get_parameters(self):
aug = iaa.Dropout2d(p=0.7, nb_keep_channels=2)
params = aug.get_parameters()
assert isinstance(params[0], iap.Binomial)
assert np.isclose(params[0].p.value, 0.3)
assert params[1] == 2
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.full(shape, 255, dtype=np.uint8)
aug = iaa.Dropout2d(1.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_other_dtypes_bool(self):
image = np.full((1, 1, 10), 1, dtype=bool)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == "bool"
assert np.sum(image_aug == 1) == 3
assert np.sum(image_aug == 0) == 7
def test_other_dtypes_uint_int(self):
dts = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, int(center_value), max_value]
for value in values:
with self.subTest(dtype=dt, value=value):
image = np.full((1, 1, 10), value, dtype=dt)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == dt
if value == 0:
assert np.sum(image_aug == value) == 10
else:
assert np.sum(image_aug == value) == 3
assert np.sum(image_aug == 0) == 7
def test_other_dtypes_float(self):
dts = ["float16", "float32", "float64", "float128"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, -10.0, center_value, 10.0, max_value]
atol = 1e-3*max_value if dt == "float16" else 1e-9 * max_value
_isclose = functools.partial(np.isclose, atol=atol, rtol=0)
for value in values:
with self.subTest(dtype=dt, value=value):
image = np.full((1, 1, 10), value, dtype=dt)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == dt
if _isclose(value, 0.0):
assert np.sum(_isclose(image_aug, value)) == 10
else:
assert (
np.sum(_isclose(image_aug, np.float128(value)))
== 3)
assert np.sum(image_aug == 0) == 7
def test_pickleable(self):
aug = iaa.Dropout2d(p=0.5, seed=1)
runtest_pickleable_uint8_img(aug, iterations=3, shape=(1, 1, 50))
class TestTotalDropout(unittest.TestCase):
def setUp(self):
reseed()
def test___init___p(self):
aug = iaa.TotalDropout(p=0)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
def test_p_is_1(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=1.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_multiple_images_list(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = [image, image, image]
aug = iaa.TotalDropout(p=1.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_multiple_images_array(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = np.array([image, image, image], dtype=np.uint8)
aug = iaa.TotalDropout(p=1.0)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
assert np.sum(images_aug) == 0
def test_p_is_1_heatmaps(self):
aug = iaa.TotalDropout(p=1.0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, 0.0)
def test_p_is_1_segmentation_maps(self):
aug = iaa.TotalDropout(p=1.0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, 0.0)
def test_p_is_1_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.TotalDropout(p=1.0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert cbaoi_aug.items == []
def test_p_is_0(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=0.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.array_equal(image_aug, image)
def test_p_is_0_multiple_images_list(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = [image, image, image]
aug = iaa.TotalDropout(p=0.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.array_equal(image_aug, image_)
def test_p_is_0_multiple_images_array(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = np.array([image, image, image], dtype=np.uint8)
aug = iaa.TotalDropout(p=0.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.array_equal(image_aug, image_)
def test_p_is_0_heatmaps(self):
aug = iaa.TotalDropout(p=0.0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_0_segmentation_maps(self):
aug = iaa.TotalDropout(p=0.0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_0_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.TotalDropout(p=0.0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_075_multiple_images_list(self):
images = [np.full((1, 1, 1), 255, dtype=np.uint8)] * 3000
aug = iaa.TotalDropout(p=0.75)
images_aug = aug(images=images)
nb_kept = np.sum([np.sum(image_aug == 255) for image_aug in images_aug])
nb_dropped = len(images) - nb_kept
for image_aug in images_aug:
assert image_aug.shape == images[0].shape
assert image_aug.dtype.name == images[0].dtype.name
assert np.isclose(nb_dropped, len(images)*0.75, atol=75)
def test_p_is_075_multiple_images_array(self):
images = np.full((3000, 1, 1, 1), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=0.75)
images_aug = aug(images=images)
nb_kept = np.sum(images_aug == 255)
nb_dropped = len(images) - nb_kept
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
assert np.isclose(nb_dropped, len(images)*0.75, atol=75)
def test_get_parameters(self):
aug = iaa.TotalDropout(p=0.0)
params = aug.get_parameters()
assert params[0] is aug.p
def test_unusual_channel_numbers(self):
shapes = [
(5, 1, 1, 4),
(5, 1, 1, 5),
(5, 1, 1, 512),
(5, 1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
images = np.zeros(shape, dtype=np.uint8)
aug = iaa.TotalDropout(1.0)
images_aug = aug(images=images)
assert np.all(images_aug == 0)
assert images_aug.dtype.name == "uint8"
assert images_aug.shape == shape
def test_zero_sized_axes(self):
shapes = [
(5, 0, 0),
(5, 0, 1),
(5, 1, 0),
(5, 0, 1, 0),
(5, 1, 0, 0),
(5, 0, 1, 1),
(5, 1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
images = np.full(shape, 255, dtype=np.uint8)
aug = iaa.TotalDropout(1.0)
images_aug = aug(images=images)
assert images_aug.dtype.name == "uint8"
assert images_aug.shape == images.shape
def test_other_dtypes_bool(self):
image = np.full((1, 1, 10), 1, dtype=bool)
aug = iaa.TotalDropout(p=1.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == "bool"
assert np.sum(image_aug == 1) == 0
def test_other_dtypes_uint_int(self):
dts = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, int(center_value), max_value]
for value in values:
for p in [1.0, 0.0]:
with self.subTest(dtype=dt, value=value, p=p):
images = np.full((5, 1, 1, 3), value, dtype=dt)
aug = iaa.TotalDropout(p=p)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == dt
if np.isclose(p, 1.0) or value == 0:
assert np.sum(images_aug == 0) == 5*3
else:
assert np.sum(images_aug == value) == 5*3
def test_other_dtypes_float(self):
dts = ["float16", "float32", "float64", "float128"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, -10.0, center_value, 10.0, max_value]
atol = 1e-3*max_value if dt == "float16" else 1e-9 * max_value
_isclose = functools.partial(np.isclose, atol=atol, rtol=0)
for value in values:
for p in [1.0, 0.0]:
with self.subTest(dtype=dt, value=value, p=p):
images = np.full((5, 1, 1, 3), value, dtype=dt)
aug = iaa.TotalDropout(p=p)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == dt
if np.isclose(p, 1.0):
assert np.sum(_isclose(images_aug, 0.0)) == 5*3
else:
assert (
np.sum(_isclose(images_aug, np.float128(value)))
== 5*3)
def test_pickleable(self):
aug = iaa.TotalDropout(p=0.5, seed=1)
runtest_pickleable_uint8_img(aug, iterations=30, shape=(4, 4, 2))
class TestMultiply(unittest.TestCase):
def setUp(self):
reseed()
def test_mul_is_one(self):
# no multiply, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Multiply(mul=1.0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_mul_is_above_one(self):
# multiply >1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Multiply(mul=1.2)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
def test_mul_is_below_one(self):
# multiply <1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Multiply(mul=0.8)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Multiply(mul=1.2)
aug_det = iaa.Multiply(mul=1.2).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_mul(self):
# varying multiply factors
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Multiply(mul=(0, 2.0))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_per_channel(self):
aug = iaa.Multiply(mul=iap.Choice([0, 2]), per_channel=True)
observed = aug.augment_image(np.ones((1, 1, 100), dtype=np.uint8))
uq = np.unique(observed)
assert observed.shape == (1, 1, 100)
assert 0 in uq
assert 2 in uq
assert len(uq) == 2
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.Multiply(mul=iap.Choice([0, 2]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.ones((1, 1, 20), dtype=np.uint8))
assert observed.shape == (1, 1, 20)
uq = np.unique(observed)
per_channel = (len(uq) == 2)
if per_channel:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.Multiply(mul="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.Multiply(mul=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.Multiply(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.Multiply(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.Multiply(mul=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.Multiply(mul=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.Multiply(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(2.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(-1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 10)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 100)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 5)
image = np.full((3, 3), 0, dtype=dtype)
aug = iaa.Multiply(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
if np.dtype(dtype).kind == "u":
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
else:
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == -10)
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.Multiply(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == int(center_value))
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.Multiply(1.2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == int(1.2 * int(center_value)))
if np.dtype(dtype).kind == "u":
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.Multiply(100)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
# non-uint8 currently don't increase the itemsize
if dtype.name == "uint8":
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# non-uint8 currently don't increase the itemsize
if dtype.name == "uint8":
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
# non-uint8 currently don't increase the itemsize
if dtype.name == "uint8":
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 10, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(0.5, 1.5))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(0.5, 1.5), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
image = np.full((1, 1, 3), 10, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(1, 3))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(1, 3), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 10.0)
image = np.full((3, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(2.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 20.0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.Multiply(-10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, min_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.5*max_value)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), min_value, dtype=dtype)
# aug = iaa.Multiply(-2.0)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Multiply(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
# using tolerances of -100 - 1e-2 and 100 + 1e-2 is not enough for float16, had to be increased to -/+ 1e-1
# deactivated, because itemsize increase was deactivated
"""
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((1, 1, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
"""
def test_pickleable(self):
aug = iaa.Multiply((0.5, 1.5), per_channel=True, seed=1)
runtest_pickleable_uint8_img(aug, iterations=20)
class TestMultiplyElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test_mul_is_one(self):
# no multiply, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.MultiplyElementwise(mul=1.0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_mul_is_above_one(self):
# multiply >1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.MultiplyElementwise(mul=1.2)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
def test_mul_is_below_one(self):
# multiply <1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.MultiplyElementwise(mul=0.8)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.MultiplyElementwise(mul=1.2)
aug_det = iaa.Multiply(mul=1.2).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_mul(self):
# varying multiply factors
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.MultiplyElementwise(mul=(0, 2.0))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_samples_change_by_spatial_location(self):
# values should change between pixels
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.MultiplyElementwise(mul=(0.5, 1.5))
nb_same = 0
nb_different = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_flat = observed_aug.flatten()
last = None
for j in sm.xrange(observed_aug_flat.size):
if last is not None:
v = observed_aug_flat[j]
if v - 0.0001 <= last <= v + 0.0001:
nb_same += 1
else:
nb_different += 1
last = observed_aug_flat[j]
assert nb_different > 0.95 * (nb_different + nb_same)
def test_per_channel(self):
# test channelwise
aug = iaa.MultiplyElementwise(mul=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.ones((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
assert observed.shape == (100, 100, 3)
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.MultiplyElementwise(mul=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.ones((20, 20, 3), dtype=np.uint8))
assert observed.shape == (20, 20, 3)
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.MultiplyElementwise(mul="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.MultiplyElementwise(mul=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.MultiplyElementwise(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.MultiplyElementwise(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.MultiplyElementwise(mul=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.MultiplyElementwise(mul=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(2.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(-1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 10)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), 10, dtype=dtype)
# aug = iaa.MultiplyElementwise(10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == 100)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 5)
image = np.full((3, 3), 0, dtype=dtype)
aug = iaa.MultiplyElementwise(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# partially deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
if dtype.kind == "u":
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
else:
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == -10)
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == int(center_value))
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), int(center_value), dtype=dtype)
# aug = iaa.MultiplyElementwise(1.2)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == int(1.2 * int(center_value)))
# deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
if dtype.kind == "u":
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.MultiplyElementwise(100)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(-2)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == min_value)
# partially deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
for _ in sm.xrange(10):
image = np.full((5, 5, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(0.5, 1.5))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(0.5, 1.5), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
image = np.full((5, 5, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(1, 3))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(1, 3), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 10.0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), 10.0, dtype=dtype)
# aug = iaa.MultiplyElementwise(2.0)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, 20.0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(-10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, min_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.5*max_value)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), min_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(-2.0)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
# tolerances of -100 - 1e-2 and 100 + 1e-2 were not enough for float16; they had to be increased to -/+ 1e-1
# deactivated, because itemsize increase was deactivated
"""
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
"""
def test_pickleable(self):
aug = iaa.MultiplyElementwise((0.5, 1.5), per_channel=True,
seed=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestReplaceElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test_mask_is_always_zero(self):
# no replace, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) + 99
images = np.array([base_img])
images_list = [base_img]
aug = iaa.ReplaceElementwise(mask=0, replacement=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_mask_is_always_one(self):
# replace at 100 percent prob., should change everything
base_img = np.ones((3, 3, 1), dtype=np.uint8) + 99
images = np.array([base_img])
images_list = [base_img]
aug = iaa.ReplaceElementwise(mask=1, replacement=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.zeros((1, 3, 3, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.zeros((3, 3, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.zeros((1, 3, 3, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.zeros((3, 3, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
def test_mask_is_stochastic_parameter(self):
# replace half
aug = iaa.ReplaceElementwise(mask=iap.Binomial(p=0.5), replacement=0)
img = np.ones((100, 100, 1), dtype=np.uint8)
nb_iterations = 100
nb_diff_all = 0
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(img)
nb_diff = np.sum(img != observed)
nb_diff_all += nb_diff
p = nb_diff_all / (nb_iterations * 100 * 100)
assert 0.45 <= p <= 0.55
def test_mask_is_list(self):
# mask is list
aug = iaa.ReplaceElementwise(mask=[0.2, 0.7], replacement=1)
img = np.zeros((20, 20, 1), dtype=np.uint8)
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_image(img)
p = np.mean(observed)
if 0.1 < p < 0.3:
seen[0] += 1
elif 0.6 < p < 0.8:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) + 99
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.ReplaceElementwise(mask=iap.Binomial(p=0.5), replacement=0)
aug_det = iaa.ReplaceElementwise(mask=iap.Binomial(p=0.5), replacement=0).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_replacement_is_stochastic_parameter(self):
# different replacements
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.Choice([100, 200]))
img = np.zeros((1000, 1000, 1), dtype=np.uint8)
img100 = img + 100
img200 = img + 200
observed = aug.augment_image(img)
nb_diff_100 = np.sum(img100 != observed)
nb_diff_200 = np.sum(img200 != observed)
p100 = nb_diff_100 / (1000 * 1000)
p200 = nb_diff_200 / (1000 * 1000)
assert 0.45 <= p100 <= 0.55
assert 0.45 <= p200 <= 0.55
def test_per_channel(self):
# test channelwise
aug = iaa.MultiplyElementwise(mul=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.ones((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.ReplaceElementwise(mask=iap.Choice([0, 1]), replacement=1, per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((20, 20, 3), dtype=np.uint8))
assert observed.shape == (20, 20, 3)
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.ReplaceElementwise(mask="test", replacement=1)
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.ReplaceElementwise(mask=1, replacement=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.ReplaceElementwise(1.0, 1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.ReplaceElementwise(1.0, 1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.ReplaceElementwise(mask=0.5, replacement=2, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Binomial)
assert isinstance(params[0].p, iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert isinstance(params[2], iap.Deterministic)
assert 0.5 - 1e-6 < params[0].p.value < 0.5 + 1e-6
assert params[1].value == 2
assert params[2].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.ReplaceElementwise(mask=1, replacement=0.5)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
aug = iaa.ReplaceElementwise(mask=1, replacement=0)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
aug = iaa.ReplaceElementwise(mask=1, replacement=1)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=0)
image = np.full((3, 3), True, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
aug = iaa.ReplaceElementwise(mask=1, replacement=1)
image = np.full((3, 3), True, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=0.7)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=0.2)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.uint32, np.int8, np.int16, np.int32]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
aug = iaa.ReplaceElementwise(mask=1, replacement=1)
image = np.full((3, 3), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=2)
image = np.full((3, 3), 1, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 2)
# deterministic stochastic parameters are by default int32 for
# any integer value and hence cannot cover the full uint32 value
# range
if dtype.name != "uint32":
aug = iaa.ReplaceElementwise(mask=1, replacement=max_value)
image = np.full((3, 3), min_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=min_value)
image = np.full((3, 3), max_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.Uniform(1.0, 10.0))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert len(np.unique(image_aug)) > 1
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.DiscreteUniform(1, 10))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert len(np.unique(image_aug)) > 1
aug = iaa.ReplaceElementwise(mask=0.5, replacement=iap.DiscreteUniform(1, 10), per_channel=True)
image = np.full((1, 1, 100), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(0 <= image_aug, image_aug <= 10))
assert len(np.unique(image_aug)) > 2
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32, np.float64]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
atol = 1e-3*max_value if dtype == np.float16 else 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
aug = iaa.ReplaceElementwise(mask=1, replacement=1.0)
image = np.full((3, 3), 0.0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.allclose(image_aug, 1.0)
aug = iaa.ReplaceElementwise(mask=1, replacement=2.0)
image = np.full((3, 3), 1.0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.allclose(image_aug, 2.0)
# deterministic stochastic parameters are by default float32 for
# any float value and hence cannot cover the full float64 value
# range
if dtype.name != "float64":
aug = iaa.ReplaceElementwise(mask=1, replacement=max_value)
image = np.full((3, 3), min_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=min_value)
image = np.full((3, 3), max_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.Uniform(1.0, 10.0))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert not np.allclose(image_aug[1:, :], image_aug[:-1, :], atol=0.01)
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.DiscreteUniform(1, 10))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert not np.allclose(image_aug[1:, :], image_aug[:-1, :], atol=0.01)
aug = iaa.ReplaceElementwise(mask=0.5, replacement=iap.DiscreteUniform(1, 10), per_channel=True)
image = np.full((1, 1, 100), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(0 <= image_aug, image_aug <= 10))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1], atol=0.01)
def test_pickleable(self):
aug = iaa.ReplaceElementwise(mask=0.5, replacement=(0, 255),
per_channel=True, seed=1)
runtest_pickleable_uint8_img(aug, iterations=3)
# no more tests necessary here as SaltAndPepper is just a tiny wrapper around
# ReplaceElementwise
class TestSaltAndPepper(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.SaltAndPepper(p=0.5)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_p_is_one(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.SaltAndPepper(p=1.0)
observed = aug.augment_image(base_img)
nb_pepper = np.sum(observed < 40)
nb_salt = np.sum(observed > 255 - 40)
assert nb_pepper > 200
assert nb_salt > 200
def test_pickleable(self):
aug = iaa.SaltAndPepper(p=0.5, per_channel=True, seed=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarseSaltAndPepper(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.CoarseSaltAndPepper(p=0.5, size_px=100)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_size_px(self):
aug1 = iaa.CoarseSaltAndPepper(p=0.5, size_px=100)
aug2 = iaa.CoarseSaltAndPepper(p=0.5, size_px=10)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
ps1 = []
ps2 = []
for _ in sm.xrange(100):
observed1 = aug1.augment_image(base_img)
observed2 = aug2.augment_image(base_img)
p1 = np.mean(observed1 != 128)
p2 = np.mean(observed2 != 128)
ps1.append(p1)
ps2.append(p2)
assert 0.4 < np.mean(ps2) < 0.6
assert np.std(ps1)*1.5 < np.std(ps2)
def test_p_is_list(self):
aug = iaa.CoarseSaltAndPepper(p=[0.2, 0.5], size_px=100)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
seen = [0, 0, 0]
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
diff_020 = abs(0.2 - p)
diff_050 = abs(0.5 - p)
if diff_020 < 0.025:
seen[0] += 1
elif diff_050 < 0.025:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] < 10
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_p_is_tuple(self):
aug = iaa.CoarseSaltAndPepper(p=(0.0, 1.0), size_px=50)
base_img = np.zeros((50, 50, 1), dtype=np.uint8) + 128
ps = []
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
ps.append(p)
nb_bins = 5
hist, _ = np.histogram(ps, bins=nb_bins, range=(0.0, 1.0), density=False)
tolerance = 0.05
for nb_seen in hist:
density = nb_seen / len(ps)
assert 1.0 / nb_bins - tolerance < density < 1.0 / nb_bins + tolerance
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.CoarseSaltAndPepper(p="test", size_px=100)
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.CoarseSaltAndPepper(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarseSaltAndPepper(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarseSaltAndPepper(p=0.5, size_px=(4, 15),
per_channel=True, seed=1)
runtest_pickleable_uint8_img(aug, iterations=20)
# no more tests necessary here as Salt is just a tiny wrapper around
# ReplaceElementwise
class TestSalt(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Salt(p=0.5)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
# Salt() occasionally replaces with 127, which probably should be the center-point here anyways
assert np.all(observed >= 127)
def test_p_is_one(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Salt(p=1.0)
observed = aug.augment_image(base_img)
nb_pepper = np.sum(observed < 40)
nb_salt = np.sum(observed > 255 - 40)
assert nb_pepper == 0
assert nb_salt > 200
def test_pickleable(self):
aug = iaa.Salt(p=0.5, per_channel=True, seed=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarseSalt(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.CoarseSalt(p=0.5, size_px=100)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_size_px(self):
aug1 = iaa.CoarseSalt(p=0.5, size_px=100)
aug2 = iaa.CoarseSalt(p=0.5, size_px=10)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
ps1 = []
ps2 = []
for _ in sm.xrange(100):
observed1 = aug1.augment_image(base_img)
observed2 = aug2.augment_image(base_img)
p1 = np.mean(observed1 != 128)
p2 = np.mean(observed2 != 128)
ps1.append(p1)
ps2.append(p2)
assert 0.4 < np.mean(ps2) < 0.6
assert np.std(ps1)*1.5 < np.std(ps2)
def test_p_is_list(self):
aug = iaa.CoarseSalt(p=[0.2, 0.5], size_px=100)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
seen = [0, 0, 0]
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
diff_020 = abs(0.2 - p)
diff_050 = abs(0.5 - p)
if diff_020 < 0.025:
seen[0] += 1
elif diff_050 < 0.025:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] < 10
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_p_is_tuple(self):
aug = iaa.CoarseSalt(p=(0.0, 1.0), size_px=50)
base_img = np.zeros((50, 50, 1), dtype=np.uint8) + 128
ps = []
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
ps.append(p)
nb_bins = 5
hist, _ = np.histogram(ps, bins=nb_bins, range=(0.0, 1.0), density=False)
tolerance = 0.05
for nb_seen in hist:
density = nb_seen / len(ps)
assert 1.0 / nb_bins - tolerance < density < 1.0 / nb_bins + tolerance
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.CoarseSalt(p="test", size_px=100)
except Exception:
got_exception = True
assert got_exception
def test_size_px_or_size_percent_not_none(self):
got_exception = False
try:
_ = iaa.CoarseSalt(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarseSalt(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarseSalt(p=0.5, size_px=(4, 15),
per_channel=True, seed=1)
runtest_pickleable_uint8_img(aug, iterations=20)
# no more tests necessary here as Pepper is just a tiny wrapper around
# ReplaceElementwise
class TestPepper(unittest.TestCase):
def setUp(self):
reseed()
def test_probability_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Pepper(p=0.5)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
assert np.all(observed <= 128)
def test_probability_is_one(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Pepper(p=1.0)
observed = aug.augment_image(base_img)
nb_pepper = np.sum(observed < 40)
nb_salt = np.sum(observed > 255 - 40)
assert nb_pepper > 200
assert nb_salt == 0
def test_pickleable(self):
aug = iaa.Pepper(p=0.5, per_channel=True, seed=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarsePepper(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.CoarsePepper(p=0.5, size_px=100)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_size_px(self):
aug1 = iaa.CoarsePepper(p=0.5, size_px=100)
aug2 = iaa.CoarsePepper(p=0.5, size_px=10)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
ps1 = []
ps2 = []
for _ in sm.xrange(100):
observed1 = aug1.augment_image(base_img)
observed2 = aug2.augment_image(base_img)
p1 = np.mean(observed1 != 128)
p2 = np.mean(observed2 != 128)
ps1.append(p1)
ps2.append(p2)
assert 0.4 < np.mean(ps2) < 0.6
assert np.std(ps1)*1.5 < np.std(ps2)
def test_p_is_list(self):
aug = iaa.CoarsePepper(p=[0.2, 0.5], size_px=100)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
seen = [0, 0, 0]
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
diff_020 = abs(0.2 - p)
diff_050 = abs(0.5 - p)
if diff_020 < 0.025:
seen[0] += 1
elif diff_050 < 0.025:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] < 10
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_p_is_tuple(self):
aug = iaa.CoarsePepper(p=(0.0, 1.0), size_px=50)
base_img = np.zeros((50, 50, 1), dtype=np.uint8) + 128
ps = []
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
ps.append(p)
nb_bins = 5
hist, _ = np.histogram(ps, bins=nb_bins, range=(0.0, 1.0), density=False)
tolerance = 0.05
for nb_seen in hist:
density = nb_seen / len(ps)
assert 1.0 / nb_bins - tolerance < density < 1.0 / nb_bins + tolerance
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.CoarsePepper(p="test", size_px=100)
except Exception:
got_exception = True
assert got_exception
def test_size_px_or_size_percent_not_none(self):
got_exception = False
try:
_ = iaa.CoarsePepper(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarsePepper(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert
|
np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
|
numpy.allclose
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 3 14:18:22 2018
@author: <NAME>
Preprocessing functions helper
"""
import numpy as np
import itertools
from functionhelper.bunchdatatype import Bunch
dtype = np.float64
def normalize(A, new_range, old_range = None):
"""
Normalize the input dataset
INPUT
A Original dataset (numpy array) [rows are samples, cols are features]
new_range The range of data after normalizing
old_range The old range of data before normalizing
OUTPUT
Normalized dataset
"""
D = A.copy()
n, m = D.shape
for i in range(m):
v = D[:, i]
if old_range is None:
minv = np.nanmin(v)
maxv = np.nanmax(v)
else:
minv = old_range[0]
maxv = old_range[1]
if minv == maxv:
v = np.ones(n) * 0.5;
else:
v = new_range[0] + (new_range[1] - new_range[0]) * (v - minv) / (maxv - minv)
D[:, i] = v;
return D
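# Illustrative usage sketch (added for clarity; not part of the original
# helper): each column is rescaled independently into new_range using its own
# observed minimum and maximum.
# >>> demo = np.array([[1.0, 10.0], [3.0, 20.0], [2.0, 15.0]], dtype=dtype)
# >>> normalize(demo, [0, 1])
# array([[0. , 0. ],
#        [1. , 1. ],
#        [0.5, 0.5]])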
def load_dataset(path, percentTr, isNorm = False, new_range = [0, 1], old_range = None, class_col = -1):
"""
Load file containing dataset and convert data in the file to training and testing datasets. Class labels are located in the last column in the file
Note: Missing values in the input file must be marked with a question mark (?)
Xtr, Xtest, patClassIdTr, patClassIdTest = load_dataset(path, percentTr, True, [0, 1])
INPUT
path the path to the data file (including file name)
percentTr the percentage of data used for training (0 <= percentTr <= 1)
isNorm identify whether normalizing datasets or not, True => Normalized
new_range new range of datasets after normalization
old_range the range of original datasets before normalization (all features use the same range)
class_col -1: the class label is the last column in the dataset
otherwise: the class label is the first column in the dataset
OUTPUT
Xtr Training dataset
Xtest Testing dataset
patClassIdTr Training class labels
patClassIdTest Testing class labels
"""
lstData = []
with open(path) as f:
for line in f:
nums = np.fromstring(line.rstrip('\n').replace(',', ' ').replace('?', 'nan'), dtype=dtype, sep=' ').tolist()
if len(nums) > 0:
lstData.append(nums)
# if (a.size == 0):
# a = nums.reshape(1, -1)
# else:
# a = np.concatenate((a, nums.reshape(1, -1)), axis=0)
A = np.array(lstData, dtype=dtype)
YA, XA = A.shape
if class_col == -1:
X_data = A[:, 0:XA-1]
classId_dat = A[:, -1]
else:
# class label is the first column
X_data = A[:, 1:]
classId_dat = A[:, 0]
classLabels = np.unique(classId_dat)
# class labels must start from 1, class label = 0 means no label
if classLabels.size > 1 and np.size(np.nonzero(classId_dat < 1)) > 0:
classId_dat = classId_dat + 1 + np.min(classId_dat)
classLabels = classLabels + 1 + np.min(classLabels)
if isNorm:
X_data = normalize(X_data, new_range, old_range)
if percentTr != 1 and percentTr != 0:
noClasses = classLabels.size
Xtr = np.empty((0, XA - 1), dtype=dtype)
Xtest = np.empty((0, XA - 1), dtype=dtype)
patClassIdTr = np.array([], dtype=np.int64)
patClassIdTest = np.array([], dtype=np.int64)
for k in range(noClasses):
idx = np.nonzero(classId_dat == classLabels[k])[0]
# randomly shuffle indices of elements belonging to class classLabels[k]
if percentTr != 1 and percentTr != 0:
idx = idx[np.random.permutation(len(idx))]
noTrain = int(len(idx) * percentTr + 0.5)
# Attach data of class k to corresponding datasets
Xtr_tmp = X_data[idx[0:noTrain], :]
Xtr = np.concatenate((Xtr, Xtr_tmp), axis=0)
patClassId_tmp = np.full(noTrain, classLabels[k], dtype=np.int64)
patClassIdTr = np.append(patClassIdTr, patClassId_tmp)
patClassId_tmp = np.full(len(idx) - noTrain, classLabels[k], dtype=np.int64)
Xtest = np.concatenate((Xtest, X_data[idx[noTrain:len(idx)], :]), axis=0)
patClassIdTest = np.concatenate((patClassIdTest, patClassId_tmp))
else:
if percentTr == 1:
Xtr = X_data
patClassIdTr = np.array(classId_dat, dtype=np.int64)
Xtest = np.array([])
patClassIdTest = np.array([])
else:
Xtr = np.array([])
patClassIdTr = np.array([])
Xtest = X_data
patClassIdTest = np.array(classId_dat, dtype=np.int64)
return (Xtr, Xtest, patClassIdTr, patClassIdTest)
def load_dataset_without_class_label(path, percentTr, isNorm = False, new_range = [0, 1]):
"""
Load file containing dataset without class label and convert data in the file to training and testing datasets.
Xtr, Xtest = load_dataset_without_class_label(path, percentTr, True, [0, 1])
INPUT
path the path to the data file (including file name)
percentTr the percentage of data used for training (0 <= percentTr <= 1)
isNorm identify whether normalizing datasets or not, True => Normalized
new_range new range of datasets after normalization
OUTPUT
Xtr Training dataset
Xtest Testing dataset
"""
lstData = []
with open(path) as f:
for line in f:
nums = np.fromstring(line.rstrip('\n').replace(',', ' '), dtype=dtype, sep=' ').tolist()
if len(nums) > 0:
lstData.append(nums)
# if (X_data.size == 0):
# X_data = nums.reshape(1, -1)
# else:
# X_data = np.concatenate((X_data, nums.reshape(1, -1)), axis = 0)
X_data = np.array(lstData, dtype=dtype)
if isNorm:
X_data = normalize(X_data, new_range)
# randomly shuffle indices of elements in the dataset
numSamples = X_data.shape[0]
newInds =
|
np.random.permutation(numSamples)
|
numpy.random.permutation
|
import numpy as np
import pandas as pd
def check_types(X, y, formula):
if not formula:
if not isinstance(X, np.ndarray):
raise TypeError(f"X must be a numpy array if no formula is supplied, "
                f"got a {type(X)}")
if not isinstance(y, np.ndarray):
raise TypeError(f"y must be a numpy array if no formula is supplied, "
                f"got a {type(y)}")
if formula:
if not isinstance(X, pd.DataFrame):
raise TypeError(f"X must be a DataFrame if a formula is supplied, "
                f"got a {type(X)}")
def is_commensurate(X, y):
return X.shape[0] == y.shape[0]
def check_commensurate(X, y):
if not is_commensurate(X, y):
raise ValueError("X and y are not commensurate.")
def has_intercept_column(X):
return np.all(X[:, 0] == 1.0)
def check_intercept(X):
if not has_intercept_column(X):
raise ValueError("First column in matrix X is not an intercept.")
def has_same_length(v, w):
return v.shape[0] == w.shape[0]
def check_offset(y, offset):
if not has_same_length(y, offset):
raise ValueError("Offset array and y are not the same length.")
def check_sample_weights(y, sample_weights):
if not has_same_length(y, sample_weights):
raise ValueError("Sample weights array and y are not the same length.")
def has_converged(loss, loss_prev, tol):
if loss_prev == np.inf:
return False
rel_change =
|
np.abs((loss - loss_prev) / loss_prev)
|
numpy.abs
|
"""Model evaluation.
This module can be used to evaluate any kind of weather model (machine learning,
NWP, heuristics, human forecasting, etc.). This module is completely agnostic
of where the forecasts come from.
--- REFERENCES ---
<NAME>., 2009: Visualizing multiple measures of forecast quality. Weather
and Forecasting, 24 (2), 601-608.
<NAME>., <NAME>., and <NAME>., 2017: Machine learning for real-
time prediction of damaging straight-line convective wind. Weather and
Forecasting, 2017, in press.
"""
import copy
import pickle
import os.path
import numpy
import pandas
from matplotlib import pyplot
import sklearn.metrics
from gewittergefahr.gg_utils import histograms
from gewittergefahr.gg_utils import number_rounding as rounder
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.gg_utils import error_checking
from gewittergefahr.deep_learning import prediction_io
from gewittergefahr.deep_learning import deep_learning_utils as dl_utils
# TODO(thunderhoser): This file works for binary classification only.
TOLERANCE = 1e-6
DUMMY_TARGET_NAME = 'tornado_lead-time=0000-3600sec_distance=00000-10000m'
MIN_PROB_FOR_XENTROPY = numpy.finfo(float).eps
MAX_PROB_FOR_XENTROPY = 1. - numpy.finfo(float).eps
MIN_OPTIMIZATION_STRING = 'min'
MAX_OPTIMIZATION_STRING = 'max'
VALID_OPTIMIZATION_STRINGS = [
MIN_OPTIMIZATION_STRING, MAX_OPTIMIZATION_STRING
]
NUM_TRUE_POSITIVES_KEY = 'num_true_positives'
NUM_FALSE_POSITIVES_KEY = 'num_false_positives'
NUM_FALSE_NEGATIVES_KEY = 'num_false_negatives'
NUM_TRUE_NEGATIVES_KEY = 'num_true_negatives'
BSS_KEY = 'brier_skill_score'
BRIER_SCORE_KEY = 'brier_score'
RESOLUTION_KEY = 'resolution'
RELIABILITY_KEY = 'reliability'
UNCERTAINTY_KEY = 'uncertainty'
POD_BY_THRESHOLD_KEY = 'pod_by_threshold'
POFD_BY_THRESHOLD_KEY = 'pofd_by_threshold'
SR_BY_THRESHOLD_KEY = 'success_ratio_by_threshold'
MEAN_FORECAST_BY_BIN_KEY = 'mean_forecast_by_bin'
EVENT_FREQ_BY_BIN_KEY = 'event_frequency_by_bin'
FORECAST_PROBABILITIES_KEY = 'forecast_probabilities'
OBSERVED_LABELS_KEY = 'observed_labels'
BEST_THRESHOLD_KEY = 'best_prob_threshold'
ALL_THRESHOLDS_KEY = 'all_prob_thresholds'
NUM_EXAMPLES_BY_BIN_KEY = 'num_examples_by_forecast_bin'
DOWNSAMPLING_DICT_KEY = 'downsampling_dict'
EVALUATION_TABLE_KEY = 'evaluation_table'
POD_KEY = 'pod'
POFD_KEY = 'pofd'
SUCCESS_RATIO_KEY = 'success_ratio'
FOCN_KEY = 'focn'
ACCURACY_KEY = 'accuracy'
CSI_KEY = 'csi'
FREQUENCY_BIAS_KEY = 'frequency_bias'
PEIRCE_SCORE_KEY = 'peirce_score'
HEIDKE_SCORE_KEY = 'heidke_score'
AUC_KEY = 'auc'
AUPD_KEY = 'aupd'
EVALUATION_TABLE_COLUMNS = [
NUM_TRUE_POSITIVES_KEY, NUM_FALSE_POSITIVES_KEY, NUM_FALSE_NEGATIVES_KEY,
NUM_TRUE_NEGATIVES_KEY, POD_KEY, POFD_KEY, SUCCESS_RATIO_KEY, FOCN_KEY,
ACCURACY_KEY, CSI_KEY, FREQUENCY_BIAS_KEY, PEIRCE_SCORE_KEY,
HEIDKE_SCORE_KEY, POD_BY_THRESHOLD_KEY, POFD_BY_THRESHOLD_KEY, AUC_KEY,
SR_BY_THRESHOLD_KEY, AUPD_KEY, MEAN_FORECAST_BY_BIN_KEY,
EVENT_FREQ_BY_BIN_KEY, RELIABILITY_KEY, RESOLUTION_KEY, BSS_KEY
]
EVALUATION_DICT_KEYS = [
FORECAST_PROBABILITIES_KEY, OBSERVED_LABELS_KEY, BEST_THRESHOLD_KEY,
ALL_THRESHOLDS_KEY, NUM_EXAMPLES_BY_BIN_KEY, DOWNSAMPLING_DICT_KEY,
EVALUATION_TABLE_KEY
]
MIN_BINARIZATION_THRESHOLD = 0.
MAX_BINARIZATION_THRESHOLD = 1. + TOLERANCE
DEFAULT_NUM_RELIABILITY_BINS = 20
DEFAULT_FORECAST_PRECISION = 1e-4
THRESHOLD_ARG_FOR_UNIQUE_FORECASTS = 'unique_forecasts'
DEFAULT_GRID_SPACING = 0.01
FIGURE_WIDTH_INCHES = 15
FIGURE_HEIGHT_INCHES = 15
DEFAULT_COLOUR_MAP_OBJECT = pyplot.get_cmap('plasma')
def _check_forecast_probs_and_observed_labels(
forecast_probabilities, observed_labels):
"""Error-checks forecast probabilities and observed labels.
N = number of forecasts
:param forecast_probabilities: length-N numpy array with forecast
probabilities of some event (e.g., tornado).
:param observed_labels: length-N integer numpy array of observed labels
(1 for "yes", 0 for "no").
"""
error_checking.assert_is_numpy_array(
forecast_probabilities, num_dimensions=1)
error_checking.assert_is_geq_numpy_array(forecast_probabilities, 0.)
error_checking.assert_is_leq_numpy_array(forecast_probabilities, 1.)
num_forecasts = len(forecast_probabilities)
expected_dim = numpy.array([num_forecasts], dtype=int)
error_checking.assert_is_integer_numpy_array(observed_labels)
error_checking.assert_is_numpy_array(
observed_labels, exact_dimensions=expected_dim)
error_checking.assert_is_geq_numpy_array(observed_labels, 0)
error_checking.assert_is_leq_numpy_array(observed_labels, 1)
def _check_forecast_and_observed_labels(forecast_labels, observed_labels):
"""Error-checks forecast and observed labels.
N = number of forecasts
:param forecast_labels: length-N integer numpy array of forecast labels
(1 for "yes", 0 for "no").
:param observed_labels: Same but for observed labels.
"""
error_checking.assert_is_integer_numpy_array(forecast_labels)
error_checking.assert_is_numpy_array(forecast_labels, num_dimensions=1)
error_checking.assert_is_geq_numpy_array(forecast_labels, 0)
error_checking.assert_is_leq_numpy_array(forecast_labels, 1)
num_forecasts = len(forecast_labels)
expected_dim = numpy.array([num_forecasts], dtype=int)
error_checking.assert_is_integer_numpy_array(observed_labels)
error_checking.assert_is_numpy_array(
observed_labels, exact_dimensions=expected_dim)
error_checking.assert_is_geq_numpy_array(observed_labels, 0)
error_checking.assert_is_leq_numpy_array(observed_labels, 1)
def _pad_binarization_thresholds(thresholds):
"""Pads an array of binarization thresholds.
Specifically, this method ensures that the array contains 0 and a number
slightly greater than 1. This ensures that:
[1] For the lowest threshold, POD = POFD = 1, which is the top-right corner
of the ROC curve.
[2] For the highest threshold, POD = POFD = 0, which is the bottom-left
corner of the ROC curve.
:param thresholds: 1-D numpy array of binarization thresholds.
:return: thresholds: 1-D numpy array of binarization thresholds (possibly
with new elements).
"""
thresholds = numpy.sort(thresholds)
if thresholds[0] > MIN_BINARIZATION_THRESHOLD:
thresholds = numpy.concatenate((
numpy.array([MIN_BINARIZATION_THRESHOLD]), thresholds
))
if thresholds[-1] < MAX_BINARIZATION_THRESHOLD:
thresholds = numpy.concatenate((
thresholds, numpy.array([MAX_BINARIZATION_THRESHOLD])
))
return thresholds
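# Illustrative sketch (added comment; not in the original module): the padding
# guarantees the ROC curve runs from (POFD, POD) = (1, 1) at the lowest
# threshold down to (0, 0) at the highest one.
# >>> _pad_binarization_thresholds(numpy.array([0.7, 0.3]))
# array([0.      , 0.3     , 0.7     , 1.000001])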
def _split_forecast_probs_into_bins(forecast_probabilities, num_bins):
"""Splits forecast probabilities into bins.
N = number of forecasts
:param forecast_probabilities: length-N numpy array of forecast
probabilities.
:param num_bins: Number of bins into which forecasts will be discretized.
:return: bin_index_by_forecast: length-N numpy array of indices. If
bin_index_by_forecast[i] = j, the [i]th forecast belongs in the [j]th
bin.
"""
return histograms.create_histogram(
input_values=forecast_probabilities, num_bins=num_bins, min_value=0.,
max_value=1.
)[0]
def get_binarization_thresholds(
threshold_arg, forecast_probabilities=None,
forecast_precision=DEFAULT_FORECAST_PRECISION):
"""Returns list of binarization thresholds.
To understand the role of binarization thresholds, see
binarize_forecast_probs.
:param threshold_arg: Main threshold argument. May be in one of 3 formats.
[1] threshold_arg = "unique_forecasts". In this case all unique forecast
probabilities will become binarization thresholds.
[2] 1-D numpy array. In this case threshold_arg will be treated as an array
of binarization thresholds.
[3] Positive integer. In this case threshold_arg will be treated as the
number of binarization thresholds, equally spaced from 0...1.
:param forecast_probabilities:
[used only if threshold_arg = "unique_forecasts"]
1-D numpy array of forecast probabilities to binarize.
:param forecast_precision:
[used only if threshold_arg = "unique_forecasts"]
Before computing unique forecast probabilities, they will all be rounded
to the nearest `forecast_precision`. This prevents the number of
thresholds from becoming ridiculous (millions).
:return: binarization_thresholds: 1-D numpy array of binarization
thresholds.
:raises: ValueError: if threshold_arg cannot be interpreted.
"""
if isinstance(threshold_arg, str):
if threshold_arg != THRESHOLD_ARG_FOR_UNIQUE_FORECASTS:
error_string = (
'If string, threshold_arg must be "{0:s}". Instead, got '
'"{1:s}".'
).format(THRESHOLD_ARG_FOR_UNIQUE_FORECASTS, threshold_arg)
raise ValueError(error_string)
error_checking.assert_is_geq(forecast_precision, 0.)
error_checking.assert_is_leq(forecast_precision, 0.01)
binarization_thresholds = numpy.unique(rounder.round_to_nearest(
forecast_probabilities + 0., forecast_precision
))
elif isinstance(threshold_arg, numpy.ndarray):
binarization_thresholds = copy.deepcopy(threshold_arg)
error_checking.assert_is_numpy_array(
binarization_thresholds, num_dimensions=1)
error_checking.assert_is_geq_numpy_array(
binarization_thresholds, MIN_BINARIZATION_THRESHOLD)
error_checking.assert_is_leq_numpy_array(
binarization_thresholds, MAX_BINARIZATION_THRESHOLD)
else:
num_thresholds = copy.deepcopy(threshold_arg)
error_checking.assert_is_integer(num_thresholds)
error_checking.assert_is_geq(num_thresholds, 2)
binarization_thresholds = numpy.linspace(
0, 1, num=num_thresholds, dtype=float)
return _pad_binarization_thresholds(binarization_thresholds)
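# Illustrative sketch (added comment): with an integer argument, thresholds are
# equally spaced on [0, 1] and then padded by _pad_binarization_thresholds.
# >>> get_binarization_thresholds(threshold_arg=6)
# array([0.      , 0.2     , 0.4     , 0.6     , 0.8     , 1.      , 1.000001])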
def binarize_forecast_probs(forecast_probabilities, binarization_threshold):
"""Binarizes probabilistic forecasts, turning them into deterministic ones.
N = number of forecasts
:param forecast_probabilities: length-N numpy array with forecast
probabilities of some event (e.g., tornado).
:param binarization_threshold: Binarization threshold (f*). All forecasts
>= f* will be turned into "yes" forecasts; all forecasts < f* will be
turned into "no".
:return: forecast_labels: length-N integer numpy array of deterministic
forecasts (1 for "yes", 0 for "no").
"""
error_checking.assert_is_numpy_array(
forecast_probabilities, num_dimensions=1)
error_checking.assert_is_geq_numpy_array(forecast_probabilities, 0.)
error_checking.assert_is_leq_numpy_array(forecast_probabilities, 1.)
error_checking.assert_is_geq(
binarization_threshold, MIN_BINARIZATION_THRESHOLD)
error_checking.assert_is_leq(
binarization_threshold, MAX_BINARIZATION_THRESHOLD)
forecast_labels = numpy.full(len(forecast_probabilities), 0, dtype=int)
forecast_labels[forecast_probabilities >= binarization_threshold] = 1
return forecast_labels
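# Illustrative sketch (added comment): probabilities at or above the threshold
# become "yes" (1) forecasts; everything below becomes "no" (0).
# >>> binarize_forecast_probs(numpy.array([0.1, 0.5, 0.9]), 0.5)
# array([0, 1, 1])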
def find_best_binarization_threshold(
forecast_probabilities, observed_labels, threshold_arg,
criterion_function, optimization_direction=MAX_OPTIMIZATION_STRING,
forecast_precision=DEFAULT_FORECAST_PRECISION):
"""Finds the best binarization threshold.
:param forecast_probabilities: See documentation for
`_check_forecast_probs_and_observed_labels`.
:param observed_labels: See doc for
`_check_forecast_probs_and_observed_labels`.
:param threshold_arg: See doc for `get_binarization_thresholds`.
:param criterion_function: Criterion to be either minimized or maximized.
This must be a function that takes input `contingency_table_as_dict` and
returns a single float. See `get_csi` in this module for an example.
:param optimization_direction: Direction in which criterion function is
optimized. Options are "min" and "max".
:param forecast_precision: See doc for `get_binarization_thresholds`.
:return: best_threshold: Best binarization threshold.
:return: best_criterion_value: Value of criterion function at said
threshold.
:raises: ValueError: if `optimization_direction not in
VALID_OPTIMIZATION_STRINGS`.
"""
error_checking.assert_is_string(optimization_direction)
if optimization_direction not in VALID_OPTIMIZATION_STRINGS:
error_string = (
'\n\n{0:s}\nValid optimization directions (listed above) do not '
'include "{1:s}".'
).format(str(VALID_OPTIMIZATION_STRINGS), optimization_direction)
raise ValueError(error_string)
possible_thresholds = get_binarization_thresholds(
threshold_arg=threshold_arg,
forecast_probabilities=forecast_probabilities,
forecast_precision=forecast_precision)
num_thresholds = len(possible_thresholds)
criterion_values = numpy.full(num_thresholds, numpy.nan)
for i in range(num_thresholds):
these_forecast_labels = binarize_forecast_probs(
forecast_probabilities=forecast_probabilities,
binarization_threshold=possible_thresholds[i]
)
this_contingency_table_as_dict = get_contingency_table(
forecast_labels=these_forecast_labels,
observed_labels=observed_labels)
criterion_values[i] = criterion_function(this_contingency_table_as_dict)
if optimization_direction == MAX_OPTIMIZATION_STRING:
best_criterion_value = numpy.nanmax(criterion_values)
best_probability_threshold = possible_thresholds[
numpy.nanargmax(criterion_values)
]
else:
best_criterion_value = numpy.nanmin(criterion_values)
best_probability_threshold = possible_thresholds[
numpy.nanargmin(criterion_values)
]
return best_probability_threshold, best_criterion_value
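# Illustrative usage sketch (added comment; the argument arrays are assumed to
# be supplied by the caller): sweep all unique forecast values and keep the
# threshold that maximizes CSI (see get_csi below).
# >>> best_threshold, best_csi = find_best_binarization_threshold(
# ...     forecast_probabilities=forecast_probabilities,
# ...     observed_labels=observed_labels,
# ...     threshold_arg='unique_forecasts',
# ...     criterion_function=get_csi,
# ...     optimization_direction='max')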
def get_contingency_table(forecast_labels, observed_labels):
"""Computes contingency table.
N = number of forecasts
:param forecast_labels: See documentation for
_check_forecast_and_observed_labels.
:param observed_labels: See doc for _check_forecast_and_observed_labels.
:return: contingency_table_as_dict: Dictionary with the following keys.
contingency_table_as_dict['num_true_positives']: Number of true positives.
contingency_table_as_dict['num_false_positives']: Number of false positives.
contingency_table_as_dict['num_false_negatives']: Number of false negatives.
contingency_table_as_dict['num_true_negatives']: Number of true negatives.
"""
_check_forecast_and_observed_labels(forecast_labels, observed_labels)
true_positive_indices = numpy.where(numpy.logical_and(
forecast_labels == 1, observed_labels == 1
))[0]
false_positive_indices = numpy.where(numpy.logical_and(
forecast_labels == 1, observed_labels == 0
))[0]
false_negative_indices = numpy.where(numpy.logical_and(
forecast_labels == 0, observed_labels == 1
))[0]
true_negative_indices = numpy.where(numpy.logical_and(
forecast_labels == 0, observed_labels == 0
))[0]
return {
NUM_TRUE_POSITIVES_KEY: len(true_positive_indices),
NUM_FALSE_POSITIVES_KEY: len(false_positive_indices),
NUM_FALSE_NEGATIVES_KEY: len(false_negative_indices),
NUM_TRUE_NEGATIVES_KEY: len(true_negative_indices)
}
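# Illustrative sketch (added comment): four forecasts, one falling in each cell
# of the 2x2 contingency table.
# >>> get_contingency_table(numpy.array([1, 1, 0, 0]), numpy.array([1, 0, 1, 0]))
# {'num_true_positives': 1, 'num_false_positives': 1,
#  'num_false_negatives': 1, 'num_true_negatives': 1}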
def get_pod(contingency_table_as_dict):
"""Computes POD (probability of detection).
:param contingency_table_as_dict: Dictionary created by
get_contingency_table.
:return: probability_of_detection: POD.
"""
denominator = (
contingency_table_as_dict[NUM_TRUE_POSITIVES_KEY] +
contingency_table_as_dict[NUM_FALSE_NEGATIVES_KEY]
)
if denominator == 0:
return numpy.nan
numerator = float(contingency_table_as_dict[NUM_TRUE_POSITIVES_KEY])
return numerator / denominator
def get_fom(contingency_table_as_dict):
"""Computes FOM (frequency of misses).
:param contingency_table_as_dict: Dictionary created by
get_contingency_table.
:return: frequency_of_misses: FOM.
"""
return 1. - get_pod(contingency_table_as_dict)

def get_pofd(contingency_table_as_dict):
"""Computes POFD (probability of false detection).
:param contingency_table_as_dict: Dictionary created by
get_contingency_table.
:return: probability_of_false_detection: POFD.
"""
denominator = (
contingency_table_as_dict[NUM_FALSE_POSITIVES_KEY] +
contingency_table_as_dict[NUM_TRUE_NEGATIVES_KEY]
)
if denominator == 0:
return numpy.nan
numerator = float(contingency_table_as_dict[NUM_FALSE_POSITIVES_KEY])
return numerator / denominator

def get_npv(contingency_table_as_dict):
"""Computes NPV (negative predictive value).
:param contingency_table_as_dict: Dictionary created by
get_contingency_table.
:return: negative_predictive_value: NPV.
"""
return 1. - get_pofd(contingency_table_as_dict)

def get_success_ratio(contingency_table_as_dict):
"""Computes success ratio.
:param contingency_table_as_dict: Dictionary created by
get_contingency_table.
:return: success_ratio: Success ratio.
"""
denominator = (
contingency_table_as_dict[NUM_TRUE_POSITIVES_KEY] +
contingency_table_as_dict[NUM_FALSE_POSITIVES_KEY]
)
if denominator == 0:
return numpy.nan
numerator = float(contingency_table_as_dict[NUM_TRUE_POSITIVES_KEY])
return numerator / denominator

def get_far(contingency_table_as_dict):
"""Computes FAR (false-alarm rate).
:param contingency_table_as_dict: Dictionary created by
get_contingency_table.
:return: false_alarm_rate: FAR.
"""
return 1. - get_success_ratio(contingency_table_as_dict)

def get_dfr(contingency_table_as_dict):
"""Computes DFR (detection-failure ratio).
:param contingency_table_as_dict: Dictionary created by
get_contingency_table.
:return: detection_failure_ratio: DFR.
"""
denominator = (
contingency_table_as_dict[NUM_FALSE_NEGATIVES_KEY] +
contingency_table_as_dict[NUM_TRUE_NEGATIVES_KEY]
)
if denominator == 0:
return numpy.nan
numerator = float(contingency_table_as_dict[NUM_FALSE_NEGATIVES_KEY])
return numerator / denominator

def get_focn(contingency_table_as_dict):
"""Computes FOCN (frequency of correct nulls).
:param contingency_table_as_dict: Dictionary created by
get_contingency_table.
:return: frequency_of_correct_nulls: FOCN.
"""
return 1. - get_dfr(contingency_table_as_dict)

def get_accuracy(contingency_table_as_dict):
"""Computes accuracy.
:param contingency_table_as_dict: Dictionary created by
get_contingency_table.
:return: accuracy: Accuracy.
"""
denominator = (
contingency_table_as_dict[NUM_TRUE_POSITIVES_KEY] +
contingency_table_as_dict[NUM_FALSE_POSITIVES_KEY] +
contingency_table_as_dict[NUM_FALSE_NEGATIVES_KEY] +
contingency_table_as_dict[NUM_TRUE_NEGATIVES_KEY]
)
if denominator == 0:
return numpy.nan
numerator = float(
contingency_table_as_dict[NUM_TRUE_POSITIVES_KEY] +
contingency_table_as_dict[NUM_TRUE_NEGATIVES_KEY]
)
return numerator / denominator

def get_csi(contingency_table_as_dict):
"""Computes CSI (critical success index).
:param contingency_table_as_dict: Dictionary created by
get_contingency_table.
:return: critical_success_index: CSI.
"""
denominator = (
contingency_table_as_dict[NUM_TRUE_POSITIVES_KEY] +
contingency_table_as_dict[NUM_FALSE_POSITIVES_KEY] +
contingency_table_as_dict[NUM_FALSE_NEGATIVES_KEY]
)
if denominator == 0:
return numpy.nan
numerator = float(contingency_table_as_dict[NUM_TRUE_POSITIVES_KEY])
return numerator / denominator
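
# With the hypothetical `example_table` above (TP = 30, FP = 10, FN = 5,
# TN = 55):
#
#     get_csi(example_table)  # 30 / (30 + 10 + 5) = 0.667
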
def get_frequency_bias(contingency_table_as_dict):
"""Computes frequency bias.
:param contingency_table_as_dict: Dictionary created by
get_contingency_table.
:return: frequency_bias: Frequency bias.
"""
denominator = (
contingency_table_as_dict[NUM_TRUE_POSITIVES_KEY] +
contingency_table_as_dict[NUM_FALSE_NEGATIVES_KEY]
)
if denominator == 0:
return numpy.nan
numerator = float(
contingency_table_as_dict[NUM_TRUE_POSITIVES_KEY] +
contingency_table_as_dict[NUM_FALSE_POSITIVES_KEY]
)
return numerator / denominator
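
# With the same hypothetical counts, frequency bias = (30 + 10) / (30 + 5)
# = 1.143, meaning the event is forecast slightly more often than it is
# observed (bias > 1 indicates overforecasting, bias < 1 underforecasting):
#
#     get_frequency_bias(example_table)  # 1.143
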
def get_peirce_score(contingency_table_as_dict):
"""Computes Peirce score.
:param contingency_table_as_dict: Dictionary created by
get_contingency_table.
:return: peirce_score: Peirce score.
"""
return (
get_pod(contingency_table_as_dict) -
get_pofd(contingency_table_as_dict)
)

def get_heidke_score(contingency_table_as_dict):
"""Computes Heidke score.
:param contingency_table_as_dict: Dictionary created by
get_contingency_table.
:return: heidke_score: Heidke score.
"""
num_positives = (
contingency_table_as_dict[NUM_TRUE_POSITIVES_KEY] +
contingency_table_as_dict[NUM_FALSE_POSITIVES_KEY]
)
num_events = (
contingency_table_as_dict[NUM_TRUE_POSITIVES_KEY] +
contingency_table_as_dict[NUM_FALSE_NEGATIVES_KEY]
)
num_negatives = (
contingency_table_as_dict[NUM_TRUE_NEGATIVES_KEY] +
contingency_table_as_dict[NUM_FALSE_NEGATIVES_KEY]
)
num_non_events = (
contingency_table_as_dict[NUM_TRUE_NEGATIVES_KEY] +
contingency_table_as_dict[NUM_FALSE_POSITIVES_KEY]
)
num_examples = num_positives + num_negatives
try:
expected_num_correct = float(
num_positives * num_events + num_negatives * num_non_events
) / num_examples
except ZeroDivisionError:
return numpy.nan
numerator = float(
contingency_table_as_dict[NUM_TRUE_POSITIVES_KEY] +
contingency_table_as_dict[NUM_TRUE_NEGATIVES_KEY] -
expected_num_correct
)
denominator = num_examples - expected_num_correct
try:
return numerator / denominator
except ZeroDivisionError:
return numpy.nan
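
# Worked example for the Heidke score with the hypothetical counts above
# (TP = 30, FP = 10, FN = 5, TN = 55, so N = 100):
#
#     expected_num_correct = (40 * 35 + 60 * 65) / 100  # = 53.0
#     heidke_score = (30 + 55 - 53.0) / (100 - 53.0)    # = 0.681
#     get_heidke_score(example_table)                   # = 0.681
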
def get_brier_score(forecast_probabilities=None, observed_labels=None):
"""Computes Brier score.
N = number of forecasts
:param forecast_probabilities: See documentation for
`_check_forecast_probs_and_observed_labels`.
:param observed_labels: See doc for
`_check_forecast_probs_and_observed_labels`.
:return: brier_score: Brier score.
"""
_check_forecast_probs_and_observed_labels(
forecast_probabilities, observed_labels)
return numpy.mean((forecast_probabilities - observed_labels) ** 2)
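
# Minimal sketch for the Brier score (hypothetical arrays):
#
#     forecast_probabilities = numpy.array([0.9, 0.2, 0.7, 0.1])
#     observed_labels = numpy.array([1, 0, 1, 1])
#     get_brier_score(forecast_probabilities=forecast_probabilities,
#                     observed_labels=observed_labels)
#     # = mean([0.01, 0.04, 0.09, 0.81]) = 0.2375
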
def get_cross_entropy(forecast_probabilities=None, observed_labels=None):
"""Computes cross-entropy.
:param forecast_probabilities: See documentation for
`_check_forecast_probs_and_observed_labels`.
:param observed_labels: See doc for
`_check_forecast_probs_and_observed_labels`.
:return: cross_entropy: Cross-entropy.
"""
_check_forecast_probs_and_observed_labels(
forecast_probabilities, observed_labels)
forecast_probabilities = numpy.maximum(
forecast_probabilities, MIN_PROB_FOR_XENTROPY)
forecast_probabilities = numpy.minimum(
forecast_probabilities, MAX_PROB_FOR_XENTROPY)
    observed_labels = observed_labels.astype(float)
return -numpy.mean(
observed_labels * numpy.log2(forecast_probabilities) +
(1 - observed_labels) * numpy.log2(1 - forecast_probabilities)
)
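
# The clipping above keeps probabilities of exactly 0 or 1 from producing
# log2(0) = -inf.  Minimal sketch (hypothetical arrays; the result is in bits
# because base-2 logarithms are used):
#
#     forecast_probabilities = numpy.array([0.9, 0.2, 1.0, 0.1])
#     observed_labels = numpy.array([1, 0, 1, 1])
#     get_cross_entropy(forecast_probabilities=forecast_probabilities,
#                       observed_labels=observed_labels)
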
def get_points_in_roc_curve(
forecast_probabilities=None, observed_labels=None, threshold_arg=None,
forecast_precision=DEFAULT_FORECAST_PRECISION):
"""Determines points in ROC (receiver operating characteristic) curve.
N = number of forecasts
T = number of binarization thresholds
:param forecast_probabilities: See documentation for
`_check_forecast_probs_and_observed_labels`.
:param observed_labels: See doc for
`_check_forecast_probs_and_observed_labels`.
:param threshold_arg: See documentation for get_binarization_thresholds.
:param forecast_precision: See doc for get_binarization_thresholds.
:return: pofd_by_threshold: length-T numpy array of POFD values, to be
plotted on the x-axis.
:return: pod_by_threshold: length-T numpy array of POD values, to be plotted
on the y-axis.
"""
_check_forecast_probs_and_observed_labels(
forecast_probabilities, observed_labels)
binarization_thresholds = get_binarization_thresholds(
threshold_arg=threshold_arg,
forecast_probabilities=forecast_probabilities,
forecast_precision=forecast_precision)
num_thresholds = len(binarization_thresholds)
pofd_by_threshold = numpy.full(num_thresholds, numpy.nan)
    pod_by_threshold = numpy.full(num_thresholds, numpy.nan)
    for i in range(num_thresholds):
        these_forecast_labels = binarize_forecast_probs(
            forecast_probabilities=forecast_probabilities,
            binarization_threshold=binarization_thresholds[i]
        )
        this_contingency_table_as_dict = get_contingency_table(
            forecast_labels=these_forecast_labels,
            observed_labels=observed_labels)
        pofd_by_threshold[i] = get_pofd(this_contingency_table_as_dict)
        pod_by_threshold[i] = get_pod(this_contingency_table_as_dict)
    return pofd_by_threshold, pod_by_threshold
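
# Minimal sketch for plotting the ROC curve from these points (hypothetical
# inputs; matplotlib is not imported in this module, and `threshold_arg` is
# assumed to accept an explicit array of candidate thresholds, per
# `get_binarization_thresholds`):
#
#     pofd_by_threshold, pod_by_threshold = get_points_in_roc_curve(
#         forecast_probabilities=forecast_probabilities,
#         observed_labels=observed_labels,
#         threshold_arg=numpy.linspace(0, 1, num=101))
#
#     import matplotlib.pyplot as pyplot
#     pyplot.plot(pofd_by_threshold, pod_by_threshold)
#     pyplot.xlabel('POFD')
#     pyplot.ylabel('POD')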