prompt (string, lengths 15-655k) | completion (string, lengths 3-32.4k) | api (string, lengths 8-52)
---|---|---|
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 7 16:36:44 2021
@author: <NAME>
Distribution and use are not permitted without the knowledge of the author
"""
import tensorflow as tf
import numpy as np
from matplotlib import pyplot as plt
import os
from scipy.interpolate import make_interp_spline
"""
Control parameters and NN initialisations
"""
Re_tau1 = [180, 550, 1000, 2000]
loss_tol = 1e-20
dummy_idx = 60
layers = [1]+[30]*3 +[2] #DNN layers
num_iter = 60000 #max DNN iterations
print_skip = 1000 #printing NN outputs after every "nth" iteration
AdaAF = True
AdaN = 5.
Ini_a = 1./AdaN
#-----------------------------------------------------------------------------------------------------------------
"""
Get DNS data and Spline fitting
"""
for Re_tau in Re_tau1:
dummy_idx += 2
if Re_tau == 180:
U_tau = 0.57231059E-01
nu = 1/0.32500000E+04
#rho = 0.834 #rho_150C
data = np.loadtxt('DNS_data_channel/ReTau='+np.str(Re_tau)+'.txt')
#half channel data
y_plus, U_plus, uv_plus, uu_plus, vv_plus = data[:,1], data[:,2], data[:,10], data[:,3], data[:,4]
elif Re_tau == 550:
U_tau = 0.48904658E-01
nu = 1/0.11180000E+05
data = np.loadtxt('DNS_data_channel/ReTau='+np.str(Re_tau)+'.txt')
#half channel data
y_plus, U_plus, uv_plus, uu_plus, vv_plus = data[:,1], data[:,2], data[:,10], data[:,3], data[:,4]
elif Re_tau == 950:
Re_tau = 950
U_tau = 0.45390026E-01
nu = 1/0.20580000E+05
data = np.loadtxt('DNS_data_channel/ReTau='+np.str(Re_tau)+'.txt')
#half channel data
y_plus, U_plus, uv_plus, uu_plus, vv_plus = data[:,1], data[:,2], data[:,10], data[:,3], data[:,4]
elif Re_tau == 1000:
U_tau = 0.0499
nu = 5E-5
dPdx = 0.0025
#import data
data = np.loadtxt('DNS_data_channel/ReTau='+np.str(Re_tau)+'.txt')
#half channel data
y_plus, U_plus, uv_plus, uu_plus, vv_plus = data[:,0], data[:,1], data[:,2], data[:,3], data[:,4]
elif Re_tau == 2000:
U_tau = 0.41302030E-01
nu = 1/0.48500000E+05
#rho = (1.026 + 0.994) / 2 #rho_160F + rho_180F / 2
data = np.loadtxt('DNS_data_channel/ReTau='+np.str(Re_tau)+'.txt')
#half channel data
y_plus, U_plus, uv_plus, uu_plus, vv_plus = data[:,1], data[:,2], data[:,10], data[:,3], data[:,4]
elif Re_tau == 5200:
U_tau = 4.14872e-02
nu = 8.00000e-06
#import data
data = np.loadtxt('DNS_data_channel/ReTau='+np.str(Re_tau)+'.txt')
#half channel data
y_plus, U_plus = data[:,1], data[:,2]
else:
raise "Valid Re_tau = 180, 550, 950, 1000, 2000, 5200"
new_Re_tau = y_plus[-1]
dPdx_plus = -1/ new_Re_tau
#Curve fitting
shape = U_plus.shape
shape = shape[0]
U = np.zeros((shape), dtype = "float64")
uv = np.zeros((shape), dtype = "float64")
spl = make_interp_spline(y_plus, U_plus)
spl2 = make_interp_spline(y_plus, uv_plus)
dUdy_plus = (U_plus[1:] - U_plus[:-1]) / (y_plus[1:] - y_plus[:-1])
yp_train = np.linspace(0, new_Re_tau, num=int(new_Re_tau*3), endpoint = True)
num_training_pts = yp_train.shape
num_training_pts = num_training_pts[0]
#-----------------------------------------------------------------------------------------------------------------
"""
Neural Network
1. Fully connected architecture
"""
def xavier_init(size): # weight initialisation
in_dim = size[0]
out_dim = size[1]
xavier_stddev = np.sqrt(2.0/(in_dim + out_dim))
#variable creation in TensorFlow - initialisation
return tf.Variable(tf.truncated_normal([in_dim, out_dim], stddev=xavier_stddev,dtype=tf.float64,seed=1704), dtype=tf.float64)
"""A fully-connected NN"""
def DNN(X, layers,weights,biases):
L = len(layers)
H = X
for l in range(0,L-2): # (X*w(X*w + b) + b)...b) fully connected neural network
W = weights[l]
b = biases[l]
#H = tf.nn.tanh(tf.add(tf.matmul(H, W), b))
H = tf.tanh(AdaN*a*tf.add(tf.matmul(H, W), b) )# H - activation function?
#H = tf.tanh(a*tf.add(tf.matmul(H, W), b))
#H = tf.nn.tan(tf.add(tf.matmul(H, W), b))
#the loops are not in the same hierarchy as the loss functions
W = weights[-1]
b = biases[-1]
Y = tf.add(tf.matmul(H, W), b) # Y - output - final layer
return Y
if AdaAF:
a = tf.Variable(Ini_a, dtype=tf.float64)
else:
a = tf.constant(Ini_a, dtype=tf.float64)
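# Adaptive activation (AdaAF): `a` is a trainable slope initialised so that AdaN*a = 1,
# i.e. tanh(AdaN*a*z) starts out as plain tanh(z) and the effective slope adapts during training.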
L = len(layers)
weights = [xavier_init([layers[l], layers[l+1]]) for l in range(0, L-1)]
biases = [tf.Variable( tf.zeros((1, layers[l+1]),dtype=tf.float64)) for l in range(0, L-1)]
#-----------------------------------------------------------------------------------------------------------------
dnn_out = DNN((yp_train.reshape(-1,1)), layers, weights, biases) #fractional order - alpha
U_train = dnn_out[:,0]
uv_train = -tf.abs(dnn_out[:,1])
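# Mean streamwise momentum balance for fully developed channel flow in wall units,
# dU+/dy+ - <u'v'>+ = 1 + (dP/dx)+ * y+ : rhs below is the right-hand side and eq1
# approximates the left-hand side with a first-order finite difference.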
rhs = dPdx_plus*yp_train + 1
#yp_train = tf.stack(yp_train)
#U_x_train = tf.gradients(U_train, yp_train)[0]
U_x_train = (U_train[1:] - U_train[:-1])/ (yp_train[1:] - yp_train[:-1])
eq1 = U_x_train - uv_train[1:]
U_loss = tf.square(U_train[0] - U_plus[0]) + tf.square(U_train[-1] - U_plus[-1])
uv_loss = tf.square(uv_train[0] - uv_plus[0]) + tf.square(uv_train[-1] - uv_plus[-1])
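# Composite loss: PDE residual (weighted by 100) plus soft penalties pinning U and uv
# to the DNS values at the first and last grid points (wall and channel centreline).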
loss = 100*tf.reduce_mean(tf.abs(eq1 - rhs[1:])) + U_loss + uv_loss
optimizer = tf.train.AdamOptimizer(learning_rate=1.0E-4).minimize(loss)
loss_max = 1.0e16
lss = []
os.mkdir('raw_results/Re_tau ='+np.str(Re_tau)+'_coeff-aux-pts_beta='+np.str(dummy_idx))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(num_iter+1):
sess.run(optimizer)
loss_val = sess.run(loss)
lss.append([i, loss_val])
#if i % print_skip == 0:
if loss_val > loss_tol:
if i % print_skip == 0:
U_val = np.array(sess.run(U_train))
uv_val = np.array(sess.run(uv_train))
loss_val = sess.run(loss)
print("loss = "+np.str(loss_val)+"; iter ="+np.str(i))
fig= plt.figure()
plt.semilogx (yp_train.reshape((-1,1)), U_val.reshape(-1)/np.max(U_plus), 'r', label = "U_val")
plt.semilogx (y_plus.reshape((-1,1)), U_plus/np.max(U_plus) , 'k--', label = "U_dns")
plt.semilogx (yp_train.reshape((-1,1)), uv_val.reshape(-1), 'g', label = "uv_val")
plt.semilogx (y_plus.reshape((-1,1)), uv_plus , 'b--', label = "uv_dns")
plt.legend()
plt.xlabel("y+")
plt.ylabel("U(y+)/uv(y+)")
#plt.title('Couette Flow Re_tau ='+np.str(Re_tau)+'_coeff-aux-pts_beta='+np.str(dummy_idx))
plt.savefig('raw_results/Re_tau ='+np.str(Re_tau)+'_coeff-aux-pts_beta='+
| np.str(dummy_idx) | numpy.str |
from dataclasses import dataclass
import xarray as xr
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import numpy as np
from typing import List
from src.configure import params_from_file
from src.analysis import load_data, relative_improvement
from src.evaluation_geographic import GeographicValidation
from src.evaluation_plots import plot_histogram
from src.inference import Mask
@dataclass
class ScoreData():
percentile: xr.DataArray = None
ifs: xr.DataArray = None
dnn: xr.DataArray = None
class PlotSpatialHSSScores():
def __init__(self, data: ScoreData, percentile: float, configs: dict, out_path: str, plot_percentiles=True):
self.data = data
self.configs = configs
self.percentile = percentile
self.mask_threshold = -1
self.fname = f'{out_path}geographic_{percentile}th_percentiles_and_hss.png'
self.in_path = params_from_file('paths')
self.plot_percentiles = plot_percentiles
self.figsize = (19,10)
def get_coordinates(self):
ds = xr.open_dataset(f'{self.in_path.dataset_path}/{self.in_path.dataset_training}')
self.lats = ds.latitude
self.lons = ds.longitude
def plot_geographic_percentiles(self):
eval = GeographicValidation(self.lats, self.lons,
orography_flag=False,
mask_threshold=None,
clean_threshold=None,
show_coordinates=False)
data = self.data.percentile
eval.plot_single('Percentile', data, data,
configs=self.configs, single_plot=False)
def plot_ifs_geographic_hss_scores(self):
eval = GeographicValidation(self.lats, self.lons,
orography_flag=False,
mask_threshold=self.mask_threshold,
clean_threshold=None,
show_coordinates=False
)
metric_name = 'HSS'
data = self.data.ifs
self.configs['HSS']['title'] = None
self.configs['HSS']['cbar_title'] = 'IFS HSS'
eval.plot_single(metric_name, data, data,
configs=self.configs, single_plot=False)
def plot_hss_geographic_hss_scores(self):
eval = GeographicValidation(self.lats, self.lons,
orography_flag=False,
mask_threshold=self.mask_threshold,
clean_threshold=None,
show_coordinates=False
)
metric_name = 'HSS'
self.configs['HSS']['title'] = None
self.configs['HSS']['cbar_title'] = 'DNN (WMSE-MS-SSIM) HSS'
data = self.data.dnn
eval.plot_single(metric_name, data, data,
configs=self.configs, single_plot=False)
def plot(self):
self.get_coordinates()
plt.rcParams.update({'font.size': 11})
if self.plot_percentiles:
plt.figure(figsize=self.figsize, dpi=300)
ax1 = plt.subplot(311)
ax1.annotate("a", ha="center", va="center", size=13,
xy=(0.985, 0.945), xycoords=ax1,
bbox=dict(boxstyle="square,pad=0.3", fc="white", ec="k", lw=1))
self.plot_geographic_percentiles()
else:
plt.figure(figsize=self.figsize, dpi=300)
if self.plot_percentiles:
ax2 = plt.subplot(312)
annotate = "b"
else:
ax2 = plt.subplot(211)
annotate = "a"
ax2.annotate(annotate, ha="center", va="center", size=13,
xy=(0.985, 0.945), xycoords=ax2,
bbox=dict(boxstyle="square,pad=0.3", fc="white", ec="k", lw=1))
self.plot_ifs_geographic_hss_scores()
if self.plot_percentiles:
ax3 = plt.subplot(313)
annotate = "c"
else:
ax3 = plt.subplot(212)
annotate = "b"
ax3.annotate(annotate, ha="center", va="center", size=13,
xy=(0.985, 0.945), xycoords=ax3,
bbox=dict(boxstyle="square,pad=0.3", fc="white", ec="k", lw=1))
self.plot_hss_geographic_hss_scores()
plt.tight_layout()
if self.fname is not None:
print(self.fname)
plt.savefig(self.fname, dpi=300, format='png', bbox_inches='tight')
plt.show()
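# Hedged usage sketch (variable names and paths are illustrative; the ScoreData fields
# are expected to be xr.DataArray objects and `configs` the plotting config dict):
#   scores = ScoreData(percentile=p_da, ifs=ifs_hss_da, dnn=dnn_hss_da)
#   PlotSpatialHSSScores(scores, percentile=95, configs=configs,
#                        out_path='/path/to/figures/').plot()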
class PlotSingleFrames():
def __init__(self, dnn_data, ifs_data, trmm_data,
timestamps=['2012-07-16T00', '2013-07-16T00', '2014-07-16T00']):
self.dnn_data = dnn_data
self.ifs_data = ifs_data
self.trmm_data = trmm_data
self.min_precipitation_threshold_in_mm_per_3hours = 0.1
path = '/path/to/training_dataset'
self.ds = xr.open_dataset(path, chunks={'time': 1})
self.lats = self.ds.latitude
self.lons = self.ds.longitude
self.color_map = 'YlGnBu'
self.vmin = 0
self.vmax = 11.5
self.timestamps = timestamps
self.fname = f'/path/to/figures/single_frames.png'
def get_time_indeces(self):
first_test_set_index = np.where(self.ds.time == np.datetime64(f'2012-06-01T00:00:00.000000000'))[0][0]
index_2012 = np.where(self.ds.time == np.datetime64(f'{self.timestamps[0]}:00:00.000000000'))[0][0] - first_test_set_index
index_2013 = np.where(self.ds.time == np.datetime64(f'{self.timestamps[1]}:00:00.000000000'))[0][0] - first_test_set_index
index_2014 = np.where(self.ds.time == np.datetime64(f'{self.timestamps[2]}:00:00.000000000'))[0][0] - first_test_set_index
self.time_idx = [index_2012, index_2013, index_2014]
def get_data(self):
self.get_time_indeces()
self.dnn_frames = []
self.ifs_frames = []
self.trmm_frames = []
for t in self.time_idx:
self.dnn_frames.append(np.where(self.dnn_data[t] <
self.min_precipitation_threshold_in_mm_per_3hours,
0, self.dnn_data[t]))
self.ifs_frames.append(np.where(self.ifs_data[t] <
self.min_precipitation_threshold_in_mm_per_3hours,
0, self.ifs_data[t]))
self.trmm_frames.append(np.where(self.trmm_data[t] <
self.min_precipitation_threshold_in_mm_per_3hours,
0, self.trmm_data[t]))
def single_plot(self, data,
show_cbar=True,
show_latitude_labels=True,
show_longitude_labels=True,
):
m = Basemap(llcrnrlon=self.lons[0], llcrnrlat=self.lats[0],
urcrnrlon=self.lons[-1], urcrnrlat=self.lats[-1],
projection='merc', lon_0=0, lat_0=20, resolution='c')
m.drawparallels([-30, 0, 30],
labels=[show_latitude_labels, 0, 0, 0], linewidth=.5)
m.drawmeridians([-120, -60, 0, 60, 120],
labels=[0, 0, 0, show_longitude_labels], linewidth=.5)
m.drawcoastlines()
Lon, Lat = np.meshgrid(self.lons, self.lats)
x, y = m(Lon, Lat)
m.pcolormesh(x, y, data, vmin=self.vmin, vmax=self.vmax, cmap=self.color_map)
if show_cbar:
plt.colorbar(fraction=0.016, pad=0.04, extend='both', label=r'Precipitation [mm/3hr]')
def plot(self):
self.get_data()
plt.figure(figsize=(15, 6))
plt.rcParams.update({'font.size': 11})
for i in range(len(self.time_idx)):
plt.subplot(3,3,1+3*i)
plt.title(f'IFS, time: {self.timestamps[i]}')
self.single_plot(self.ifs_frames[i], show_cbar=False,
show_latitude_labels=True,
show_longitude_labels=i==2)
plt.subplot(3,3,2+3*i)
plt.title(f'DNN (WMSE-MS-SSIM), time: {self.timestamps[i]}')
self.single_plot(self.dnn_frames[i], show_cbar=False,
show_latitude_labels=False,
show_longitude_labels=i==2)
plt.subplot(3,3,3+3*i)
plt.title(f'TRMM, time: {self.timestamps[i]}')
self.single_plot(self.trmm_frames[i], show_cbar=True,
show_latitude_labels=False,
show_longitude_labels=i==2)
plt.tight_layout()
if self.fname is not None:
print(self.fname)
plt.savefig(self.fname, dpi=300, format='png')
plt.show()
class ScoresPerPercentile():
def __init__(self):
self.heidke_skill_score = {}
self.f1 = {}
self.critical_success_index = {}
self.false_alarm_ratio = {}
self.probability_of_detection = {}
def set_data(self, data, percentile):
self.heidke_skill_score[percentile] = data['heidke_skill_score'][0]
self.f1[percentile] = data['f1'][0]
self.critical_success_index[percentile] = data['critical_success_index'][0]
self.false_alarm_ratio[percentile] = data['false_alarm_ratio'][0]
self.probability_of_detection[percentile] = data['probability_of_detection'][0]
@dataclass
class ModelTestSetData():
dnn = None
dnn_mssim = None
dnn_weighted = None
linear = None
qm = None
raw_ifs = None
class PlotHistograms():
def __init__(self, path='/path/to/models/'):
self.path = path
self.model_file_names = {
'dnn_weighted': 'dnn_weighted.npy',
'dnn_mssim': 'dnn_mssim.npy',
'dnn': 'dnn.npy',
'qm': 'qm.npy',
'linear': 'linear.npy'}
def load_data(self):
fname = f"{self.path}/{self.model_file_names['dnn_weighted']}"
self.dnn_weighted = np.load(fname)[0]
fname = f"{self.path}/{self.model_file_names['dnn']}"
self.dnn_mse = np.load(fname)[0]
fname = f"{self.path}/{self.model_file_names['dnn']}"
self.trmm = np.load(fname)[1]
fname = f"{self.path}/{self.model_file_names['dnn']}"
self.ifs = np.load(fname)[2]
fname = f"{self.path}/{self.model_file_names['dnn_mssim']}"
self.dnn_mssim = np.load(fname)[0]
fname = f"{self.path}/{self.model_file_names['qm']}"
self.qm = np.load(fname)[0]
fname = f"{self.path}/{self.model_file_names['linear']}"
self.linear =
| np.load(fname) | numpy.load |
import traceback
from collections import OrderedDict
from enum import Enum
from functools import reduce
from math import pi
from typing import Any, Callable, Dict, Iterator, List, MutableMapping, NamedTuple, Optional, Set, Tuple, Union
import numpy as np
import SimpleITK
from scipy.spatial.distance import cdist
from sympy import symbols
from .. import autofit as af
from ..algorithm_describe_base import AlgorithmProperty, Register
from ..channel_class import Channel
from ..class_generator import enum_register
from ..mask_partition_utils import BorderRim, MaskDistanceSplit
from ..universal_const import UNIT_SCALE, Units
from ..utils import class_to_dict
from .measurement_base import AreaType, Leaf, MeasurementEntry, MeasurementMethodBase, Node, PerComponent
# TODO change image to channel in signature of measurement calculate_property
class ProhibitedDivision(Exception):
pass
class SettingsValue(NamedTuple):
function: Callable
help_message: str
arguments: Optional[dict]
is_component: bool
default_area: Optional[AreaType] = None
class ComponentsInfo(NamedTuple):
segmentation_components: np.ndarray
mask_components: np.ndarray
components_translation: Dict[int, List[int]]
def empty_fun(_a0=None, _a1=None):
"""This function is be used as dummy reporting function."""
pass
MeasurementValueType = Union[float, List[float], str]
MeasurementResultType = Tuple[MeasurementValueType, str]
MeasurementResultInputType = Tuple[MeasurementValueType, str, Tuple[PerComponent, AreaType]]
FILE_NAME_STR = "File name"
class MeasurementResult(MutableMapping[str, MeasurementResultType]):
"""
Class for storing measurement info.
"""
def __init__(self, components_info: ComponentsInfo):
self.components_info = components_info
self._data_dict = OrderedDict()
self._units_dict: Dict[str, str] = dict()
self._type_dict: Dict[str, Tuple[PerComponent, AreaType]] = dict()
self._units_dict["Mask component"] = ""
self._units_dict["Segmentation component"] = ""
def __str__(self):
text = ""
for key, val in self._data_dict.items():
text += f"{key}: {val}; type {self._type_dict[key]}, units {self._units_dict[key]}\n"
return text
def __setitem__(self, k: str, v: MeasurementResultInputType) -> None:
self._data_dict[k] = v[0]
self._units_dict[k] = v[1]
self._type_dict[k] = v[2]
if k == FILE_NAME_STR:
self._data_dict.move_to_end(FILE_NAME_STR, False)
def __delitem__(self, v: str) -> None:
del self._data_dict[v]
del self._units_dict[v]
del self._type_dict[v]
def __getitem__(self, k: str) -> MeasurementResultType:
return self._data_dict[k], self._units_dict[k]
def __len__(self) -> int:
return len(self._data_dict)
def __iter__(self) -> Iterator[str]:
return iter(self._data_dict)
def set_filename(self, path_to_file: str):
"""
Set name of file to be presented as first position.
"""
self._data_dict[FILE_NAME_STR] = path_to_file
self._type_dict[FILE_NAME_STR] = PerComponent.No, AreaType.ROI
self._units_dict[FILE_NAME_STR] = ""
self._data_dict.move_to_end(FILE_NAME_STR, False)
def get_component_info(self) -> Tuple[bool, bool]:
"""
Get information which type of components are in storage.
:return: has_mask_components, has_segmentation_components
"""
has_mask_components = any([x == PerComponent.Yes and y != AreaType.ROI for x, y in self._type_dict.values()])
has_segmentation_components = any(
[x == PerComponent.Yes and y == AreaType.ROI for x, y in self._type_dict.values()]
)
return has_mask_components, has_segmentation_components
def get_labels(self) -> List[str]:
"""Get labels for measurement. Base are keys of this storage.
If has mask components, or has segmentation_components then add this labels"""
has_mask_components, has_segmentation_components = self.get_component_info()
labels = list(self._data_dict.keys())
index = 1 if FILE_NAME_STR in self._data_dict else 0
if has_mask_components:
labels.insert(index, "Mask component")
if has_segmentation_components:
labels.insert(index, "Segmentation component")
return labels
def get_units(self) -> List[str]:
return [self._units_dict[x] for x in self.get_labels()]
def get_global_names(self):
"""Get names for only parameters which are not 'PerComponent.Yes'"""
labels = list(self._data_dict.keys())
return [x for x in labels if self._type_dict[x][0] != PerComponent.Yes]
def get_global_parameters(self):
"""Get only parameters which are not 'PerComponent.Yes'"""
if FILE_NAME_STR in self._data_dict:
name = self._data_dict[FILE_NAME_STR]
res = [name]
iterator = iter(self._data_dict.keys())
next(iterator)
else:
res = []
iterator = iter(self._data_dict.keys())
for el in iterator:
per_comp = self._type_dict[el][0]
val = self._data_dict[el]
if per_comp != PerComponent.Yes:
res.append(val)
return res
def get_separated(self) -> List[List[MeasurementValueType]]:
"""Get measurements separated for each component"""
has_mask_components, has_segmentation_components = self.get_component_info()
if not (has_mask_components or has_segmentation_components):
return [list(self._data_dict.values())]
if has_mask_components and has_segmentation_components:
translation = self.components_info.components_translation
component_info = [(x, y) for x in translation.keys() for y in translation[x]]
elif has_mask_components:
component_info = [(0, x) for x in self.components_info.mask_components]
else:
component_info = [(x, 0) for x in self.components_info.segmentation_components]
counts = len(component_info)
mask_to_pos = {val: i for i, val in enumerate(self.components_info.mask_components)}
segmentation_to_pos = {val: i for i, val in enumerate(self.components_info.segmentation_components)}
if FILE_NAME_STR in self._data_dict:
name = self._data_dict[FILE_NAME_STR]
res = [[name] for _ in range(counts)]
iterator = iter(self._data_dict.keys())
next(iterator)
else:
res = [[] for _ in range(counts)]
iterator = iter(self._data_dict.keys())
if has_segmentation_components:
for i, num in enumerate(component_info):
res[i].append(num[0])
if has_mask_components:
for i, num in enumerate(component_info):
res[i].append(num[1])
for el in iterator:
per_comp, area_type = self._type_dict[el]
val = self._data_dict[el]
if per_comp != PerComponent.Yes:
for i in range(counts):
res[i].append(val)
else:
if area_type == AreaType.ROI:
for i, (seg, _mask) in enumerate(component_info):
res[i].append(val[segmentation_to_pos[seg]])
else:
for i, (_seg, mask) in enumerate(component_info):
res[i].append(val[mask_to_pos[mask]])
return res
class MeasurementProfile:
PARAMETERS = ["name", "chosen_fields", "reversed_brightness", "use_gauss_image", "name_prefix"]
def __init__(self, name, chosen_fields: List[MeasurementEntry], name_prefix=""):
self.name = name
self.chosen_fields: List[MeasurementEntry] = chosen_fields
self._need_mask = False
for cf_val in chosen_fields:
self._need_mask = self._need_mask or self.need_mask(cf_val.calculation_tree)
self.name_prefix = name_prefix
def to_dict(self):
return {"name": self.name, "chosen_fields": self.chosen_fields, "name_prefix": self.name_prefix}
def need_mask(self, tree):
if isinstance(tree, Leaf):
return tree.area == AreaType.Mask or tree.area == AreaType.Mask_without_ROI
else:
return self.need_mask(tree.left) or self.need_mask(tree.right)
def _need_mask_without_segmentation(self, tree):
if isinstance(tree, Leaf):
return tree.area == AreaType.Mask_without_ROI
else:
return self._need_mask_without_segmentation(tree.left) or self._need_mask_without_segmentation(tree.right)
def _get_par_component_and_area_type(self, tree: Union[Node, Leaf]) -> Tuple[PerComponent, AreaType]:
if isinstance(tree, Leaf):
method = MEASUREMENT_DICT[tree.name]
area_type = method.area_type(tree.area)
if tree.per_component == PerComponent.Mean:
return PerComponent.No, area_type
return tree.per_component, area_type
else:
left_par, left_area = self._get_par_component_and_area_type(tree.left)
right_par, right_area = self._get_par_component_and_area_type(tree.right)
if PerComponent.Yes == left_par or PerComponent.Yes == right_par:
res_par = PerComponent.Yes
else:
res_par = PerComponent.No
area_set = {left_area, right_area}
if len(area_set) == 1:
res_area = area_set.pop()
elif AreaType.ROI in area_set:
res_area = AreaType.ROI
else:
res_area = AreaType.Mask_without_ROI
return res_par, res_area
def get_channels_num(self) -> Set[Channel]:
resp = set()
for el in self.chosen_fields:
resp.update(el.get_channel_num(MEASUREMENT_DICT))
return resp
def __str__(self):
text = "Set name: {}\n".format(self.name)
if self.name_prefix != "":
text += "Name prefix: {}\n".format(self.name_prefix)
text += "Measurements list:\n"
for el in self.chosen_fields:
text += "{}\n".format(el.name)
return text
def get_component_info(self, unit: Units):
"""
:return: list[((str, str), bool)]
"""
res = []
# Fixme remove binding to 3 dimensions
for el in self.chosen_fields:
res.append(
(
(self.name_prefix + el.name, el.get_unit(unit, 3)),
self._is_component_measurement(el.calculation_tree),
)
)
return res
def get_parameters(self):
return class_to_dict(self, *self.PARAMETERS)
def is_any_mask_measurement(self):
for el in self.chosen_fields:
if self.need_mask(el.calculation_tree):
return True
return False
def _is_component_measurement(self, node):
if isinstance(node, Leaf):
return node.per_component == PerComponent.Yes
else:
return self._is_component_measurement(node.left) or self._is_component_measurement(node.right)
def calculate_tree(
self, node: Union[Node, Leaf], segmentation_mask_map: ComponentsInfo, help_dict: dict, kwargs: dict
) -> Tuple[Union[float, np.ndarray], symbols, AreaType]:
"""
Main function for calculating a tree of measurements. It is executed recursively
:param node: measurement to calculate
:param segmentation_mask_map: map from mask segmentation components to mask components. Needed for division
:param help_dict: dict to cache calculation results. It reduces recalculation of the same measurements.
:param kwargs: additional info needed by measurements
:return: measurement value
"""
if isinstance(node, Leaf):
method: MeasurementMethodBase = MEASUREMENT_DICT[node.name]
kw = dict(kwargs)
kw.update(node.dict)
hash_str = hash_fun_call_name(method, node.dict, node.area, node.per_component, node.channel)
area_type = method.area_type(node.area)
if hash_str in help_dict:
val = help_dict[hash_str]
else:
if node.channel is not None:
kw["channel"] = kw[f"chanel_{node.channel}"]
kw["channel_num"] = node.channel
else:
kw["channel_num"] = -1
kw["help_dict"] = help_dict
kw["_area"] = node.area
kw["_per_component"] = node.per_component
kw["_cache"] = True
if area_type == AreaType.Mask:
kw["area_array"] = kw["mask"]
elif area_type == AreaType.Mask_without_ROI:
kw["area_array"] = kw["mask_without_segmentation"]
elif area_type == AreaType.ROI:
kw["area_array"] = kw["segmentation"]
else:
raise ValueError(f"Unknown area type {node.area}")
if node.per_component != PerComponent.No:
kw["_cache"] = False
val = []
area_array = kw["area_array"]
if area_type == AreaType.ROI:
components = segmentation_mask_map.segmentation_components
else:
components = segmentation_mask_map.mask_components
for i in components:
kw["area_array"] = area_array == i
val.append(method.calculate_property(**kw))
if node.per_component == PerComponent.Mean:
val = np.mean(val) if len(val) else 0
else:
val = np.array(val)
else:
val = method.calculate_property(**kw)
help_dict[hash_str] = val
unit: symbols = method.get_units(3) if kw["channel"].shape[0] > 1 else method.get_units(2)
if node.power != 1:
return pow(val, node.power), pow(unit, node.power), area_type
return val, unit, area_type
elif isinstance(node, Node):
left_res, left_unit, left_area = self.calculate_tree(node.left, segmentation_mask_map, help_dict, kwargs)
right_res, right_unit, right_area = self.calculate_tree(
node.right, segmentation_mask_map, help_dict, kwargs
)
if node.op == "/":
if isinstance(left_res, np.ndarray) and isinstance(right_res, np.ndarray) and left_area != right_area:
area_set = {left_area, right_area}
if area_set == {AreaType.ROI, AreaType.Mask_without_ROI}:
raise ProhibitedDivision("This division is prohibited")
if area_set == {AreaType.ROI, AreaType.Mask}:
res = []
# TODO Test this part of code
for val, num in zip(left_res, segmentation_mask_map.segmentation_components):
div_vals = segmentation_mask_map.components_translation[num]
if len(div_vals) != 1:
raise ProhibitedDivision("Cannot calculate when object do not belongs to one mask area")
if left_area == AreaType.ROI:
res.append(val / right_res[div_vals[0] - 1])
else:
res.append(right_res[div_vals[0] - 1] / val)
return np.array(res), left_unit / right_unit, AreaType.ROI
left_area = AreaType.Mask_without_ROI
return left_res / right_res, left_unit / right_unit, left_area
raise ValueError("Wrong measurement: {}".format(node))
@staticmethod
def get_segmentation_to_mask_component(segmentation: np.ndarray, mask: Optional[np.ndarray]) -> ComponentsInfo:
"""
Calculate map from segmentation component num to mask component num
:param segmentation: numpy array with segmentation labeled as positive integers
:param mask: numpy array with mask labeled as positive integer
:return: map
"""
components = np.unique(segmentation)
if components[0] == 0 or components[0] is None:
components = components[1:]
mask_components = np.unique(mask)
if mask_components[0] == 0 or mask_components[0] is None:
mask_components = mask_components[1:]
res = OrderedDict()
if mask is None:
res = {i: [] for i in components}
elif np.max(mask) == 1:
res = {i: [1] for i in components}
else:
for num in components:
res[num] = list(np.unique(mask[segmentation == num]))
return ComponentsInfo(components, mask_components, res)
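# Example of the resulting mapping: a segmentation with components {1, 2} inside a
# single binary mask (np.max(mask) == 1) gives components_translation == {1: [1], 2: [1]}.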
def get_component_and_area_info(self) -> List[Tuple[PerComponent, AreaType]]:
"""For each measurement check if is per component and in which types """
res = []
for el in self.chosen_fields:
tree = el.calculation_tree
res.append(self._get_par_component_and_area_type(tree))
return res
def calculate(
self,
channel: np.ndarray,
segmentation: np.ndarray,
mask: Optional[np.ndarray],
voxel_size,
result_units: Units,
range_changed: Callable[[int, int], Any] = None,
step_changed: Callable[[int], Any] = None,
time: int = 0,
time_pos: int = 0,
**kwargs,
) -> MeasurementResult:
"""
Calculate measurements on given set of parameters
:param channel: main channel on which measurements should be calculated
:param segmentation: array with segmentation labeled as positive
:param full_mask:
:param mask:
:param voxel_size:
:param result_units:
:param range_changed: callback function to set information about steps range
:param step_changed: callback function to set information about steps done
:param time: which data point should be measured
:param time_pos: axis of time
:param kwargs: additional data required by measurements, e.g. additional channels
:return: measurements
"""
def get_time(array: np.ndarray):
if array is not None and array.ndim == 4:
return array.take(time, axis=time_pos)
return array
if range_changed is None:
range_changed = empty_fun
if step_changed is None:
step_changed = empty_fun
if self._need_mask and mask is None:
raise ValueError("measurement need mask")
channel = channel.astype(float)
help_dict = dict()
segmentation_mask_map = self.get_segmentation_to_mask_component(segmentation, mask)
result = MeasurementResult(segmentation_mask_map)
result_scalar = UNIT_SCALE[result_units.value]
kw = {
"channel": get_time(channel),
"segmentation": get_time(segmentation),
"mask": get_time(mask),
"voxel_size": voxel_size,
"result_scalar": result_scalar,
}
for el in kwargs.keys():
if not el.startswith("channel_"):
raise ValueError(f"unknown parameter {el} of calculate function")
for num in self.get_channels_num():
if f"channel_{num}" not in kwargs:
raise ValueError(f"channel_{num} need to be passed as argument of calculate function")
kw.update(kwargs)
for el in self.chosen_fields:
if self._need_mask_without_segmentation(el.calculation_tree):
mm = mask.copy()
mm[kw["segmentation"] > 0] = 0
kw["mask_without_segmentation"] = mm
break
range_changed(0, len(self.chosen_fields))
for i, el in enumerate(self.chosen_fields):
step_changed(i)
tree, user_name = el.calculation_tree, el.name
component_and_area = self._get_par_component_and_area_type(tree)
try:
val, unit, _area = self.calculate_tree(tree, segmentation_mask_map, help_dict, kw)
if isinstance(val, np.ndarray):
val = list(val)
result[self.name_prefix + user_name] = val, str(unit).format(str(result_units)), component_and_area
except ZeroDivisionError:
result[self.name_prefix + user_name] = "Div by zero", "", component_and_area
except TypeError:
traceback.print_exc()
result[self.name_prefix + user_name] = "None div", "", component_and_area
except AttributeError:
result[self.name_prefix + user_name] = "No attribute", "", component_and_area
except ProhibitedDivision as e:
result[self.name_prefix + user_name] = e.args[0], "", component_and_area
return result
def calculate_main_axis(area_array: np.ndarray, channel: np.ndarray, voxel_size):
# TODO check if it produces good values
if len(channel.shape) == 4:
if channel.shape[0] != 1:
raise ValueError("This measurements do not support time data")
channel = channel[0]
cut_img = np.copy(channel)
cut_img[area_array == 0] = 0
if np.all(cut_img == 0):
return (0,) * len(voxel_size)
orientation_matrix, _ = af.find_density_orientation(cut_img, voxel_size, 1)
center_of_mass = af.density_mass_center(cut_img, voxel_size)
positions = np.array(np.nonzero(cut_img), dtype=np.float64)
for i, v in enumerate(reversed(voxel_size), start=1):
positions[-i] *= v
positions[-i] -= center_of_mass[i - 1]
centered = np.dot(orientation_matrix.T, positions)
size = np.max(centered, axis=1) - np.min(centered, axis=1)
return size
def get_main_axis_length(
index: int, area_array: np.ndarray, channel: np.ndarray, voxel_size, result_scalar, _cache=False, **kwargs
):
_cache = _cache and "_area" in kwargs and "_per_component" in kwargs
if _cache:
help_dict: Dict = kwargs["help_dict"]
_area: AreaType = kwargs["_area"]
_per_component: PerComponent = kwargs["_per_component"]
hash_name = hash_fun_call_name(calculate_main_axis, {}, _area, _per_component, kwargs["channel_num"])
if hash_name not in help_dict:
help_dict[hash_name] = calculate_main_axis(area_array, channel, [x * result_scalar for x in voxel_size])
return help_dict[hash_name][index]
else:
return calculate_main_axis(area_array, channel, [x * result_scalar for x in voxel_size])[index]
def hash_fun_call_name(
fun: Union[Callable, MeasurementMethodBase],
arguments: Dict,
area: AreaType,
per_component: PerComponent,
channel: Channel,
) -> str:
"""
Calculate a string used to properly cache measurement results.
:param fun: method for which hash string should be calculated
:param arguments: its additional arguments
:param area: type of area
:param per_component: If it is per component
:param channel: channel number on which calculation is performed
:return: unique string for such set of arguments
"""
if hasattr(fun, "__module__"):
fun_name = f"{fun.__module__}.{fun.__name__}"
else:
fun_name = fun.__name__
return "{}: {} # {} & {} * {}".format(fun_name, arguments, area, per_component, channel)
class Volume(MeasurementMethodBase):
text_info = "Volume", "Calculate volume of current segmentation"
@classmethod
def calculate_property(cls, area_array, voxel_size, result_scalar, **_): # pylint: disable=W0221
return np.count_nonzero(area_array) * pixel_volume(voxel_size, result_scalar)
@classmethod
def get_units(cls, ndim):
return symbols("{}") ** ndim
class Voxels(MeasurementMethodBase):
text_info = "Voxels", "Calculate number of voxels of current segmentation"
@classmethod
def calculate_property(cls, area_array, **_): # pylint: disable=W0221
return np.count_nonzero(area_array)
@classmethod
def get_units(cls, ndim):
return symbols("1")
# From <NAME>., & <NAME>. (2002). Computing the diameter of a point set,
# 12(6), 489–509. https://doi.org/10.1142/S0218195902001006
def double_normal(point_index: int, point_positions: np.ndarray, points_array: np.ndarray):
"""
:param point_index: index of the starting point
:param point_positions: points array of size (points_num, number of dimensions)
:param points_array: bool matrix with information about which points are in set
:return: 2-tuple of point indices and the squared distance between them
"""
delta = 0
dn = 0, 0
while True:
new_delta = delta
points_array[point_index] = 0
dist_array = np.sum(np.array((point_positions - point_positions[point_index]) ** 2), 1)
dist_array[points_array == 0] = 0
point2_index = np.argmax(dist_array)
if dist_array[point2_index] > new_delta:
delta = dist_array[point2_index]
dn = point_index, point2_index
point_index = point2_index
if new_delta == delta:
return dn, delta
def iterative_double_normal(points_positions: np.ndarray):
"""
:param points_positions: points array of size (points_num, number of dimensions)
:return: squared diameter, and a 2-tuple of point indices indicating which points were chosen
"""
delta = 0
dn = 0, 0
point_index = 0
points_array = np.ones(points_positions.shape[0], dtype=bool)
while True:
dn_r, delta_r = double_normal(point_index, points_positions, points_array)
if delta_r > delta:
delta = delta_r
dn = dn_r
mid_point = (points_positions[dn[0]] + points_positions[dn[1]]) / 2
dist_array = np.sum(np.array((points_positions - mid_point) ** 2), 1)
dist_array[~points_array] = 0
if np.any(dist_array >= delta / 4):
point_index = np.argmax(dist_array)
else:
break
else:
break
return delta, dn
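# Hedged usage sketch (the helper name is illustrative, not part of the original module):
# iterative_double_normal returns the *squared* diameter, so the Euclidean diameter of an
# (n_points, n_dims) position array can be recovered like this.
def _point_set_diameter_example(positions: np.ndarray) -> float:
    delta_sq, _pair = iterative_double_normal(positions)
    return float(np.sqrt(delta_sq))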
class Diameter(MeasurementMethodBase):
text_info = "Diameter", "Diameter of area"
@staticmethod
def calculate_property(area_array, voxel_size, result_scalar, **_): # pylint: disable=W0221
pos = np.transpose(np.nonzero(get_border(area_array))).astype(float)
if pos.size == 0:
return 0
for i, val in enumerate([x * result_scalar for x in reversed(voxel_size)], start=1):
pos[:, -i] *= val
diam_sq = iterative_double_normal(pos)[0]
return np.sqrt(diam_sq)
@classmethod
def get_units(cls, ndim):
return symbols("{}")
class DiameterOld(MeasurementMethodBase):
text_info = "Diameter old", "Diameter of area (Very slow)"
@staticmethod
def calculate_property(area_array, voxel_size, result_scalar, **_): # pylint: disable=W0221
return calc_diam(get_border(area_array), [x * result_scalar for x in voxel_size])
@classmethod
def get_units(cls, ndim):
return symbols("{}")
class PixelBrightnessSum(MeasurementMethodBase):
text_info = "Pixel brightness sum", "Sum of pixel brightness for current segmentation"
@staticmethod
def calculate_property(area_array: np.ndarray, channel: np.ndarray, **_): # pylint: disable=W0221
"""
:param area_array: mask for area
:param channel: data, same shape as area_array
:return: Pixels brightness sum on given area
"""
if area_array.shape != channel.shape:
if area_array.size == channel.size:
channel = channel.reshape(area_array.shape)
else:
raise ValueError("channel and mask do not fit each other")
if np.any(area_array):
return np.sum(channel[area_array > 0])
return 0
@classmethod
def get_units(cls, ndim):
return symbols("Pixel_brightness")
@classmethod
def need_channel(cls):
return True
class ComponentsNumber(MeasurementMethodBase):
text_info = "Components number", "Calculate number of connected components on segmentation"
@staticmethod
def calculate_property(area_array, **_): # pylint: disable=W0221
return np.unique(area_array).size - 1
@classmethod
def get_starting_leaf(cls):
return Leaf(cls.text_info[0], per_component=PerComponent.No)
@classmethod
def get_units(cls, ndim):
return symbols("count")
class MaximumPixelBrightness(MeasurementMethodBase):
text_info = "Maximum pixel brightness", "Calculate maximum pixel brightness for current area"
@staticmethod
def calculate_property(area_array, channel, **_):
if area_array.shape != channel.shape:
if area_array.size == channel.size:
channel = channel.reshape(area_array.shape)
else:
raise ValueError("channel and mask do not fit each other")
if np.any(area_array):
return np.max(channel[area_array > 0])
else:
return 0
@classmethod
def get_units(cls, ndim):
return symbols("Pixel_brightness")
@classmethod
def need_channel(cls):
return True
class MinimumPixelBrightness(MeasurementMethodBase):
text_info = "Minimum pixel brightness", "Calculate minimum pixel brightness for current area"
@staticmethod
def calculate_property(area_array, channel, **_):
if area_array.shape != channel.shape:
if area_array.size == channel.size:
channel = channel.reshape(area_array.shape)
else:
raise ValueError("channel and mask do not fit each other")
if np.any(area_array):
return np.min(channel[area_array > 0])
else:
return 0
@classmethod
def get_units(cls, ndim):
return symbols("Pixel_brightness")
@classmethod
def need_channel(cls):
return True
class MeanPixelBrightness(MeasurementMethodBase):
text_info = "Mean pixel brightness", "Calculate mean pixel brightness for current area"
@staticmethod
def calculate_property(area_array, channel, **_): # pylint: disable=W0221
if area_array.shape != channel.shape:
if area_array.size == channel.size:
channel = channel.reshape(area_array.shape)
else:
raise ValueError("channel and mask do not fit each other")
if np.any(area_array):
return
| np.mean(channel[area_array > 0]) | numpy.mean |
import os
import tempfile
os.environ["MPLCONFIGDIR"] = tempfile.gettempdir()
import itertools as it
import warnings
from multiprocessing import Pool, cpu_count
import numpy as np
import iri2016 as ion
import pymap3d as pm
from tqdm import tqdm
from datetime import datetime, timedelta
from time import time
class OrderError(Exception):
"""
Exception indicating incorrect order of simulation routines.
"""
pass
def check_latlon(lat, lon):
if not -90 <= lat <= 90:
raise ValueError("Latitude of the instrument must be in range [-90, 90]")
if not -180 <= lon < 180:
raise ValueError("Longitude of the instrument must be in range [-180, 180]")
def srange(theta, alt, RE=6378000.):
"""
Calculates the distance in meters from the telescope to the point (theta, alt).
Parameters
----------
theta : float
Zenith angle in radians
alt : float
Altitude in meters
RE : float, optional
Radius of the Earth in meters
Returns
-------
r : float
Range in meters
"""
r = -RE * np.cos(theta) + np.sqrt((RE * np.cos(theta)) ** 2 + alt ** 2 + 2 * alt * RE)
return r
def col_nicolet(height):
"""
#TODO
"""
a = -0.16184565
b = 28.02068763
return np.exp(a * height + b)
def col_setty(height):
"""
#TODO
"""
a = -0.16018896
b = 26.14939429
return np.exp(a * height + b)
def nu_p(n_e):
"""
Plasma frequency from electron density
Parameters
----------
n_e : float
Electron density
Returns
-------
float
Plasma frequency in Hz
"""
e = 1.60217662e-19
m_e = 9.10938356e-31
epsilon0 = 8.85418782e-12
if n_e < 0:
raise ValueError('Number density cannot be < 0.')
return 1 / (2 * np.pi) * np.sqrt((n_e * e ** 2) / (m_e * epsilon0))
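# Rough sanity check (illustrative value, not from the source): a typical daytime
# F-layer electron density of ~1e12 m^-3 gives nu_p(1e12) ≈ 9.0e6 Hz (about 9 MHz).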
def n_f(n_e, freq):
"""
Refractive index of F-layer from electron density
Parameters
----------
n_e : float
Electron density
freq : float
Signal frequency in Hz
"""
return (1 - (nu_p(n_e) / freq) ** 2) ** 0.5
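# Note: for signal frequencies below the local plasma frequency nu_p(n_e) the term under
# the square root is negative, i.e. the wave is reflected rather than transmitted there.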
def refr_angle(n1, n2, phi):
"""
Angle of refracted ray using Snell's law.
Parameters
----------
n1 : float
Refractive index in previous medium
n2 : float
Refractive index in current medium
phi : float
Angle of incident ray in rad
Returns
-------
float
Angle in rad
"""
return np.arcsin(n1 / n2 * np.sin(phi))
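# Note: if n1 / n2 * sin(phi) exceeds 1 (total internal reflection), np.arcsin returns NaN.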
def d_atten(nu, theta, h_d, delta_hd, nu_p, nu_c):
"""
#TODO
"""
R_E = 6371000
c = 2.99792458e8
delta_s = delta_hd * (1 + h_d / R_E) * (np.cos(theta) ** 2 + 2 * h_d / R_E) ** (-0.5)
f =
| np.exp(-(2 * np.pi * nu_p ** 2 * nu_c * delta_s) / (c * (nu_c ** 2 + nu ** 2))) | numpy.exp |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 16 15:01:41 2020
@author: jlee
"""
import time
start_time = time.time()
import numpy as np
import glob, os
from matplotlib import pyplot as plt
from astropy.io import fits
from linefit import linefit
# from linefit import linear
# ----- Basic parameters ----- #
redshift = 0.3033
dir_vbin = 'vorbin/'
dir_lines = 'lines2/'
os.system('rm -rfv '+dir_lines+'*')
os.system('mkdir '+dir_lines+'check/')
# ----- Loading Voronoi binned data ----- #
vb = np.load(dir_vbin+'vorbin_array.npz')
# wav, sci, var, cont
data_vbin = fits.getdata(dir_vbin+'vbin.fits').astype('int')
nvbin =
| np.unique(data_vbin) | numpy.unique |
import os
import pickle
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import offsetbox
# from skimage.transform import resize
def save_variable(variable, name_of_variable, path_to_save='./'):
# https://stackoverflow.com/questions/6568007/how-do-i-save-and-restore-multiple-variables-in-python
if not os.path.exists(path_to_save): # https://stackoverflow.com/questions/273192/how-can-i-create-a-directory-if-it-does-not-exist
os.makedirs(path_to_save)
file_address = path_to_save + name_of_variable + '.pckl'
f = open(file_address, 'wb')
pickle.dump(variable, f)
f.close()
def load_variable(name_of_variable, path='./'):
# https://stackoverflow.com/questions/6568007/how-do-i-save-and-restore-multiple-variables-in-python
file_address = path + name_of_variable + '.pckl'
f = open(file_address, 'rb')
variable = pickle.load(f)
f.close()
return variable
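# Hedged usage sketch (paths and names are illustrative); note that both helpers simply
# concatenate path + name, so the path should end with a separator:
#   save_variable({"a": 1}, "demo", path_to_save="/tmp/")
#   restored = load_variable("demo", path="/tmp/")   # -> {"a": 1}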
def save_np_array_to_txt(variable, name_of_variable, path_to_save='./'):
if type(variable) is list:
variable = np.asarray(variable)
# https://stackoverflow.com/questions/22821460/numpy-save-2d-array-to-text-file/22822701
if not os.path.exists(path_to_save): # https://stackoverflow.com/questions/273192/how-can-i-create-a-directory-if-it-does-not-exist
os.makedirs(path_to_save)
file_address = path_to_save + name_of_variable + '.txt'
np.set_printoptions(threshold=np.inf, linewidth=np.inf) # turn off summarization, line-wrapping
with open(file_address, 'w') as f:
f.write(
| np.array2string(variable, separator=', ') | numpy.array2string |
from __future__ import print_function
import numpy as np
import unittest
import discretize
from pymatsolver import Solver
MESHTYPES = ['uniformTensorMesh']
def getxBCyBC_CC(mesh, alpha, beta, gamma):
"""
This is a subfunction generating mixed-boundary condition:
.. math::
\nabla \cdot \vec{j} = -\nabla \cdot \vec{j}_s = q
\rho \vec{j} = -\nabla \phi \phi
\alpha \phi + \beta \frac{\partial \phi}{\partial r} = \gamma \ at \ r
= \partial \Omega
xBC = f_1(\alpha, \beta, \gamma)
yBC = f(\alpha, \beta, \gamma)
Computes xBC and yBC for cell-centered discretizations
"""
if mesh.dim == 1: # 1D
if (len(alpha) != 2 or len(beta) != 2 or len(gamma) != 2):
raise Exception("Lenght of list, alpha should be 2")
fCCxm, fCCxp = mesh.cellBoundaryInd
nBC = fCCxm.sum()+fCCxp.sum()
h_xm, h_xp = mesh.gridCC[fCCxm], mesh.gridCC[fCCxp]
alpha_xm, beta_xm, gamma_xm = alpha[0], beta[0], gamma[0]
alpha_xp, beta_xp, gamma_xp = alpha[1], beta[1], gamma[1]
# h_xm, h_xp = mesh.gridCC[fCCxm], mesh.gridCC[fCCxp]
h_xm, h_xp = mesh.hx[0], mesh.hx[-1]
a_xm = gamma_xm/(0.5*alpha_xm-beta_xm/h_xm)
b_xm = (0.5*alpha_xm+beta_xm/h_xm)/(0.5*alpha_xm-beta_xm/h_xm)
a_xp = gamma_xp/(0.5*alpha_xp-beta_xp/h_xp)
b_xp = (0.5*alpha_xp+beta_xp/h_xp)/(0.5*alpha_xp-beta_xp/h_xp)
xBC_xm = 0.5*a_xm
xBC_xp = 0.5*a_xp/b_xp
yBC_xm = 0.5*(1.-b_xm)
yBC_xp = 0.5*(1.-1./b_xp)
xBC = np.r_[xBC_xm, xBC_xp]
yBC = np.r_[yBC_xm, yBC_xp]
elif mesh.dim == 2: # 2D
if (len(alpha) != 4 or len(beta) != 4 or len(gamma) != 4):
raise Exception("Lenght of list, alpha should be 4")
fxm, fxp, fym, fyp = mesh.faceBoundaryInd
nBC = fxm.sum()+fxp.sum()+fxm.sum()+fxp.sum()
alpha_xm, beta_xm, gamma_xm = alpha[0], beta[0], gamma[0]
alpha_xp, beta_xp, gamma_xp = alpha[1], beta[1], gamma[1]
alpha_ym, beta_ym, gamma_ym = alpha[2], beta[2], gamma[2]
alpha_yp, beta_yp, gamma_yp = alpha[3], beta[3], gamma[3]
# h_xm, h_xp = mesh.gridCC[fCCxm,0], mesh.gridCC[fCCxp,0]
# h_ym, h_yp = mesh.gridCC[fCCym,1], mesh.gridCC[fCCyp,1]
h_xm = mesh.hx[0]*np.ones_like(alpha_xm)
h_xp = mesh.hx[-1]*np.ones_like(alpha_xp)
h_ym = mesh.hy[0]*np.ones_like(alpha_ym)
h_yp = mesh.hy[-1]*np.ones_like(alpha_yp)
a_xm = gamma_xm/(0.5*alpha_xm-beta_xm/h_xm)
b_xm = (0.5*alpha_xm+beta_xm/h_xm)/(0.5*alpha_xm-beta_xm/h_xm)
a_xp = gamma_xp/(0.5*alpha_xp-beta_xp/h_xp)
b_xp = (0.5*alpha_xp+beta_xp/h_xp)/(0.5*alpha_xp-beta_xp/h_xp)
a_ym = gamma_ym/(0.5*alpha_ym-beta_ym/h_ym)
b_ym = (0.5*alpha_ym+beta_ym/h_ym)/(0.5*alpha_ym-beta_ym/h_ym)
a_yp = gamma_yp/(0.5*alpha_yp-beta_yp/h_yp)
b_yp = (0.5*alpha_yp+beta_yp/h_yp)/(0.5*alpha_yp-beta_yp/h_yp)
xBC_xm = 0.5*a_xm
xBC_xp = 0.5*a_xp/b_xp
yBC_xm = 0.5*(1.-b_xm)
yBC_xp = 0.5*(1.-1./b_xp)
xBC_ym = 0.5*a_ym
xBC_yp = 0.5*a_yp/b_yp
yBC_ym = 0.5*(1.-b_ym)
yBC_yp = 0.5*(1.-1./b_yp)
sortindsfx = np.argsort(np.r_[np.arange(mesh.nFx)[fxm],
np.arange(mesh.nFx)[fxp]])
sortindsfy = np.argsort(np.r_[np.arange(mesh.nFy)[fym],
np.arange(mesh.nFy)[fyp]])
xBC_x = np.r_[xBC_xm, xBC_xp][sortindsfx]
xBC_y = np.r_[xBC_ym, xBC_yp][sortindsfy]
yBC_x = np.r_[yBC_xm, yBC_xp][sortindsfx]
yBC_y = np.r_[yBC_ym, yBC_yp][sortindsfy]
xBC = np.r_[xBC_x, xBC_y]
yBC = np.r_[yBC_x, yBC_y]
elif mesh.dim == 3: # 3D
if (len(alpha) != 6 or len(beta) != 6 or len(gamma) != 6):
raise Exception("Lenght of list, alpha should be 6")
# fCCxm,fCCxp,fCCym,fCCyp,fCCzm,fCCzp = mesh.cellBoundaryInd
fxm, fxp, fym, fyp, fzm, fzp = mesh.faceBoundaryInd
nBC = fxm.sum()+fxp.sum()+fxm.sum()+fxp.sum()
alpha_xm, beta_xm, gamma_xm = alpha[0], beta[0], gamma[0]
alpha_xp, beta_xp, gamma_xp = alpha[1], beta[1], gamma[1]
alpha_ym, beta_ym, gamma_ym = alpha[2], beta[2], gamma[2]
alpha_yp, beta_yp, gamma_yp = alpha[3], beta[3], gamma[3]
alpha_zm, beta_zm, gamma_zm = alpha[4], beta[4], gamma[4]
alpha_zp, beta_zp, gamma_zp = alpha[5], beta[5], gamma[5]
# h_xm, h_xp = mesh.gridCC[fCCxm,0], mesh.gridCC[fCCxp,0]
# h_ym, h_yp = mesh.gridCC[fCCym,1], mesh.gridCC[fCCyp,1]
# h_zm, h_zp = mesh.gridCC[fCCzm,2], mesh.gridCC[fCCzp,2]
h_xm = mesh.hx[0]*np.ones_like(alpha_xm)
h_xp = mesh.hx[-1]*np.ones_like(alpha_xp)
h_ym = mesh.hy[0]*np.ones_like(alpha_ym)
h_yp = mesh.hy[-1]*np.ones_like(alpha_yp)
h_zm = mesh.hz[0]*np.ones_like(alpha_zm)
h_zp = mesh.hz[-1]*np.ones_like(alpha_zp)
a_xm = gamma_xm/(0.5*alpha_xm-beta_xm/h_xm)
b_xm = (0.5*alpha_xm+beta_xm/h_xm)/(0.5*alpha_xm-beta_xm/h_xm)
a_xp = gamma_xp/(0.5*alpha_xp-beta_xp/h_xp)
b_xp = (0.5*alpha_xp+beta_xp/h_xp)/(0.5*alpha_xp-beta_xp/h_xp)
a_ym = gamma_ym/(0.5*alpha_ym-beta_ym/h_ym)
b_ym = (0.5*alpha_ym+beta_ym/h_ym)/(0.5*alpha_ym-beta_ym/h_ym)
a_yp = gamma_yp/(0.5*alpha_yp-beta_yp/h_yp)
b_yp = (0.5*alpha_yp+beta_yp/h_yp)/(0.5*alpha_yp-beta_yp/h_yp)
a_zm = gamma_zm/(0.5*alpha_zm-beta_zm/h_zm)
b_zm = (0.5*alpha_zm+beta_zm/h_zm)/(0.5*alpha_zm-beta_zm/h_zm)
a_zp = gamma_zp/(0.5*alpha_zp-beta_zp/h_zp)
b_zp = (0.5*alpha_zp+beta_zp/h_zp)/(0.5*alpha_zp-beta_zp/h_zp)
xBC_xm = 0.5*a_xm
xBC_xp = 0.5*a_xp/b_xp
yBC_xm = 0.5*(1.-b_xm)
yBC_xp = 0.5*(1.-1./b_xp)
xBC_ym = 0.5*a_ym
xBC_yp = 0.5*a_yp/b_yp
yBC_ym = 0.5*(1.-b_ym)
yBC_yp = 0.5*(1.-1./b_yp)
xBC_zm = 0.5*a_zm
xBC_zp = 0.5*a_zp/b_zp
yBC_zm = 0.5*(1.-b_zm)
yBC_zp = 0.5*(1.-1./b_zp)
sortindsfx = np.argsort(np.r_[np.arange(mesh.nFx)[fxm],
np.arange(mesh.nFx)[fxp]])
sortindsfy = np.argsort(np.r_[np.arange(mesh.nFy)[fym],
np.arange(mesh.nFy)[fyp]])
sortindsfz = np.argsort(np.r_[np.arange(mesh.nFz)[fzm],
np.arange(mesh.nFz)[fzp]])
xBC_x = np.r_[xBC_xm, xBC_xp][sortindsfx]
xBC_y = np.r_[xBC_ym, xBC_yp][sortindsfy]
xBC_z = np.r_[xBC_zm, xBC_zp][sortindsfz]
yBC_x = np.r_[yBC_xm, yBC_xp][sortindsfx]
yBC_y = np.r_[yBC_ym, yBC_yp][sortindsfy]
yBC_z = np.r_[yBC_zm, yBC_zp][sortindsfz]
xBC = np.r_[xBC_x, xBC_y, xBC_z]
yBC = np.r_[yBC_x, yBC_y, yBC_z]
return xBC, yBC
class Test1D_InhomogeneousMixed(discretize.Tests.OrderTest):
name = "1D - Mixed"
meshTypes = MESHTYPES
meshDimension = 1
expectedOrders = 2
meshSizes = [4, 8, 16, 32]
def getError(self):
# Test function
def phi_fun(x): return np.cos(np.pi*x)
def j_fun(x): return np.pi*np.sin(np.pi*x)
def phi_deriv(x): return -j_fun(x)
def q_fun(x): return (np.pi**2)*np.cos(np.pi*x)
xc_ana = phi_fun(self.M.gridCC)
q_ana = q_fun(self.M.gridCC)
j_ana = j_fun(self.M.gridFx)
# Get boundary locations
vecN = self.M.vectorNx
vecC = self.M.vectorCCx
# Setup Mixed B.C (alpha, beta, gamma)
alpha_xm, alpha_xp = 1., 1.
beta_xm, beta_xp = 1., 1.
alpha = np.r_[alpha_xm, alpha_xp]
beta = np.r_[beta_xm, beta_xp]
vecN = self.M.vectorNx
vecC = self.M.vectorCCx
phi_bc = phi_fun(vecN[[0, -1]])
phi_deriv_bc = phi_deriv(vecN[[0, -1]])
gamma = alpha*phi_bc + beta*phi_deriv_bc
x_BC, y_BC = getxBCyBC_CC(self.M, alpha, beta, gamma)
sigma = np.ones(self.M.nC)
Mfrho = self.M.getFaceInnerProduct(1./sigma)
MfrhoI = self.M.getFaceInnerProduct(1./sigma, invMat=True)
V = discretize.utils.sdiag(self.M.vol)
Div = V*self.M.faceDiv
P_BC, B = self.M.getBCProjWF_simple()
q = q_fun(self.M.gridCC)
M = B*self.M.aveCC2F
G = Div.T - P_BC*discretize.utils.sdiag(y_BC)*M
# Mrhoj = D.T V phi + P_BC*discretize.utils.sdiag(y_BC)*M phi - P_BC*x_BC
rhs = V*q + Div*MfrhoI*P_BC*x_BC
A = Div*MfrhoI*G
if self.myTest == 'xc':
# TODO: fix the null space
Ainv = Solver(A)
xc = Ainv*rhs
err = np.linalg.norm((xc-xc_ana), np.inf)
else:
raise NotImplementedError
return err
def test_order(self):
print("==== Testing Mixed boudary conduction for CC-problem ====")
self.name = "1D"
self.myTest = 'xc'
self.orderTest()
class Test2D_InhomogeneousMixed(discretize.Tests.OrderTest):
name = "2D - Mixed"
meshTypes = MESHTYPES
meshDimension = 2
expectedOrders = 2
meshSizes = [4, 8, 16, 32]
def getError(self):
# Test function
def phi_fun(x):
return np.cos(np.pi*x[:, 0])*np.cos(np.pi*x[:, 1])
def j_funX(x):
return +np.pi*np.sin(np.pi*x[:, 0])*np.cos(np.pi*x[:, 1])
def j_funY(x):
return +np.pi*np.cos(np.pi*x[:, 0])*np.sin(np.pi*x[:, 1])
def phideriv_funX(x):
return -j_funX(x)
def phideriv_funY(x):
return -j_funY(x)
def q_fun(x):
return +2*(np.pi**2)*phi_fun(x)
xc_ana = phi_fun(self.M.gridCC)
q_ana = q_fun(self.M.gridCC)
jX_ana = j_funX(self.M.gridFx)
jY_ana = j_funY(self.M.gridFy)
j_ana = np.r_[jX_ana, jY_ana]
# Get boundary locations
fxm, fxp, fym, fyp = self.M.faceBoundaryInd
gBFxm = self.M.gridFx[fxm, :]
gBFxp = self.M.gridFx[fxp, :]
gBFym = self.M.gridFy[fym, :]
gBFyp = self.M.gridFy[fyp, :]
# Setup Mixed B.C (alpha, beta, gamma)
alpha_xm = np.ones_like(gBFxm[:, 0])
alpha_xp = np.ones_like(gBFxp[:, 0])
beta_xm = np.ones_like(gBFxm[:, 0])
beta_xp = np.ones_like(gBFxp[:, 0])
alpha_ym = np.ones_like(gBFym[:, 1])
alpha_yp = np.ones_like(gBFyp[:, 1])
beta_ym = np.ones_like(gBFym[:, 1])
beta_yp = np.ones_like(gBFyp[:, 1])
phi_bc_xm, phi_bc_xp = phi_fun(gBFxm), phi_fun(gBFxp)
phi_bc_ym, phi_bc_yp = phi_fun(gBFym), phi_fun(gBFyp)
phiderivX_bc_xm = phideriv_funX(gBFxm)
phiderivX_bc_xp = phideriv_funX(gBFxp)
phiderivY_bc_ym = phideriv_funY(gBFym)
phiderivY_bc_yp = phideriv_funY(gBFyp)
def gamma_fun(alpha, beta, phi, phi_deriv):
return alpha*phi + beta*phi_deriv
gamma_xm = gamma_fun(alpha_xm, beta_xm, phi_bc_xm, phiderivX_bc_xm)
gamma_xp = gamma_fun(alpha_xp, beta_xp, phi_bc_xp, phiderivX_bc_xp)
gamma_ym = gamma_fun(alpha_ym, beta_ym, phi_bc_ym, phiderivY_bc_ym)
gamma_yp = gamma_fun(alpha_yp, beta_yp, phi_bc_yp, phiderivY_bc_yp)
alpha = [alpha_xm, alpha_xp, alpha_ym, alpha_yp]
beta = [beta_xm, beta_xp, beta_ym, beta_yp]
gamma = [gamma_xm, gamma_xp, gamma_ym, gamma_yp]
x_BC, y_BC = getxBCyBC_CC(self.M, alpha, beta, gamma)
sigma =
| np.ones(self.M.nC) | numpy.ones |
import time
import sys
import os
import numpy as np
from os.path import join
from datetime import datetime
from scipy import spatial
import cv2
sys.path.append('../../')
import gsom.applications.video_highlights.temporal_features.hoof_data_parser as Parser
from gsom.util import utilities as Utils
from gsom.util import display as Display_Utils
from gsom.params import params as Params
from gsom.core4 import core_controller as Core
from gsom.util import kmeans_cluster_gsom as KMeans_Cluster
def recluster_gsom(converted_feature_vector_dictionary, SF, learning_itr, smoothing_irt, temporal_contexts,
forget_threshold, dataset):
print('Re-clustering process started\n\n')
count = 0
cluster_no_threshold = 2
no_subclusters = 10
recluster_arr = []
final_cluster_out = []
dynamic_highlights = []
dynamic_recluster_start = time.time()
excluded_time = 0
for element in range(len(converted_feature_vector_dictionary)):
# Init GSOM Parameters
gsom_params = Params.GSOMParameters(SF, learning_itr, smoothing_irt, distance=Params.DistanceFunction.EUCLIDEAN,
temporal_context_count=temporal_contexts, forget_itr_count=forget_threshold)
generalise_params = Params.GeneraliseParameters(gsom_params)
# convert input data to run gsom
input_data = {
0: np.matrix(converted_feature_vector_dictionary[element]['feature_vec'])
}
# for i in range(len(converted_feature_vector_dictionary[element]['feature_vec'])):
# input_data[i] = np.matrix(converted_feature_vector_dictionary[element]['feature_vec'])
# Setup the age threshold based on the input vector length
generalise_params.setup_age_threshold(input_data[0].shape[0])
recluster_excluded_start = time.time()
# Mock output location
output_loc = 'temporal/re-cluster/' + str(count)
output_loc_images = join(output_loc, 'images/')
if not os.path.exists(output_loc):
os.makedirs(output_loc)
if not os.path.exists(output_loc_images):
os.makedirs(output_loc_images)
recluster_excluded_end = time.time()
excluded_time += (recluster_excluded_end - recluster_excluded_start)
# Process the clustering algorithm
controller = Core.Controller(generalise_params)
controller_start = time.time()
result_dict = controller.run(input_vector_db=input_data,
# return the list/map from here
plot_for_itr=0,
output_loc=output_loc
)
print('Algorithm for ' + str(count) + ' completed in ', round(time.time() - controller_start, 2), '(s)')
# saved_name = Utils.Utilities.save_object(result_dict, join(output_loc, 'gsom_nodemap_SF-{}'.format(SF)))
gsom_nodemap = result_dict[0]['gsom']
# # Display
# display = Display_Utils.Display(result_dict[0]['gsom'], None)
# # display.setup_labels_for_gsom_nodemap(labels, 2, 'Latent Space of {} : SF={}'.format(dataset, SF),
# # join(output_loc, 'latent_space_' + str(SF) + '_hitvalues'))
# display.setup_labels_for_gsom_nodemap(converted_feature_vector_dictionary[element]['frame_label'], 3,
# 'Latent Space of {} : SF={}'.format(dataset, SF),
# join(output_loc, 'latent_space_' + str(SF) + '_labels'))
print('Completed.')
count += 1
recluster_arr.append({
'gsom': gsom_nodemap,
'frame_labels': converted_feature_vector_dictionary[element]['frame_label'],
'feature_vec': converted_feature_vector_dictionary[element]['feature_vec']
})
kmeans_som = KMeans_Cluster.KMeansSOM()
gsom_array = kmeans_som._gsom_to_array(gsom_nodemap)
gsom_array_length = len(gsom_array)
if gsom_array_length < no_subclusters:
no_subclusters = gsom_array_length
gsom_list, centroids, labels = kmeans_som.cluster_GSOM(gsom_nodemap, no_subclusters)
farthest_clusters = select_farthest_clusters(
converted_feature_vector_dictionary[element]['cluster_centroid'],
centroids,
cluster_no_threshold
)
final_cluster_out.append({
element: farthest_clusters
})
frame_node_list = []
no_of_nodes = len(gsom_list)
for x in range(no_of_nodes):
gsom_node_weights = gsom_list[x]
for key, node in gsom_nodemap.items():
if len(node.get_mapped_labels()) > 0:
if gsom_node_weights.tolist() == node.recurrent_weights[0].tolist():
label_indices = node.get_mapped_labels()
frame_labels = []
for index in label_indices:
frame_labels.append(converted_feature_vector_dictionary[element]['frame_label'][index])
frame_node_list.append([key, node, labels[x], frame_labels])
break
for each_item in frame_node_list:
for frame in each_item[3]:
if each_item[2] == farthest_clusters:
highlight_output = join("temporal/re-cluster/highlights/" + str(element) + "/" + str(each_item[2]) + "/")
file_path = join("temporal/re-cluster/highlights/" + str(element) + "/" + str(each_item[2]) + "/",
str(frame) + ".jpg")
# return the frames from the dynamic highlights
dynamic_highlights.append(str(frame))
# if not os.path.exists(highlight_output):
# os.makedirs(highlight_output)
# cv2.imwrite(file_path, original_frame_list[int(frame)])
# print(recluster_arr)
# print(final_cluster_out)
dynamic_recluster_end = time.time()
print("Dynamic Feature level 2 reclusterd: " + str(dynamic_recluster_end-dynamic_recluster_start-excluded_time))
return recluster_arr, dynamic_highlights
def cosine_distance(vec1, vec2):
mag1 = np.linalg.norm(vec1)
mag2 = np.linalg.norm(vec2)  # api: numpy.linalg.norm
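A minimal, self-contained sketch (not part of the original source) of the cosine-distance computation started above, using plain NumPy vectors; the epsilon guard against zero-length vectors is an illustrative addition.

import numpy as np

def cosine_distance_example(vec1, vec2, eps=1e-12):
    """Return 1 - cosine similarity of two 1-D vectors."""
    mag1 = np.linalg.norm(vec1)
    mag2 = np.linalg.norm(vec2)
    # Guard against zero-length vectors (illustrative assumption).
    cosine_sim = float(np.dot(vec1, vec2)) / max(mag1 * mag2, eps)
    return 1.0 - cosine_sim

# Usage: identical vectors give distance ~0, orthogonal vectors give 1.
print(cosine_distance_example(np.array([1.0, 0.0]), np.array([1.0, 0.0])))  # ~0.0
print(cosine_distance_example(np.array([1.0, 0.0]), np.array([0.0, 1.0])))  # 1.0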
import timeit
from datetime import datetime
import socket
import os
import glob
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
import torch
from tensorboardX import SummaryWriter
from torch import nn, optim
from torch.utils.data import DataLoader
from torch.autograd import Variable
from dataloaders.dataset import VideoDataset
from network import C3D_model
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Device being used:", device)
nEpochs = 150 # Number of epochs for training
useTest = True # See evolution of the test set when training
nTestInterval = 1 # Run on test set every nTestInterval epochs
snapshot = 40 # Store a model every snapshot epochs
lr = 1e-4 # Learning rate
dataset = 'CAER'
num_classes = 7
save_dir_root = os.path.join(os.path.dirname(os.path.abspath(__file__)))
exp_name = os.path.dirname(os.path.abspath(__file__)).split('/')[-1]
runs = sorted(glob.glob(os.path.join(save_dir_root, 'run', 'run_*')))
run_id = int(runs[-1].split('_')[-1]) + 1 if runs else 0
save_dir = os.path.join(save_dir_root, 'run', 'run_' + str(run_id))
modelName = 'C3D'
saveName = modelName + '-' + dataset
def train_model(dataset=dataset, save_dir=save_dir, num_classes=num_classes, lr=lr,
num_epochs=nEpochs, save_epoch=snapshot, useTest=useTest, test_interval=nTestInterval):
if modelName == 'C3D':
model = C3D_model.C3D(num_classes=num_classes, pretrained=True)
train_params = [{'params': C3D_model.get_1x_lr_params(model), 'lr': lr},
{'params': C3D_model.get_10x_lr_params(model), 'lr': lr * 10}]
else:
print('We only implemented C3D models.')
raise NotImplementedError
criterion = nn.CrossEntropyLoss() # standard crossentropy loss for classification
optimizer = optim.SGD(train_params, lr=lr, momentum=0.9, weight_decay=5e-4)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=30,
gamma=0.1)  # the scheduler divides the lr by 10 every 30 epochs
print("Training {} from scratch...".format(modelName))
print('Total params: %.2fM' % (sum(p.numel() for p in model.parameters()) / 1000000.0))
model.to(device)
criterion.to(device)
log_dir = os.path.join(save_dir, 'models', datetime.now().strftime('%b%d_%H-%M-%S') + '_' + socket.gethostname())
writer = SummaryWriter(log_dir=log_dir)
print('Training model on {} dataset...'.format(dataset))
train_dataloader = DataLoader(VideoDataset(dataset=dataset, split='train',clip_len=16), batch_size=16, shuffle=True, num_workers=4)
val_dataloader = DataLoader(VideoDataset(dataset=dataset, split='val', clip_len=16), batch_size=16, num_workers=4)
test_dataloader = DataLoader(VideoDataset(dataset=dataset, split='test', clip_len=16), batch_size=16, num_workers=4)
trainval_loaders = {'train': train_dataloader, 'val': val_dataloader}
trainval_sizes = {x: len(trainval_loaders[x].dataset) for x in ['train', 'val']}
test_size = len(test_dataloader.dataset)
#store epoch loss and accuracy to plot a graph
epoch_loss_gra = np.zeros(num_epochs)  # api: numpy.zeros
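A short sketch of the per-epoch bookkeeping that `epoch_loss_gra` appears to set up: preallocate arrays with np.zeros, fill them during training, and plot afterwards. The loop below is a stand-in, not the C3D training loop from this script.

import numpy as np
import matplotlib.pyplot as plt

num_epochs = 5  # small value for illustration
epoch_loss = np.zeros(num_epochs)
epoch_acc = np.zeros(num_epochs)

for epoch in range(num_epochs):
    # Placeholder metrics; a real loop would accumulate loss/accuracy per batch.
    epoch_loss[epoch] = 1.0 / (epoch + 1)
    epoch_acc[epoch] = 1.0 - epoch_loss[epoch]

plt.plot(np.arange(num_epochs), epoch_loss, label='loss')
plt.plot(np.arange(num_epochs), epoch_acc, label='accuracy')
plt.xlabel('epoch')
plt.legend()
plt.savefig('epoch_metrics.png')  # hypothetical output file name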
import tensorflow as tf
import numpy
from .GaussianBoundaryCondition import GaussianBoundaryCondition
class HarmonicOscillatorWavefunction(tf.keras.layers.Layer):
"""Implememtation of the harmonic oscillator wave funtions
Create a polynomial, up to `degree` in every dimension `n`, that is the
exact solution to the harmonic oscillator wave function.
Extends:
tf.keras.layers.Layer
"""
def __init__(self, n : int, nparticles : int, degree : int, alpha : float, dtype=tf.float64):
"""Initializer
Create a harmonic oscillator wave function
Arguments:
n {int} -- Dimension of the oscillator (1 <= n <= 3)
nparticles {int} -- Number of particles
degree {int} -- Degree of the solution (broadcastable to n)
alpha {float} -- Alpha parameter (m * omega / hbar)
Raises:
Exception -- if n is not 1, 2, or 3; if nparticles > 1; or if any degree exceeds 4
"""
tf.keras.layers.Layer.__init__(self, dtype=dtype)
self.n = n
if self.n < 1 or self.n > 3:
raise Exception("Dimension must be 1, 2, or 3 for HarmonicOscillatorWavefunction")
if nparticles > 1:
raise Exception("HarmonicOscillatorWavefunction is only for 1 particle for testing.")
# Use numpy to broadcast to the right dimension:
degree = numpy.asarray(degree, dtype=numpy.int32)
degree = numpy.broadcast_to(degree, (self.n,))
self.type = dtype
# Degree of the polynomial:
self.degree = degree
if numpy.min(self.degree) < 0 or numpy.max(self.degree) > 4:
raise Exception("Only the first 5 hermite polynomials are supported")
alpha = numpy.asarray(alpha, dtype=numpy.float64)  # alpha is a float parameter; do not truncate to int
alpha = numpy.broadcast_to(alpha, (self.n,))
self.alpha = alpha
# Normalization:
self.norm = numpy.power(self.alpha / numpy.pi, 0.25)
self.norm = numpy.prod(self.norm)
# Craft the polynomial coefficients:
# Add one to the degree since they start at "0"
# Polynomial is of shape [degree, largest_dimension]
self.polynomial = numpy.zeros(shape=(max(self.degree) + 1, self.n), dtype=numpy.float64)
# Loop over the coefficents and set them:
# Loop over dimension:
self.polynomial_norm = numpy.zeros(shape=(self.n,), dtype=numpy.float64)
for _n in range(self.n):
# Loop over degree:
_d = self.degree[_n]
if _d == 0:
self.polynomial[0][_n] = 1.0
elif _d == 1:
self.polynomial[1][_n] = 2.0
elif _d == 2:
self.polynomial[0][_n] = -2.0
self.polynomial[2][_n] = 4.0
elif _d == 3:
self.polynomial[1][_n] = -12.0
self.polynomial[3][_n] = 8.0
elif _d == 4:
self.polynomial[0][_n] = 12.0
self.polynomial[2][_n] = -48.0
self.polynomial[4][_n] = 16.0
# Set the polynomial normalization as a function of the degree
# For each dimension:
self.polynomial_norm[_n] = 1.0 / numpy.sqrt(2**_d * numpy.math.factorial(_d))  # api: numpy.math.factorial
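A worked sketch (editorial illustration) of the normalization being assembled above for the 1-D harmonic-oscillator eigenfunctions, (alpha/pi)**(1/4) / sqrt(2**n * n!). The standard-library math.factorial is used here because numpy.math historically just re-exported the math module and is no longer available in recent NumPy releases.

import math
import numpy as np

def hermite_norm(degree, alpha):
    """Normalization constant of the 1-D oscillator eigenfunction of given degree."""
    return (alpha / np.pi) ** 0.25 / math.sqrt(2 ** degree * math.factorial(degree))

# Usage: the ground state with alpha = 1 gives pi**(-1/4) ~ 0.7511.
print(hermite_norm(0, 1.0))
print(hermite_norm(2, 1.0))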
import numpy as np
import numpy.matlib as mat
from scipy.linalg import solve
from scipy.ndimage import gaussian_filter
from scipy.optimize import basinhopping, minimize_scalar
from kineticmodel import KineticModel
from kineticmodel import integrate as km_integrate
class SRTM_Zhou2003(KineticModel):
'''
Compute distribution volume ratio (DVR) and relative delivery (R1) kinetic
parameters from dynamic PET data based on a simplified reference tissue
model (SRTM). The nonlinear SRTM equations are linearized using integrals of
time activity curves (TACs) of the reference and target tissues. Kinetic
parameters are then estimated by weighted linear regression (WLR).
If provided, the spatially smoothed TAC of the target region is used in
the computation of DVR as part of the linear regression with spatial
constraint (LRSC) approach.
To obtain the R1 estimate that incorporates spatial smoothness based on
LRSC, run refine_R1() after running fit().
Reference:
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
Linear regression with spatial constraint to generate parametric images of
ligand-receptor dynamic PET studies with a simplified reference tissue model.
Neuroimage. 2003;18:975–989.
'''
# This class will compute the following results:
result_names = [ # estimated parameters
'BP',
'DVR',
'R1','k2','k2a',
'R1_lrsc','k2_lrsc','k2a_lrsc',
# model fit indicators
'noiseVar_eqDVR','noiseVar_eqR1']
def fit(self, smoothTAC=None):
'''
Estimate parameters of the SRTM Zhou 2003 model.
Args:
smoothTAC (numpy.ndarray): optional. 1- or 2-D array, where each row
corresponds to a (spatially) smoothed time activity curve
'''
if smoothTAC is not None:
if smoothTAC.ndim==1:
if not len(smoothTAC)==len(self.t):
raise ValueError('smoothTAC and t must have same length')
# make smoothTAC into a row vector
smoothTAC = smoothTAC[np.newaxis,:]
elif smoothTAC.ndim==2:
if not smoothTAC.shape==self.TAC.shape:
raise ValueError('smoothTAC and TAC must have same shape')
else:
raise ValueError('smoothTAC must be 1- or 2-dimensional')
n = len(self.t)
m = 3
# Numerical integration of reference TAC
intrefTAC = km_integrate(self.refTAC,self.t,self.startActivity)
# Compute BP/DVR, R1, k2, k2a
for k, TAC in enumerate(self.TAC):
W = mat.diag(self.weights[k,:])
# Numerical integration of target TAC
intTAC = km_integrate(TAC,self.t,self.startActivity)
# ----- Get DVR -----
# Set up the weighted linear regression model
# based on Eq. 9 in Zhou et al.
# Per the recommendation in first paragraph on p. 979 of Zhou et al.,
# smoothed TAC is used in the design matrix, if provided.
if smoothTAC is None:
X = np.column_stack((intrefTAC, self.refTAC, -TAC))
else:
X = np.column_stack((intrefTAC, self.refTAC, -smoothTAC[k,:].flatten()))
y = intTAC
try:
b = solve(X.T @ W @ X, X.T @ W @ y)
residual = y - X @ b
# unbiased estimator of noise variance
noiseVar_eqDVR = residual.T @ W @ residual / (n-m)
DVR = b[0]
#R1 = b[1] / b[2]
#k2 = b[0] / b[2]
BP = DVR - 1
except:
DVR = BP = noiseVar_eqDVR = 0
# ----- Get R1 -----
# Set up the weighted linear regression model
# based on Eq. 8 in Zhou et al.
#X = np.mat(np.column_stack((self.refTAC,intrefTAC,-intTAC)))
X = np.column_stack((self.refTAC,intrefTAC,-intTAC))
#y = np.mat(TAC).T
y = TAC
try:
b = solve(X.T @ W @ X, X.T @ W @ y)
residual = y - X @ b
# unbiased estimator of noise variance
noiseVar_eqR1 = residual.T @ W @ residual / (n-m)
R1 = b[0]
k2 = b[1]
k2a = b[2]
except:
R1 = k2 = k2a = noiseVar_eqR1 = 0
self.results['BP'][k] = BP
self.results['DVR'][k] = DVR
self.results['R1'][k] = R1
self.results['k2'][k] = k2
self.results['k2a'][k] = k2a
self.results['noiseVar_eqDVR'][k] = noiseVar_eqDVR
self.results['noiseVar_eqR1'][k] = noiseVar_eqR1
return self
def refine_R1(self, smoothR1, smoothk2, smoothk2a, h):
'''
Ridge regression to get better R1, k2, k2a estimates
Args:
smoothR1 (float): R1 value to drive the estimate toward
smoothk2 (float): k2 value to drive the estimate toward
smoothk2a (float): k2a value to drive the estimate toward
h (numpy.ndarray): 1-D array consisting of the diagonal elements
of the matrix used to compute the weighted norm
'''
if not smoothR1.ndim==smoothk2.ndim==smoothk2a.ndim==1:
raise ValueError('smoothR1, smoothk2, smoothk2a must be 1-D')
if not len(smoothR1)==len(smoothk2)==len(smoothk2a)==self.TAC.shape[0]:
raise ValueError('Length of smoothR1, smoothk2, smoothk2a must be \
equal to the number of rows of TAC')
if not h.ndim==2:
raise ValueError('h must be 2-D')
if not h.shape==(self.TAC.shape[0], 3):
raise ValueError('Number of rows of h must equal the number of rows of TAC, \
and the number of columns of h must be 3')
# Numerical integration of reference TAC
intrefTAC = km_integrate(self.refTAC,self.t,self.startActivity)
for k, TAC in enumerate(self.TAC):
W = mat.diag(self.weights[k,:])
# Numerical integration of target TAC
intTAC = km_integrate(TAC,self.t,self.startActivity)
# ----- Get R1 incorporating spatial constraint -----
# Set up the ridge regression model
# based on Eq. 11 in Zhou et al.
#X = np.mat(np.column_stack((self.refTAC,intrefTAC,-intTAC)))
X = np.column_stack((self.refTAC,intrefTAC,-intTAC))
#y = np.mat(TAC).T
y = TAC
H = mat.diag(h[k,:])
b_sc = np.array((smoothR1[k],smoothk2[k],smoothk2a[k])) #.reshape(-1,1)
try:
b = solve(X.T @ W @ X + H, X.T @ W @ y + H @ b_sc)
R1_lrsc = b[0]
k2_lrsc = b[1]
k2a_lrsc = b[2]
except:
R1_lrsc = k2_lrsc = k2a_lrsc = 0
self.results['R1_lrsc'][k] = R1_lrsc
self.results['k2_lrsc'][k] = k2_lrsc
self.results['k2a_lrsc'][k] = k2a_lrsc
return self
class SRTM_Lammertsma1996(KineticModel):
'''
Compute binding potential (BP) and relative delivery (R1) kinetic parameters
from dynamic PET data based on a simplified reference tissue model (SRTM).
Reference:
Lammertsma AA, Hume SP. Simplified reference tissue model for PET receptor
studies. NeuroImage. 1996 Dec;4(3 Pt 1):153-8.
'''
# This class will compute the following results:
result_names = [ # estimated parameters
'BP','R1','k2']#,
# model fit indicators
#'akaike']
def fit(self):
n = len(self.t)
m = 4 # 3 model parameters + noise variance
def make_srtm_est(startActivity):
'''
Wrapper to construct the SRTM TAC estimation function with a given
startActivity.
Args:
startActivity (str): determines initial condition for integration.
See integrate in kineticmodel.py
Returns:
srtm_est (function): function to compute fitted TAC given t,
refTAC, BP, R1, k2
'''
def srtm_est(X, BPnd, R1, k2):
'''
Compute fitted TAC given t, refTAC, BP, R1, k2.
Args:
X (tuple): first element is t, second element is intrefTAC
BPnd (float): binding potential
R1 (float): R1
k2 (float): k2
Returns:
TAC_est (numpy.ndarray): 1-D array
estimated time activity curve
'''
t, refTAC = X
k2a = k2/(BPnd+1)
# Convolution of the reference TAC with exp(-k2a*t), computed as
# exp(-k2a*t) times the numerical integral of refTAC(t)*exp(k2a*t).
exp_k2a_t = np.exp(k2a*t)  # api: numpy.exp
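A compact sketch (with synthetic data, not the PET data above) of the weighted-least-squares step used throughout this model, b = (XᵀWX)⁻¹XᵀWy, solved with scipy.linalg.solve exactly as in the class methods.

import numpy as np
from scipy.linalg import solve

rng = np.random.default_rng(0)
n, m = 50, 3
X = rng.normal(size=(n, m))
true_b = np.array([2.0, -1.0, 0.5])
y = X @ true_b + 0.01 * rng.normal(size=n)
W = np.diag(np.ones(n))  # frame weights; identity here for simplicity

b = solve(X.T @ W @ X, X.T @ W @ y)
residual = y - X @ b
noise_var = residual.T @ W @ residual / (n - m)  # unbiased noise-variance estimate
print(b, noise_var)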
import numpy as np
from gym_fishing.envs.base_fishing_env import BaseFishingEnv
class Allen(BaseFishingEnv):
def __init__(
self,
r=0.3,
K=1,
C=0.5,
sigma=0.0,
init_state=0.75,
Tmax=100,
file=None,
):
super().__init__(
params={"r": r, "K": K, "sigma": sigma, "C": C, "x0": init_state},
Tmax=Tmax,
file=file,
)
def population_draw(self):
self.fish_population = allen(self.fish_population, self.params)
return self.fish_population
class BevertonHolt(BaseFishingEnv):
def __init__(
self, r=0.3, K=1, sigma=0.0, init_state=0.75, Tmax=100, file=None
):
super().__init__(
params={"r": r, "K": K, "sigma": sigma, "x0": init_state},
Tmax=Tmax,
file=file,
)
def population_draw(self):
self.fish_population = beverton_holt(self.fish_population, self.params)
return self.fish_population
class Myers(BaseFishingEnv):
def __init__(
self,
r=1.0,
K=1.0,
M=1.0,
theta=3.0,
sigma=0.0,
init_state=1.5,
Tmax=100,
file=None,
):
super().__init__(
params={
"r": r,
"K": K,
"sigma": sigma,
"theta": theta,
"M": M,
"x0": init_state,
},
Tmax=Tmax,
file=file,
)
def population_draw(self):
self.fish_population = myers(self.fish_population, self.params)
return self.fish_population
# (r =.7, beta = 1.2, q = 3, b = 0.15, a = 0.2) # lower-state peak is optimal
# (r =.7, beta = 1.5, q = 3, b = 0.15, a = 0.2) # higher-state peak is optimal
class May(BaseFishingEnv):
def __init__(
self,
r=0.7,
K=1.5,
M=1.5,
q=3,
b=0.15,
sigma=0.0,
a=0.2,
init_state=0.75,
Tmax=100,
file=None,
):
super().__init__(
params={
"r": r,
"K": K,
"sigma": sigma,
"q": q,
"b": b,
"a": a,
"M": M,
"x0": init_state,
},
Tmax=Tmax,
file=file,
)
def population_draw(self):
self.fish_population = may(self.fish_population, self.params)
return self.fish_population
class Ricker(BaseFishingEnv):
def __init__(
self, r=0.3, K=1, sigma=0.0, init_state=0.75, Tmax=100, file=None
):
super().__init__(
params={"r": r, "K": K, "sigma": sigma, "x0": init_state},
Tmax=Tmax,
file=file,
)
def population_draw(self):
self.fish_population = ricker(self.fish_population, self.params)
return self.fish_population
class NonStationary(BaseFishingEnv):
def __init__(
self,
r=0.8,
K=1,
sigma=0.0,
alpha=-0.007,
init_state=0.75,
Tmax=100,
file=None,
):
super().__init__(
params={
"r": r,
"K": K,
"sigma": sigma,
"alpha": alpha,
"x0": init_state,
},
Tmax=Tmax,
file=file,
)
def population_draw(self):
self.params["r"] = self.params["r"] + self.params["alpha"]
self.fish_population = beverton_holt(self.fish_population, self.params)
return self.fish_population
class ModelUncertainty(BaseFishingEnv):
def __init__(
self,
models=["allen", "beverton_holt", "myers", "may", "ricker"],
params={
"allen": {"r": 0.3, "K": 1.0, "sigma": 0.0, "C": 0.5, "x0": 0.75},
"beverton_holt": {"r": 0.3, "K": 1, "sigma": 0.0, "x0": 0.75},
"myers": {
"r": 1.0,
"K": 1.0,
"M": 1.0,
"theta": 3.0,
"sigma": 0.0,
"x0": 1.5,
},
"may": {
"r": 0.7,
"K": 1.5,
"M": 1.5,
"q": 3,
"b": 0.15,
"sigma": 0.0,
"a": 0.2,
"x0": 0.75,
},
"ricker": {"r": 0.3, "K": 1, "sigma": 0.0, "x0": 0.75},
},
Tmax=100,
file=None,
):
super().__init__(
Tmax=Tmax,
file=file,
)
self.model = np.random.choice(models)
self.models = models
self.params = params
def population_draw(self):
f = population_model[self.model]
p = self.params[self.model]
self.fish_population = f(self.fish_population, p)
return self.fish_population
def reset(self):
self.state = np.array([self.init_state / self.K - 1])
self.fish_population = self.init_state
self.model = np.random.choice(self.models)
self.years_passed = 0
self.reward = 0
self.harvest = 0
return self.state
# Growth Functions #
def allen(x, params):
with np.errstate(divide="ignore"):
mu = (
np.log(x)
+ params["r"]
* (1 - x / params["K"])
* (1 - params["C"])
/ params["K"]
)
return np.maximum(0, np.random.lognormal(mu, params["sigma"]))  # api: numpy.random.lognormal
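A hedged sketch of how np.random.lognormal acts as multiplicative process noise in these growth models: with mu already containing log(x), the draw returns x scaled by a lognormal factor. The Ricker-style update below is illustrative only; the actual ricker/beverton_holt/myers/may functions are defined elsewhere in the module.

import numpy as np

def ricker_like_step(x, r=0.3, K=1.0, sigma=0.05):
    """Illustrative stochastic growth step (not the package's own ricker())."""
    with np.errstate(divide="ignore"):
        mu = np.log(x) + r * (1 - x / K)
    return np.maximum(0.0, np.random.lognormal(mu, sigma))

x = 0.75
for _ in range(5):
    x = ricker_like_step(x)
    print(x)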
import apricot
import numpy as np
import torch
import torch.nn.functional as F
from scipy.sparse import csr_matrix
from .dataselectionstrategy import DataSelectionStrategy
from torch.utils.data.sampler import SubsetRandomSampler
class SubmodularSelectionStrategy(DataSelectionStrategy):
"""
This class extends :class:`selectionstrategies.supervisedlearning.dataselectionstrategy.DataSelectionStrategy`
to include submodular optimization functions using apricot for data selection.
Parameters
----------
trainloader: class
Loading the training data using pytorch DataLoader
valloader: class
Loading the validation data using pytorch DataLoader
model: class
Model architecture used for training
loss_type: class
The type of loss criterion
device: str
The device being utilized - cpu | cuda
num_classes: int
The number of target classes in the dataset
linear_layer: bool
Apply linear transformation to the data
if_convex: bool
If convex or not
selection_type: str
PerClass or Supervised
submod_func_type: str
The type of submodular optimization function. Must be one of
'facility-location', 'graph-cut', 'sum-redundancy', 'saturated-coverage'
"""
def __init__(self, trainloader, valloader, model, loss,
device, num_classes, linear_layer, if_convex, selection_type, submod_func_type, optimizer):
"""
Constructor method
"""
super().__init__(trainloader, valloader, model, num_classes, linear_layer, loss, device)
self.if_convex = if_convex
self.selection_type = selection_type
self.submod_func_type = submod_func_type
self.optimizer = optimizer
def distance(self, x, y, exp=2):
"""
Compute the distance.
Parameters
----------
x: Tensor
First input tensor
y: Tensor
Second input tensor
exp: float, optional
The exponent value (default: 2)
Returns
----------
dist: Tensor
Output tensor
"""
n = x.size(0)
m = y.size(0)
d = x.size(1)
x = x.unsqueeze(1).expand(n, m, d)
y = y.unsqueeze(0).expand(n, m, d)
dist = torch.pow(x - y, exp).sum(2)
# dist = torch.exp(-1 * torch.pow(x - y, 2).sum(2))
return dist
def compute_score(self, model_params, idxs):
"""
Compute the score of the indices.
Parameters
----------
model_params: OrderedDict
Python dictionary object containing models parameters
idxs: list
The indices
"""
trainset = self.trainloader.sampler.data_source
subset_loader = torch.utils.data.DataLoader(trainset, batch_size=self.trainloader.batch_size, shuffle=False,
sampler=SubsetRandomSampler(idxs),
pin_memory=True)
self.model.load_state_dict(model_params)
self.N = 0
g_is = []
if self.if_convex:
for batch_idx, (inputs, targets) in enumerate(subset_loader):
inputs, targets = inputs, targets
if self.selection_type == 'PerBatch':
self.N += 1
g_is.append(inputs.view(inputs.size()[0], -1).mean(dim=0).view(1, -1))
else:
self.N += inputs.size()[0]
g_is.append(inputs.view(inputs.size()[0], -1))
else:
embDim = self.model.get_embedding_dim()
for batch_idx, (inputs, targets) in enumerate(subset_loader):
inputs, targets = inputs.to(self.device), targets.to(self.device, non_blocking=True)
if self.selection_type == 'PerBatch':
self.N += 1
else:
self.N += inputs.size()[0]
out, l1 = self.model(inputs, freeze=True, last=True)
loss = self.loss(out, targets).sum()
l0_grads = torch.autograd.grad(loss, out)[0]
if self.linear_layer:
l0_expand = torch.repeat_interleave(l0_grads, embDim, dim=1)
l1_grads = l0_expand * l1.repeat(1, self.num_classes)
if self.selection_type == 'PerBatch':
g_is.append(torch.cat((l0_grads, l1_grads), dim=1).mean(dim=0).view(1, -1))
else:
g_is.append(torch.cat((l0_grads, l1_grads), dim=1))
else:
if self.selection_type == 'PerBatch':
g_is.append(l0_grads.mean(dim=0).view(1, -1))
else:
g_is.append(l0_grads)
self.dist_mat = torch.zeros([self.N, self.N], dtype=torch.float32)
first_i = True
if self.selection_type == 'PerBatch':
g_is = torch.cat(g_is, dim=0)
self.dist_mat = self.distance(g_is, g_is).cpu()
else:
for i, g_i in enumerate(g_is, 0):
if first_i:
size_b = g_i.size(0)
first_i = False
for j, g_j in enumerate(g_is, 0):
self.dist_mat[i * size_b: i * size_b + g_i.size(0),
j * size_b: j * size_b + g_j.size(0)] = self.distance(g_i, g_j).cpu()
self.const = torch.max(self.dist_mat).item()
self.dist_mat = (self.const - self.dist_mat).numpy()
def compute_gamma(self, idxs):
"""
Compute the gamma values for the indices.
Parameters
----------
idxs: list
The indices
Returns
----------
gamma: list
Gradient values of the input indices
"""
if self.selection_type == 'PerClass':
gamma = [0 for i in range(len(idxs))]
best = self.dist_mat[idxs] # .to(self.device)
rep = np.argmax(best, axis=0)
for i in rep:
gamma[i] += 1
elif self.selection_type == 'Supervised':
gamma = [0 for i in range(len(idxs))]
best = self.dist_mat[idxs] # .to(self.device)
rep = np.argmax(best, axis=0)
for i in range(rep.shape[1]):
gamma[rep[0, i]] += 1
return gamma
def get_similarity_kernel(self):
"""
Obtain the similarity kernel.
Returns
----------
kernel: ndarray
Array of kernel values
"""
for batch_idx, (inputs, targets) in enumerate(self.trainloader):
if batch_idx == 0:
labels = targets
else:
tmp_target_i = targets
labels = torch.cat((labels, tmp_target_i), dim=0)
kernel = np.zeros((labels.shape[0], labels.shape[0]))
for target in np.unique(labels):
x = np.where(labels == target)[0]
# prod = np.transpose([np.tile(x, len(x)), np.repeat(x, len(x))])
for i in x:
kernel[i, x] = 1
return kernel
def select(self, budget, model_params):
"""
Data selection method using different submodular optimization
functions.
Parameters
----------
budget: int
The number of data points to be selected
model_params: OrderedDict
Python dictionary object containing models parameters
optimizer: str
The optimization approach for data selection. Must be one of
'random', 'modular', 'naive', 'lazy', 'approximate-lazy', 'two-stage',
'stochastic', 'sample', 'greedi', 'bidirectional'
Returns
----------
total_greedy_list: list
List containing indices of the best datapoints
gammas: list
List containing gradients of datapoints present in greedySet
"""
for batch_idx, (inputs, targets) in enumerate(self.trainloader):
if batch_idx == 0:
x_trn, labels = inputs, targets
else:
tmp_inputs, tmp_target_i = inputs, targets
labels = torch.cat((labels, tmp_target_i), dim=0)
per_class_bud = int(budget / self.num_classes)
total_greedy_list = []
gammas = []
if self.selection_type == 'PerClass':
for i in range(self.num_classes):
idxs = torch.where(labels == i)[0]
self.compute_score(model_params, idxs)
if self.submod_func_type == 'facility-location':
fl = apricot.functions.facilityLocation.FacilityLocationSelection(random_state=0,
metric='precomputed',
n_samples=per_class_bud,
optimizer=self.optimizer)
elif self.submod_func_type == 'graph-cut':
fl = apricot.functions.graphCut.GraphCutSelection(random_state=0, metric='precomputed',
n_samples=per_class_bud, optimizer=self.optimizer)
elif self.submod_func_type == 'sum-redundancy':
fl = apricot.functions.sumRedundancy.SumRedundancySelection(random_state=0, metric='precomputed',
n_samples=per_class_bud,
optimizer=self.optimizer)
elif self.submod_func_type == 'saturated-coverage':
fl = apricot.functions.saturatedCoverage.SaturatedCoverageSelection(random_state=0,
metric='precomputed',
n_samples=per_class_bud,
optimizer=self.optimizer)
sim_sub = fl.fit_transform(self.dist_mat)
greedyList = list(np.argmax(sim_sub, axis=1))  # api: numpy.argmax
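A small sketch of the pairwise squared-distance computation used to build dist_mat, and the (max - distance) similarity conversion that is handed to the submodular optimizer; plain NumPy broadcasting is used here instead of torch, purely for illustration.

import numpy as np

def pairwise_sq_dist(x, y):
    """x: (n, d), y: (m, d) -> (n, m) matrix of squared Euclidean distances."""
    diff = x[:, None, :] - y[None, :, :]
    return (diff ** 2).sum(axis=2)

g = np.random.randn(6, 4)            # stand-in for per-sample gradient embeddings
dist_mat = pairwise_sq_dist(g, g)
sim_mat = dist_mat.max() - dist_mat  # similarity kernel for facility-location selection
print(sim_mat.shape)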
#!/usr/bin/env python
# Copyright 2019 <NAME> & <NAME>
#
# This file is part of OBStools.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Import modules and functions
import os
import sys
import numpy as np
import pickle
import stdb
from obstools.atacr.classes import StaNoise, Power, Cross, Rotation
from obstools.atacr import utils, options, plot
def main():
# Run Input Parser
(opts, indb) = options.get_cleanspec_options()
# Load Database
db = stdb.io.load_db(fname=indb)
# Construct station key loop
allkeys = db.keys()
allkeys = sorted(allkeys)
# Extract key subset
if len(opts.stkeys) > 0:
stkeys = []
for skey in opts.stkeys:
stkeys.extend([s for s in allkeys if skey in s])
else:
stkeys = db.keys()
stkeys = sorted(stkeys)
# Loop over station keys
for stkey in list(stkeys):
# Extract station information from dictionary
sta = db[stkey]
# Path where spectra are located
specpath = 'SPECTRA/' + stkey + '/'
if not os.path.isdir(specpath):
print("Path to "+specpath+" doesn`t exist - aborting")
sys.exit()
# Path where average spectra will be saved
avstpath = 'AVG_STA/' + stkey + '/'
if not os.path.isdir(avstpath):
print("Path to "+avstpath+" doesn`t exist - creating it")
os.makedirs(avstpath)
# Path where plots will be saved
if opts.saveplot:
plotpath = avstpath + 'PLOTS/'
if not os.path.isdir(plotpath):
os.makedirs(plotpath)
else:
plotpath = False
# Get catalogue search start time
if opts.startT is None:
tstart = sta.startdate
else:
tstart = opts.startT
# Get catalogue search end time
if opts.endT is None:
tend = sta.enddate
else:
tend = opts.endT
if tstart > sta.enddate or tend < sta.startdate:
continue
# Temporary print locations
tlocs = sta.location
if len(tlocs) == 0:
tlocs = ['']
for il in range(0, len(tlocs)):
if len(tlocs[il]) == 0:
tlocs[il] = "--"
sta.location = tlocs
# Update Display
print()
print("|===============================================|")
print("|===============================================|")
print("| {0:>8s} |".format(
sta.station))
print("|===============================================|")
print("|===============================================|")
print("| Station: {0:>2s}.{1:5s} |".format(
sta.network, sta.station))
print("| Channel: {0:2s}; Locations: {1:15s} |".format(
sta.channel, ",".join(tlocs)))
print("| Lon: {0:7.2f}; Lat: {1:6.2f} |".format(
sta.longitude, sta.latitude))
print("| Start time: {0:19s} |".format(
sta.startdate.strftime("%Y-%m-%d %H:%M:%S")))
print("| End time: {0:19s} |".format(
sta.enddate.strftime("%Y-%m-%d %H:%M:%S")))
print("|-----------------------------------------------|")
# Filename for output average spectra
dstart = str(tstart.year).zfill(4)+'.'+str(tstart.julday).zfill(3)+'-'
dend = str(tend.year).zfill(4)+'.'+str(tend.julday).zfill(3)+'.'
fileavst = avstpath + dstart + dend + 'avg_sta.pkl'
if os.path.exists(fileavst):
if not opts.ovr:
print("* -> file "+fileavst+" exists - continuing")
continue
# Containers for power and cross spectra
coh_all = []
ph_all = []
coh_12_all = []
coh_1Z_all = []
coh_1P_all = []
coh_2Z_all = []
coh_2P_all = []
coh_ZP_all = []
ph_12_all = []
ph_1Z_all = []
ph_1P_all = []
ph_2Z_all = []
ph_2P_all = []
ph_ZP_all = []
ad_12_all = []
ad_1Z_all = []
ad_1P_all = []
ad_2Z_all = []
ad_2P_all = []
ad_ZP_all = []
nwins = []
t1 = tstart
# Initialize StaNoise object
stanoise = StaNoise()
# Loop through each day within the time range
while t1 < tend:
year = str(t1.year).zfill(4)
jday = str(t1.julday).zfill(3)
tstamp = year+'.'+jday+'.'
filespec = specpath + tstamp + 'spectra.pkl'
# Load file if it exists
if os.path.exists(filespec):
print()
print(
"*******************************************" +
"*****************")
print('* Calculating noise spectra for key ' +
stkey+' and day '+year+'.'+jday)
print("* -> file "+filespec+" found - loading")
file = open(filespec, 'rb')
daynoise = pickle.load(file)
file.close()
stanoise += daynoise
else:
t1 += 3600.*24.
continue
coh_all.append(daynoise.rotation.coh)
ph_all.append(daynoise.rotation.ph)
# Coherence
coh_12_all.append(
utils.smooth(
utils.coherence(
daynoise.cross.c12,
daynoise.power.c11,
daynoise.power.c22), 50))
coh_1Z_all.append(
utils.smooth(
utils.coherence(
daynoise.cross.c1Z,
daynoise.power.c11,
daynoise.power.cZZ), 50))
coh_1P_all.append(
utils.smooth(
utils.coherence(
daynoise.cross.c1P,
daynoise.power.c11,
daynoise.power.cPP), 50))
coh_2Z_all.append(
utils.smooth(
utils.coherence(
daynoise.cross.c2Z,
daynoise.power.c22,
daynoise.power.cZZ), 50))
coh_2P_all.append(
utils.smooth(
utils.coherence(
daynoise.cross.c2P,
daynoise.power.c22,
daynoise.power.cPP), 50))
coh_ZP_all.append(
utils.smooth(
utils.coherence(
daynoise.cross.cZP,
daynoise.power.cZZ,
daynoise.power.cPP), 50))
# Phase
try:
ph_12_all.append(
180./np.pi*utils.phase(daynoise.cross.c12))
except:
ph_12_all.append(None)
try:
ph_1Z_all.append(
180./np.pi*utils.phase(daynoise.cross.c1Z))
except:
ph_1Z_all.append(None)
try:
ph_1P_all.append(
180./np.pi*utils.phase(daynoise.cross.c1P))
except:
ph_1P_all.append(None)
try:
ph_2Z_all.append(
180./np.pi*utils.phase(daynoise.cross.c2Z))
except:
ph_2Z_all.append(None)
try:
ph_2P_all.append(
180./np.pi*utils.phase(daynoise.cross.c2P))
except:
ph_2P_all.append(None)
try:
ph_ZP_all.append(
180./np.pi*utils.phase(daynoise.cross.cZP))
except:
ph_ZP_all.append(None)
# Admittance
ad_12_all.append(utils.smooth(utils.admittance(
daynoise.cross.c12, daynoise.power.c11), 50))
ad_1Z_all.append(utils.smooth(utils.admittance(
daynoise.cross.c1Z, daynoise.power.c11), 50))
ad_1P_all.append(utils.smooth(utils.admittance(
daynoise.cross.c1P, daynoise.power.c11), 50))
ad_2Z_all.append(utils.smooth(utils.admittance(
daynoise.cross.c2Z, daynoise.power.c22), 50))
ad_2P_all.append(utils.smooth(utils.admittance(
daynoise.cross.c2P, daynoise.power.c22), 50))
ad_ZP_all.append(utils.smooth(utils.admittance(
daynoise.cross.cZP, daynoise.power.cZZ), 50))
t1 += 3600.*24.
# Convert to numpy arrays
coh_all = np.array(coh_all)
ph_all = np.array(ph_all)
coh_12_all = np.array(coh_12_all)  # api: numpy.array
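The utils.coherence and utils.smooth helpers are not shown in this excerpt; a plausible minimal version is sketched below (magnitude-squared coherence plus a running-mean smoother), purely as a reading aid and not the OBStools implementation.

import numpy as np

def coherence_example(Gxy, Gxx, Gyy):
    """Magnitude-squared coherence from a cross-spectrum and two auto-spectra."""
    return np.abs(Gxy) ** 2 / (Gxx * Gyy)

def smooth_example(data, ns):
    """Simple running-mean smoother over ns points."""
    kernel = np.ones(ns) / ns
    return np.convolve(data, kernel, mode="same")

f = np.linspace(0.0, 1.0, 200)
Gxx = 1.0 + f
Gyy = 2.0 - f
Gxy = 0.5 * np.sqrt(Gxx * Gyy) * np.exp(1j * 0.1 * f)   # synthetic cross-spectrum
coh = smooth_example(coherence_example(Gxy, Gxx, Gyy), 50)
print(coh.shape)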
from __future__ import print_function, division
import os
import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset
eps = np.finfo(float).eps #small number to avoid zeros
class Foundation_Type_Testset(Dataset):
def __init__(self, image_folder, transform=None,mask_buildings=False, load_masks=False):
self.transform = transform
self.img_paths = []
self.mask_paths = []
self.filenames = []
self.mask_buildings = mask_buildings
self.load_masks = load_masks
if not os.path.isdir(image_folder):
if os.path.isfile(image_folder):
# The following format is to be consistent with os.walk output
file_list = [[os.path.split(image_folder)[0],'None',[os.path.split(image_folder)[1]]]]
else:
print('Error: Image folder or file {} not found.'.format(image_folder))
exit()
else:
file_list = os.walk(image_folder, followlinks=True)
for root, _, fnames in sorted(file_list):
for fname in sorted(fnames):
if 'jpg' in fname or 'png' in fname:
if 'mask' in fname:
continue
img_path = os.path.join(root, fname)
if self.load_masks:
_, file_extension = os.path.splitext(img_path)
mask_filename = fname.replace(file_extension, '-mask.png')
mask_path = os.path.join(root, mask_filename)
if not os.path.isfile(mask_path):
print('No mask for {}. Skipping'.format(fname))
continue
self.mask_paths.append(mask_path)
self.filenames.append(fname)
self.img_paths.append(img_path)
def __len__(self):
return len(self.img_paths)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
img_name = self.img_paths[idx]
image = Image.open(img_name)
if self.mask_buildings and self.load_masks:
image = np.array(image)
mask_filename = self.mask_paths[idx]
mask = Image.open(mask_filename)
mask = np.array(mask)
# Filter building labels
mask[np.where((mask != 25) & (mask != 1))] = 0
image[mask == 0, :] = 0
image = Image.fromarray(np.uint8(image))
if (self.transform):
image = self.transform(image)
fname = self.filenames[idx]
return (image, fname)
class Foundation_Type_Binary(Dataset):
def __init__(self, image_folder, transform=None,mask_buildings=False, load_masks=False):
self.transform = transform
self.classes = ['Raised','Not Raised']
self.img_paths = []
self.mask_paths = []
labels = []
self.mask_buildings = mask_buildings
self.load_masks = load_masks
assert os.path.isdir(image_folder),'Image folder {} not found or not a path'.format(image_folder)
for root, _, fnames in sorted(os.walk(image_folder, followlinks=True)):
for fname in sorted(fnames):
if 'jpg' in fname or 'png' in fname:
if 'mask' in fname:
continue
img_path = os.path.join(root, fname)
_, file_extension = os.path.splitext(img_path)
mask_filename = fname.replace(file_extension, '-mask.png')
mask_path = os.path.join(root, mask_filename)
if not os.path.isfile(mask_path):
print('No mask for {}. Skipping'.format(fname))
continue
labels.append(os.path.dirname(img_path).split(os.path.sep)[-1])
self.img_paths.append(img_path)
self.mask_paths.append(mask_path)
self.train_labels = np.zeros(len(labels))
for class_id in ['5001', '5005', '5002', '5003']:
idx = np.where(np.array(labels) == class_id)[0]
self.train_labels[idx] = 0
for class_id in ['5004', '5006']: # Piles Piers and Posts
idx = np.where(np.array(labels) == class_id)[0]
self.train_labels[idx] = 1
# Train weights for optional weighted sampling
self.train_weights = np.ones(len(self.train_labels))
self.train_weights[self.train_labels == 0] = np.sum(self.train_labels == 0) / len(self.train_labels)
self.train_weights[self.train_labels == 1] = np.sum(self.train_labels == 1) / len(self.train_labels)
self.train_weights = 1-self.train_weights
def __len__(self):
return len(self.img_paths)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
img_name = self.img_paths[idx]
image = Image.open(img_name)
if self.mask_buildings and self.load_masks:
image = np.array(image)
mask_filename = self.mask_paths[idx]
mask = Image.open(mask_filename)
mask = np.array(mask)
# Filter building labels
mask[np.where((mask != 25) & (mask != 1))] = 0
image[mask == 0, :] = 0
image = Image.fromarray(np.uint8(image))  # api: numpy.uint8
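A condensed sketch of the building-mask step used in both dataset classes above: pixels whose mask label is neither 25 nor 1 are zeroed before the image is handed to the transform. The arrays are fabricated here instead of loading real files.

import numpy as np
from PIL import Image

# Fabricated 4x4 RGB image and label mask (values 25 and 1 mark buildings).
image = np.arange(4 * 4 * 3, dtype=np.uint8).reshape(4, 4, 3)
mask = np.array([[25, 0, 1, 0],
                 [0, 25, 0, 0],
                 [1, 0, 0, 25],
                 [0, 0, 1, 0]])

mask[np.where((mask != 25) & (mask != 1))] = 0   # keep only building labels
image[mask == 0, :] = 0                          # black out non-building pixels
pil_image = Image.fromarray(np.uint8(image))
print(pil_image.size)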
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import random
from random import shuffle, randint, choice
from collections import defaultdict
from paddle.io import IterableDataset
class Rating(object):
def __init__(self, trainingSet, testSet):
self.evalSettings = {"cv": 5, "b": 1} # "-cv 5 -b 1"
self.user = {} # map user names to id
self.item = {} # map item names to id
self.id2user = {}
self.id2item = {}
self.userMeans = {} # mean values of users' ratings
self.itemMeans = {} # mean values of items' ratings
self.globalMean = 0
self.trainSet_u = defaultdict(dict)
self.trainSet_i = defaultdict(dict)
self.testSet_u = defaultdict(
dict) # test set in the form of [user][item]=rating
self.testSet_i = defaultdict(
dict) # test set in the form of [item][user]=rating
self.rScale = [] # rating scale
self.trainingData = trainingSet[:]
self.testData = testSet[:]
self.__generateSet()
self.__computeItemMean()
self.__computeUserMean()
self.__globalAverage()
def __generateSet(self):
scale = set()
for i, entry in enumerate(self.trainingData):
userName, itemName, rating = entry
# makes the rating within the range [0, 1].
# rating = normalize(float(rating), self.rScale[-1], self.rScale[0])
# self.trainingData[i][2] = rating
# order the user
if userName not in self.user:
self.user[userName] = len(self.user)
self.id2user[self.user[userName]] = userName
# order the item
if itemName not in self.item:
self.item[itemName] = len(self.item)
self.id2item[self.item[itemName]] = itemName
# userList.append
self.trainSet_u[userName][itemName] = rating
self.trainSet_i[itemName][userName] = rating
scale.add(float(rating))
self.rScale = list(scale)
self.rScale.sort()
for entry in self.testData:
userName, itemName, rating = entry
self.testSet_u[userName][itemName] = rating
self.testSet_i[itemName][userName] = rating
def __globalAverage(self):
total = sum(self.userMeans.values())
if total == 0:
self.globalMean = 0
else:
self.globalMean = total / len(self.userMeans)
def __computeUserMean(self):
for u in self.user:
self.userMeans[u] = sum(self.trainSet_u[u].values()) / len(
self.trainSet_u[u])
def __computeItemMean(self):
for c in self.item:
self.itemMeans[c] = sum(self.trainSet_i[c].values()) / len(
self.trainSet_i[c])
def getUserId(self, u):
if u in self.user:
return self.user[u]
def getItemId(self, i):
if i in self.item:
return self.item[i]
def trainingSize(self):
return len(self.user), len(self.item), len(self.trainingData)
def testSize(self):
return len(self.testSet_u), len(self.testSet_i), len(self.testData)
def contains(self, u, i):
if u in self.user and i in self.trainSet_u[u]:
return True
else:
return False
def containsUser(self, u):
if u in self.user:
return True
else:
return False
def containsItem(self, i):
if i in self.item:
return True
else:
return False
def userRated(self, u):
return list(self.trainSet_u[u].keys()), list(self.trainSet_u[u].values(
))
def itemRated(self, i):
return list(self.trainSet_i[i].keys()), list(self.trainSet_i[i].values(
))
def row(self, u):
k, v = self.userRated(u)
vec = np.zeros(len(self.item))
# print vec
for pair in zip(k, v):
iid = self.item[pair[0]]
vec[iid] = pair[1]
return vec
def col(self, i):
k, v = self.itemRated(i)
vec = np.zeros(len(self.user))
# print vec
for pair in zip(k, v):
uid = self.user[pair[0]]
vec[uid] = pair[1]
return vec
def matrix(self):
m = np.zeros((len(self.user), len(self.item)))
for u in self.user:
k, v = self.userRated(u)
vec = np.zeros(len(self.item))
# print vec
for pair in zip(k, v):
iid = self.item[pair[0]]
vec[iid] = pair[1]
m[self.user[u]] = vec
return m
def sRow(self, u):
return self.trainSet_u[u]
def sCol(self, c):
return self.trainSet_i[c]
def rating(self, u, c):
if self.contains(u, c):
return self.trainSet_u[u][c]
return -1
def ratingScale(self):
return self.rScale[0], self.rScale[1]
def elemCount(self):
return len(self.trainingData)
class SparseMatrix():
'matrix used to store raw data'
def __init__(self, triple):
self.matrix_User = {}
self.matrix_Item = {}
for item in triple:
if item[0] not in self.matrix_User:
self.matrix_User[item[0]] = {}
if item[1] not in self.matrix_Item:
self.matrix_Item[item[1]] = {}
self.matrix_User[item[0]][item[1]] = item[2]
self.matrix_Item[item[1]][item[0]] = item[2]
self.elemNum = len(triple)
self.size = (len(self.matrix_User), len(self.matrix_Item))
def sRow(self, r):
if r not in self.matrix_User:
return {}
else:
return self.matrix_User[r]
def sCol(self, c):
if c not in self.matrix_Item:
return {}
else:
return self.matrix_Item[c]
def row(self, r):
if r not in self.matrix_User:
return np.zeros((1, self.size[1]))
else:
array = np.zeros((1, self.size[1]))
ind = list(self.matrix_User[r].keys())
val = list(self.matrix_User[r].values())
array[0][ind] = val
return array
def col(self, c):
if c not in self.matrix_Item:
return np.zeros((1, self.size[0]))
else:
array = np.zeros((1, self.size[0]))
ind = list(self.matrix_Item[c].keys())
val = list(self.matrix_Item[c].values())
array[0][ind] = val
return array
def elem(self, r, c):
if not self.contains(r, c):
return 0
return self.matrix_User[r][c]
def contains(self, r, c):
if r in self.matrix_User and c in self.matrix_User[r]:
return True
return False
def elemCount(self):
return self.elemNum
def size(self):
return self.size
class Social(object):
def __init__(self, relation=None):
self.user = {} # used to store the order of users
self.relation = relation
self.followees = defaultdict(dict)
self.followers = defaultdict(dict)
self.trustMatrix = self.__generateSet()
def __generateSet(self):
triple = []
for line in self.relation:
userId1, userId2, weight = line
# add relations to dict
self.followees[userId1][userId2] = weight
self.followers[userId2][userId1] = weight
# order the user
if userId1 not in self.user:
self.user[userId1] = len(self.user)
if userId2 not in self.user:
self.user[userId2] = len(self.user)
triple.append([self.user[userId1], self.user[userId2], weight])
return SparseMatrix(triple)
def row(self, u):
# return user u's followees
return self.trustMatrix.row(self.user[u])
def col(self, u):
# return user u's followers
return self.trustMatrix.col(self.user[u])
def elem(self, u1, u2):
return self.trustMatrix.elem(u1, u2)
def weight(self, u1, u2):
if u1 in self.followees and u2 in self.followees[u1]:
return self.followees[u1][u2]
else:
return 0
def trustSize(self):
return self.trustMatrix.size
def getFollowers(self, u):
if u in self.followers:
return self.followers[u]
else:
return {}
def getFollowees(self, u):
if u in self.followees:
return self.followees[u]
else:
return {}
def hasFollowee(self, u1, u2):
if u1 in self.followees:
if u2 in self.followees[u1]:
return True
else:
return False
return False
def hasFollower(self, u1, u2):
if u1 in self.followers:
if u2 in self.followers[u1]:
return True
else:
return False
return False
def loadDataSet(file, bTest=False, binarized=False, threshold=3.0):
trainingData, testData = [], []
with open(file) as f:
ratings = f.readlines()
order = ["0", "1", "2"]
for lineNo, line in enumerate(ratings):
items = line.strip().split("\t")
try:
userId = items[int(order[0])]
itemId = items[int(order[1])]
rating = items[int(order[2])]
if binarized:
if float(items[int(order[2])]) < threshold:
continue
else:
rating = 1
except ValueError:
print("Error! Dataset")
if bTest:
testData.append([userId, itemId, float(rating)])
else:
trainingData.append([userId, itemId, float(rating)])
if bTest:
return testData
else:
return trainingData
def loadRelationship(file):
relation = []
with open(file) as f:
relations = f.readlines()
order = ["0", "1"]
for lineNo, line in enumerate(relations):
items = line.strip().split("\t")
userId1 = items[int(order[0])]
userId2 = items[int(order[1])]
weight = 1
relation.append([userId1, userId2, weight])
return relation
def crossValidation(data, k, binarized=False):
if k <= 1 or k > 10:
k = 3
for i in range(k):
trainingSet = []
testSet = []
for ind, line in enumerate(data):
if ind % k == i:
if binarized:
if line[2]:
testSet.append(line[:])
else:
testSet.append(line[:])
else:
trainingSet.append(line[:])
yield trainingSet, testSet
class RecDataset(IterableDataset):
def __init__(self, file_list, config):
super(RecDataset, self).__init__()
self.is_train = config.get("runner.is_train", True)
self.trainingSet = loadDataSet(
config.get("runner.rating_file", None),
bTest=False,
binarized=True,
threshold=1.0)
self.relation = loadRelationship(
config.get("runner.relation_file", None))
self.social = Social(relation=self.relation)
self.batch_size = config.get("runner.train_batch_size", 2000)
for trainingSet, testSet in crossValidation(self.trainingSet, k=5):
self.data = Rating(trainingSet, testSet)
self.trainingSet = trainingSet
self.testSet = testSet
break
_, _, self.train_size = self.data.trainingSize()
_, _, self.test_size = self.data.testSize()
random.seed(2)
def get_dataset(self):
# data clean
cleanList = []
cleanPair = []
for user in self.social.followees:
if user not in self.data.user:
cleanList.append(user)
for u2 in self.social.followees[user]:
if u2 not in self.data.user:
cleanPair.append((user, u2))
for u in cleanList:
del self.social.followees[u]
for pair in cleanPair:
if pair[0] in self.social.followees:
del self.social.followees[pair[0]][pair[1]]
cleanList = []
cleanPair = []
for user in self.social.followers:
if user not in self.data.user:
cleanList.append(user)
for u2 in self.social.followers[user]:
if u2 not in self.data.user:
cleanPair.append((user, u2))
for u in cleanList:
del self.social.followers[u]
for pair in cleanPair:
if pair[0] in self.social.followers:
del self.social.followers[pair[0]][pair[1]]
idx = []
for n, pair in enumerate(self.social.relation):
if pair[0] not in self.data.user or pair[1] not in self.data.user:
idx.append(n)
for item in reversed(idx):
del self.social.relation[item]
return self.data, self.social
def __iter__(self):
count = 0
item_list = list(self.data.item.keys())
if self.is_train:
shuffle(self.data.trainingData)
while count < self.train_size:
output_list = []
user, item = self.data.trainingData[count][
0], self.data.trainingData[count][1]
neg_item = choice(item_list)
while neg_item in self.data.trainSet_u[user]:
neg_item = choice(item_list)
output_list.append(
np.array(self.data.user[user]).astype("int64"))
output_list.append(
np.array(self.data.item[item]).astype("int64"))
output_list.append(
np.array(self.data.item[neg_item]).astype("int64"))
count += 1
yield output_list
else:
while count < self.test_size:
output_list = []
user, item = self.data.testData[count][0], self.data.testData[
count][1]
neg_item = choice(item_list)
output_list.append(
np.array(self.data.user[user]).astype("int64"))
output_list.append(
np.array(self.data.item[item]).astype("int64"))
output_list.append(np.array(self.data.item[neg_item]))  # api: numpy.array
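A small sketch of the id-mapped (user, positive item, negative item) triple that __iter__ yields, using toy dictionaries in place of the Rating object; all names and values here are illustrative.

import numpy as np
from random import choice, seed

seed(2)
user_ids = {"u1": 0, "u2": 1}
item_ids = {"i1": 0, "i2": 1, "i3": 2}
train_set_u = {"u1": {"i1": 1.0}, "u2": {"i2": 1.0}}

def make_triple(user, item):
    items = list(item_ids.keys())
    neg_item = choice(items)
    while neg_item in train_set_u[user]:   # resample until the item is truly unseen
        neg_item = choice(items)
    return [np.array(user_ids[user]).astype("int64"),
            np.array(item_ids[item]).astype("int64"),
            np.array(item_ids[neg_item]).astype("int64")]

print(make_triple("u1", "i1"))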
from __future__ import division
import numpy as np
from numpy import einsum
from Florence.Tensor import trace, Voigt
from .MaterialBase import Material
class SteinmannModel(Material):
"""Steinmann's electromechanical model in terms of enthalpy
W(C,E) = W_n(C) + c1*I:(E0 ⊗ E0) + c2*C:(E0 ⊗ E0) - eps_1/2*J*C**(-1):(E0 ⊗ E0)
W_n(C) = mu/2*(C:I-3) - mu*lnJ + lamb/2*(lnJ)**2
Reference:
<NAME>, <NAME>, and <NAME>, "Numerical modelling of non-linear electroelasticity",
International Journal for Numerical Methods in Engineering, 70:685-704, (2007)
"""
def __init__(self, ndim, **kwargs):
mtype = type(self).__name__
super(SteinmannModel, self).__init__(mtype, ndim, **kwargs)
# REQUIRES SEPARATELY
self.nvar = self.ndim+1
self.energy_type = "enthalpy"
self.nature = "nonlinear"
self.fields = "electro_mechanics"
if self.ndim == 2:
self.H_VoigtSize = 5
elif self.ndim == 3:
self.H_VoigtSize = 9
# LOW LEVEL DISPATCHER
self.has_low_level_dispatcher = True
# self.has_low_level_dispatcher = False
def KineticMeasures(self,F,ElectricFieldx, elem=0):
from Florence.MaterialLibrary.LLDispatch._SteinmannModel_ import KineticMeasures
return KineticMeasures(self, np.ascontiguousarray(F), ElectricFieldx)
def Hessian(self,StrainTensors,ElectricFieldx=0,elem=0,gcounter=0):
mu = self.mu
lamb = self.lamb
c1 = self.c1
c2 = self.c2
eps_1 = self.eps_1
I = StrainTensors['I']
J = StrainTensors['J'][gcounter]
b = StrainTensors['b'][gcounter]
E = 1.0*ElectricFieldx.reshape(self.ndim,1)
Ex = E.reshape(E.shape[0])
EE = np.dot(E,E.T)
be = np.dot(b,ElectricFieldx).reshape(self.ndim)
C_Voigt = lamb/J*einsum('ij,kl',I,I) - (lamb*np.log(J) - mu)/J*( einsum('ik,jl',I,I) + einsum('il,jk',I,I) ) + \
eps_1*( einsum('ij,kl',I,EE) + einsum('ij,kl',EE,I) - einsum('ik,jl',EE,I) - einsum('il,jk',EE,I) - \
einsum('ik,jl',I,EE) )  # api: numpy.einsum
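A tiny sketch of what the einsum index patterns above produce: 'ij,kl' gives the dyadic product A_ij B_kl, while 'ik,jl' and 'il,jk' give the two index-shuffled products that appear in elasticity tensors. Shapes and one component are checked on small matrices.

import numpy as np
from numpy import einsum

I = np.eye(3)
EE = np.random.rand(3, 3)

T1 = einsum('ij,kl', I, EE)   # T1[i,j,k,l] = I[i,j] * EE[k,l]
T2 = einsum('ik,jl', I, EE)   # T2[i,j,k,l] = I[i,k] * EE[j,l]
T3 = einsum('il,jk', I, EE)   # T3[i,j,k,l] = I[i,l] * EE[j,k]
print(T1.shape, T2.shape, T3.shape)   # each (3, 3, 3, 3)
# Spot-check one component against the definition:
assert np.isclose(T2[0, 1, 0, 1], I[0, 0] * EE[1, 1])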
# Copyright 2020 Lawrence Livermore National Security, LLC and other authors: <NAME>, <NAME>, <NAME>
# SPDX-License-Identifier: MIT
# coding=utf-8
import numpy as np
import tensorflow as tf
from model import generator_c
from skimage.measure import compare_psnr
from PIL import Image
import pybm3d
import os
from skimage.transform import rescale, resize
from skimage import color
from skimage import io
import scipy
'''
*** WARNING: This code is experimental ***
'''
def projector_tf(imgs,phi=None):
csproj = tf.matmul(imgs,tf.squeeze(phi))
return csproj
def merge(images, size):
h, w = images.shape[1], images.shape[2]
if (images.shape[3] in (3,4)):
c = images.shape[3]
img = np.zeros((h * size[0], w * size[1], c))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j * h:j * h + h, i * w:i * w + w, :] = image
return img
elif images.shape[3]==1:
img = np.zeros((h * size[0], w * size[1]))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j * h:j * h + h, i * w:i * w + w] = image[:,:,0]
return img
else:
raise ValueError('in merge(images,size) images parameter '
'must have dimensions: HxW or HxWx3 or HxWx4')
def imsave(images, size, path):
image = np.squeeze(merge(images, size))
return scipy.misc.imsave(path, image)
def sample_Z(m, n):
return np.random.uniform(-1,1,size=[m, n])
def GPP_color(test_image):
# I_x = I_y = 1024
I_y = 1024
# I_x = 1536
I_x = 768
d_x = d_y = 32
dim_x = d_x*d_y
batch_size = (I_x*I_y)//(dim_x)
n_measure = 0.01
lr_factor = 1.0#*batch_size//64
dim_z = 100
dim_phi = int(n_measure*dim_x)
nIter = 201
n_img_plot_x = I_x//d_x
n_img_plot_y = I_y//d_y
iters = np.array(np.geomspace(10,10,nIter),dtype=int)
modelsave = './gan_models/gen_models_corrupt-colorcifar32'
fname = './test_images/{}.jpg'.format(test_image)
image = io.imread(fname)
x_test = resize(image, (I_x, I_y),anti_aliasing=True,preserve_range=True,mode='reflect')
x_test_ = np.array(x_test)/np.max(x_test)
x_test = []
for i in range(n_img_plot_x):
for j in range(n_img_plot_y):
_x = x_test_[i*d_x:d_x*(i+1),j*d_y:d_y*(j+1)]
x_test.append(_x)
x_test = np.array(x_test)
test_images = x_test[:batch_size,:,:,:]
imsave(test_images,[n_img_plot_x,n_img_plot_y],'cs_outs/gt_sample.png')
tf.reset_default_graph()
tf.set_random_seed(0)
np.random.seed(4321)
Y_obs_ph = tf.placeholder(tf.float32,[batch_size,dim_phi,3])
phi_ph = tf.placeholder(tf.float32,[dim_x,dim_phi])
lr = tf.placeholder(tf.float32)
tmp = 0.*tf.random_uniform([batch_size,dim_z],minval=-1.0,maxval=1.0)
z_prior_ = tf.Variable(tmp,name="z_prior")
G_sample_ = 0.5*generator_c(z_prior_,False,dim_z=dim_z)+0.5
G_sample = tf.image.resize_images(G_sample_,[d_x,d_y])
G_sample_re_r = tf.reshape(G_sample[:,:,:,0],[-1,dim_x])
G_sample_re_g = tf.reshape(G_sample[:,:,:,1],[-1,dim_x])
G_sample_re_b = tf.reshape(G_sample[:,:,:,2],[-1,dim_x])
phi_est = phi_ph
proj_corrected_r = projector_tf(G_sample_re_r,phi_est)
proj_corrected_g = projector_tf(G_sample_re_g,phi_est)
proj_corrected_b = projector_tf(G_sample_re_b,phi_est)
G_loss = 0
G_loss += tf.reduce_mean(tf.square(proj_corrected_r-Y_obs_ph[:,:,0]))
G_loss += tf.reduce_mean(tf.square(proj_corrected_g-Y_obs_ph[:,:,1]))
G_loss += tf.reduce_mean(tf.square(proj_corrected_b-Y_obs_ph[:,:,2]))
opt_loss = G_loss
t_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
g_vars = [var for var in t_vars if 'Generator' in var.name]
solution_opt = tf.train.RMSPropOptimizer(lr).minimize(opt_loss, var_list=[z_prior_])
saver = tf.train.Saver(g_vars)
merged_imgs = []
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
ckpt = tf.train.get_checkpoint_state(modelsave)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
print("************ Generator weights restored! **************")
nb = batch_size
z_test = sample_Z(100,dim_z)
phi_np = np.random.randn(dim_x,dim_phi)
phi_test_np = phi_np
print(np.mean(x_test), np.min(x_test))  # api: numpy.min
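A minimal sketch of the patch bookkeeping used above: an I_x by I_y image is cut into d-by-d tiles in row-major order and can be reassembled with the same indexing. The values and the small sizes are synthetic stand-ins for the 768/1024/32 used in the script.

import numpy as np

I_x, I_y, d = 64, 96, 32
image = np.random.rand(I_x, I_y, 3)

patches = []
for i in range(I_x // d):
    for j in range(I_y // d):
        patches.append(image[i * d:(i + 1) * d, j * d:(j + 1) * d])
patches = np.array(patches)

# Reassemble and verify the round trip.
recon = np.zeros_like(image)
idx = 0
for i in range(I_x // d):
    for j in range(I_y // d):
        recon[i * d:(i + 1) * d, j * d:(j + 1) * d] = patches[idx]
        idx += 1
assert np.allclose(recon, image)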
#!/usr/bin/env python3
from flask import Blueprint, Response, render_template, request, session
from ezprobs.geometry import area_circle
from ezprobs.hydraulics import pipe_loss, local_loss
from ezprobs.problems import Parameter, Plot
from ezprobs.units import M, CM, MM, M3PS, KINEMATIC_VISCOSITY, GRAVITY
from ezprobs.dict import DICT_GER, DICT_ENG
from io import BytesIO
from math import sqrt
from scipy.optimize import fsolve
import numpy as np
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2021 <NAME>"
__license__ = "MIT"
__email__ = "<EMAIL>"
bp = Blueprint("pressure_pipe_02", __name__)
def compute_solution():
d = 5 * CM
ha = 150 * CM
hb = 20 * CM
hout = 30 * CM
l = 2 * M
k = 0.3 * MM
nu_entry = 0.5
scale = 1 # over scaling velocity head for better display
q_initial = 3 * 10 ** -3 * M3PS
if request.method == "POST":
d = float(request.form["d"]) * MM
hb = float(request.form["hb"]) * CM
a = area_circle(d / 2)
if hb == ha:
q = 0
elif hb < hout:
q = fsolve(
lambda q: hout
+ (q / a) ** 2 / (2 * GRAVITY)
+ local_loss(nu_entry, a, q)
+ pipe_loss(l, a, k, d, q)
- ha,
1,
)[0]
else:
q = fsolve(
lambda q: hb
+ (q / a) ** 2 / (2 * GRAVITY)
+ local_loss(nu_entry, a, q)
+ pipe_loss(l, a, k, d, q)
- ha,
1,
)[0]
v = q / a
distances = np.array([0, l])
x = np.cumsum(distances)
pipe = np.array([ha-0.85, hout])
energy_horizon = np.full((len(x)), ha)
if hb == ha:
losses = np.array([0, 0])
else:
losses = np.array(
[
local_loss(nu_entry, a, q),
pipe_loss(l, a, k, d, q),
]
)
cum_losses = np.cumsum(losses)
energy_line = energy_horizon - cum_losses
energy_line[0] = energy_horizon[0]-scale*cum_losses[0] # for display
kinetic_energy = np.array([v, v])  # api: numpy.array
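A short worked sketch of the energy-line bookkeeping above: the energy horizon sits at ha, cumulative losses are subtracted along the pipe, and the velocity head v**2/(2g) separates the energy line from the pressure line. The numbers are illustrative, not the problem's.

import numpy as np

GRAVITY = 9.81
ha = 1.5                            # upstream water level [m]
v = 1.2                             # flow velocity [m/s], illustrative
losses = np.array([0.02, 0.10])     # entry loss, pipe friction loss [m]

x = np.cumsum(np.array([0.0, 2.0]))        # positions along the pipe [m]
energy_horizon = np.full(len(x), ha)
energy_line = energy_horizon - np.cumsum(losses)
kinetic_head = v ** 2 / (2 * GRAVITY)
pressure_line = energy_line - kinetic_head
print(energy_line, pressure_line)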
import io
import os
import json
import base64
import numpy as np
import matplotlib.pyplot as plt
import math
from PIL import Image
coco_dataset = {"person": 1, "bicycle": 2, "car": 3, "motorcycle": 4, "airplane": 5, "bus": 6,
"train": 7, "truck": 8, "boat": 9, "traffic light": 10, "fire hydrant": 11, "street sign": 12,
"stop sign": 13, "parking meter": 14, "bench": 15, "bird": 16, "cat": 17, "dog": 18, "horse": 19,
"sheep": 20, "cow": 21, "elephant": 22, "bear": 23, "zebra": 24, "giraffe": 25, "hat": 26, "backpack": 27,
"umbrella": 28, "shoe": 29, "eye glasses": 30, "handbag": 31, "tie": 32, "suitcase": 33, "frisbee": 34,
"skis": 35, "snowboard": 36, "sports ball": 37, "kite": 38, "baseball bat": 39, "baseball glove": 40,
"skateboard": 41, "surfboard": 42, "tennis racket": 43, "bottle": 44, "plate": 45, "wine glass": 46,
"cup": 47, "fork": 48, "knife": 49, "spoon": 50, "bowl": 51, "banana": 52, "apple": 53, "sandwich": 54,
"orange": 55, "broccoli": 56, "carrot": 57, "hot dog": 58, "pizza": 59, "donut": 60, "cake": 61,
"chair": 62, "couch": 63, "potted plant": 64, "bed": 65, "mirror": 66, "dining table": 67, "window": 68,
"desk": 69, "toilet": 70, "door": 71, "tv": 72, "laptop": 73, "mouse": 74, "remote": 75, "keyboard": 76,
"cell phone": 77, "microwave": 78, "oven": 79, "toaster": 80, "sink": 81, "refrigerator": 82,
"blender": 83, "book": 84, "clock": 85, "vase": 86, "scissors": 87, "teddy bear": 88, "hair drier": 89,
"toothbrush": 90, "hair brush": 91}
category_index = {1: "person", 2: "bicycle", 3: "car", 4: "motorcycle", 5: "airplane", 6: "bus",
7: "train", 8: "truck", 9: "boat", 10: "traffic light", 11: "fire hydrant", 12: "street sign",
13: "stop sign", 14: "parking meter", 15: "bench", 16: "bird", 17: "cat", 18: "dog", 19: "horse",
20: "sheep", 21: "cow", 22: "elephant", 23: "bear", 24: "zebra", 25: "giraffe", 26: "hat", 27: "backpack",
28: "umbrella", 29: "shoe", 30: "eye glasses", 31: "handbag", 32: "tie", 33: "suitcase", 34: "frisbee",
35: "skis", 36: "snowboard", 37: "sports ball", 38: "kite", 39: "baseball bat", 40: "baseball glove",
41: "skateboard", 42: "surfboard", 43: "tennis racket", 44: "bottle", 45: "plate", 46: "wine glass",
47: "cup", 48: "fork", 49: "knife", 50: "spoon", 51: "bowl", 52: "banana", 53: "apple", 54: "sandwich",
55: "orange", 56: "broccoli", 57: "carrot", 58: "hot dog", 59: "pizza", 60: "donut", 61: "cake",
62: "cake", 63: "couch", 64: "potted plant", 65: "bed", 66: "mirror", 67: "dining table", 68: "window",
69: "desk", 70: "toilet", 71: "door", 72: "tv", 73: "laptop", 74: "mouse", 75: "remote", 76: "keyboard",
77: "cell phone", 78: "microwave", 79: "oven", 80: "toaster", 81: "sink", 82: "refrigerator",
83: "blender", 84: "book", 85: "clock", 86: "vase", 87: "scissors", 88: "teddy bear", 89: "hair drier",
90: "toothbrush", 91: "hair brush"}
# Returns the object's position on the 2D image in pixel space; after the offset applied in
# locate(), the camera's viewing direction maps to the image centre (image_dim_x/2, image_dim_y/2).
def get_image_xy(camera_params, params, obj):
if on_right_side(camera_params, obj) > 0:
x, y = locate(camera_params, params, obj)
else: # on wrong side of camera
x = math.inf
y = math.inf
return x, y
# Check if on the right side of camera. compare to plane going through camera's
# location with a normal vector going from camera to focus
# calculating a(x-x_0)+b(y-y_0)+c(z-z_0).
# if it turns out positive, it's on the focus' side of the camera
def on_right_side(camera_params, obj):
c = camera_params["camera"]
f = camera_params["lookat"]
focus = (f[0] - c[0])*(obj[0] - c[0]) + (f[1] - c[1])*(obj[1] - c[1]) + (f[2] - c[2])*(obj[2] - c[2])
return focus >= 0
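# Illustrative sketch (hypothetical values, not part of the original module):
# with the camera at the origin looking down the +z axis, a point in front of
# the camera passes the half-space test and a point behind it does not.
def _example_on_right_side():
    camera_params = {"camera": [0.0, 0.0, 0.0], "lookat": [0.0, 0.0, 1.0]}
    assert on_right_side(camera_params, [0.0, 0.0, 5.0])       # in front of the camera
    assert not on_right_side(camera_params, [0.0, 0.0, -5.0])  # behind the camera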
# Given camera info and object's location, find object's location on 2-D image
def locate(camera_params, params, obj):
a_vertical, b_vertical, c_vertical = get_vertical_plane(camera_params)
a_horizontal, b_horizontal, c_horizontal = get_horizontal_plane(camera_params, a_vertical, b_vertical, c_vertical)
s_x = obj[0] - camera_params["camera"][0]
s_y = obj[1] - camera_params["camera"][1]
s_z = obj[2] - camera_params["camera"][2]
s_x_v, s_y_v, s_z_v = proj_vec_to_plane(a_vertical, b_vertical, c_vertical, s_x, s_y, s_z)
s_x_h, s_y_h, s_z_h = proj_vec_to_plane(a_horizontal, b_horizontal, c_horizontal, s_x, s_y, s_z)
angle_from_vertical = get_angle(a_vertical, b_vertical, c_vertical, s_x_h, s_y_h, s_z_h)
angle_from_horizontal = get_angle(a_horizontal, b_horizontal, c_horizontal, s_x_v, s_y_v, s_z_v)
x = params["image_dim_x"] / 2 * (angle_from_vertical / math.radians(params["horizontal_FoV"] / 2))
y = params["image_dim_y"] / 2 * (-angle_from_horizontal / math.radians(params["vertical_FoV"] / 2))
x = x + params["image_dim_x"] / 2
y = y + params["image_dim_y"] / 2
return x, y
# Returns the angle between the vector and the plane
# a,b,c is coefficients of plane. x, y, z is the vector.
def get_angle(a, b, c, x, y, z):
numerator = a*x + b*y + c*z # took out abs
denominator = math.sqrt(math.pow(a, 2) + math.pow(b, 2) + math.pow(c, 2)) * math.sqrt(math.pow(x, 2) + math.pow(y, 2) + math.pow(z, 2))
return math.asin(numerator / denominator) #angle in radians
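# Quick sketch (hypothetical inputs): for the plane z = 0 (normal (0, 0, 1)), a
# vector along the normal gives pi/2 and a vector lying in the plane gives 0.
def _example_get_angle():
    assert abs(get_angle(0, 0, 1, 0, 0, 1) - math.pi / 2) < 1e-12  # along the normal
    assert abs(get_angle(0, 0, 1, 1, 0, 0)) < 1e-12                # in the plane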
# Returns a,b,c for the vertical plane. only need the camera parameters
def get_vertical_plane(camera_params):
p1 = camera_params["camera"]
p2 = camera_params["lookat"]
p3 = [p1[0], p1[1] + 1, p1[2]]
a, b, c = get_abc_plane(p1, p2, p3)
    # want the normal to be on the "righthand" side of the camera
    # components of the vector for the direction the camera is pointing
camera_pointing_x = camera_params["lookat"][0] - camera_params["camera"][0]
camera_pointing_y = camera_params["lookat"][1] - camera_params["camera"][1]
camera_pointing_z = camera_params["lookat"][2] - camera_params["camera"][2]
result = np.cross(np.array([a, b, c]), np.array([camera_pointing_x, camera_pointing_y, camera_pointing_z]))
    # if the z-component of the cross product is negative, flip the normal
to_return = [-a, -b, -c] if result[2] < 0 else [a, b, c]
return to_return
# Need camera parameters and vertical plane (so horizontal will be perpendicular to it)
def get_horizontal_plane(camera_params, a_vertical, b_vertical, c_vertical):
p1 = camera_params["camera"]
p2 = camera_params["lookat"]
p3 = [p1[0] + a_vertical, p1[1] + b_vertical, p1[2] + c_vertical] #adding normal vector (a, b, c) to the point to make a third point on the horizontal plane
a, b, c = get_abc_plane(p1, p2, p3)
#make sure that this normal is upright, so b > 0
if b < 0:
return -a, -b, -c
elif b > 0:
return a, b, c
print("uh oh. camera looking straight up or straight down")
return a, b, c
# Given 3 points on a plane (p1, p2, p3), get a, b, and c coefficients in the general form.
# abc also gives a normal vector
def get_abc_plane(p1, p2, p3):
    # using Method 2 from wikipedia: https://en.wikipedia.org/wiki/Plane_(geometry)
D = np.linalg.det(np.array([[p1[0], p2[0], p3[0]], [p1[1], p2[1], p3[1]], [p1[2], p2[2], p3[2]]]))
if D == 0:
print("crap! determinant D=0")
print(p1)
print(p2)
print(p3)
return
# implicitly going to say d=-1 to obtain solution set
a = np.linalg.det(np.array([[1, 1, 1], [p1[1], p2[1], p3[1]], [p1[2], p2[2], p3[2]]])) / D
b = np.linalg.det(
|
np.array([[p1[0], p2[0], p3[0]], [1, 1, 1], [p1[2], p2[2], p3[2]]])
|
numpy.array
|
"""
Routines for displaying images and video
"""
# std libs
import time
import logging
import warnings
import itertools as itt
# third-party libs
import numpy as np
from matplotlib import ticker
import matplotlib.pylab as plt
from matplotlib.figure import Figure
from matplotlib.widgets import Slider
from matplotlib.gridspec import GridSpec
from astropy.visualization.stretch import BaseStretch
from astropy.visualization.interval import BaseInterval
from astropy.visualization.mpl_normalize import ImageNormalize
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.axes_grid1 import AxesGrid, make_axes_locatable
# local libs
from recipes.misc import duplicate_if_scalar
from recipes.array.neighbours import neighbours
from recipes.logging import LoggingMixin, get_module_logger
# relative libs
from .sliders import TripleSliders
from .utils import get_percentile_limits
# from obstools.aps import ApertureCollection
# from .zscale import zrange
# from astropy.visualization import mpl_normalize # import ImageNormalize as _
# module level logger
logger = get_module_logger()
logging.basicConfig()
logger.setLevel(logging.INFO)
# TODO: docstrings (when stable)
# TODO: unit tests
# TODO: maybe display things like contrast ratio ??
# TODO: middle mouse resets axes limits
def _sanitize_data(data):
"""
Removes nans and masked elements
Returns flattened array
"""
if np.ma.is_masked(data):
data = data[~data.mask]
return np.asarray(data[~np.isnan(data)])
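# Minimal sketch (illustrative data, not from the module): masked entries and
# NaNs are both dropped and a flat ndarray comes back.
def _example_sanitize_data():
    data = np.ma.masked_array([1.0, 2.0, np.nan, 4.0], mask=[0, 1, 0, 0])
    assert list(_sanitize_data(data)) == [1.0, 4.0]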
def move_axes(ax, x, y):
"""Move the axis in the figure by x, y"""
l, b, w, h = ax.get_position(True).bounds
ax.set_position((l + x, b + y, w, h))
def get_norm(image, interval, stretch):
"""
Parameters
----------
    image : array-like
        Image data used to choose the colour limits
    interval
        Interval algorithm, given by name or as an instance
    stretch
        Stretch (colour transform), given by name or as an instance
    Returns
    -------
    ImageNormalize
    """
# choose colour interval algorithm based on data type
if image.dtype.kind == 'i': # integer array
if image.ptp() < 1000:
interval = 'minmax'
# determine colour transform from `interval` and `stretch`
if isinstance(interval, str):
interval = interval,
interval = Interval.from_name(*interval)
#
if isinstance(stretch, str):
stretch = stretch,
stretch = Stretch.from_name(*stretch)
# Create an ImageNormalize object
return ImageNormalize(image, interval, stretch=stretch)
def get_screen_size_inches():
"""
Use QT to get the size of the primary screen in inches
Returns
-------
size_inches: list
"""
import sys
from PyQt5.QtWidgets import QApplication, QDesktopWidget
# Note the check on QApplication already running and not executing the exit
# statement at the end.
app = QApplication.instance()
if app is None:
app = QApplication(sys.argv)
else:
logger.debug(f'QApplication instance already exists: {app}')
# TODO: find out on which screen the focus is
w = QDesktopWidget()
s = w.screen()
size_inches = [s.width() / s.physicalDpiX(), s.height() / s.physicalDpiY()]
# app.exec_()
w.close()
return size_inches
# screens = app.screens()
# size_inches = np.empty((len(screens), 2))
# for i, s in enumerate(screens):
# g = s.geometry()
# size_inches[i] = np.divide(
# [g.height(), g.width()], s.physicalDotsPerInch()
# )
# app.exec_()
# return size_inches
def guess_figsize(image, fill_factor=0.75, max_pixel_size=0.2):
"""
Make an educated guess of the size of the figure needed to display the
image data.
Parameters
----------
image: np.ndarray
Sample image
fill_factor: float
Maximal fraction of screen size allowed in any direction
max_pixel_size: float
Maximum allowed pixel size
Returns
-------
size: tuple
Size (width, height) of the figure in inches
"""
# Sizes reported by mpl figures seem about half the actual size on screen
shape = np.array(np.shape(image)[::-1])
return _guess_figsize(shape, fill_factor, max_pixel_size)
def _guess_figsize(image_shape, fill_factor=0.75, max_pixel_size=0.2,
min_size=(2, 2)):
# screen dimensions
screen_size = np.array(get_screen_size_inches())
# change order of image dimensions since opposite order of screen
max_size = np.multiply(image_shape, max_pixel_size)
# get upper limit for fig size based on screen and data and fill factor
max_size = np.min([max_size, screen_size * fill_factor], 0)
# get size from data
aspect = image_shape / image_shape.max()
size = max_size[aspect == 1] * aspect
    # enlarge so that neither dimension falls below min_size
size *= max(np.max(min_size / size), 1)
logger.debug('Guessed figure size: (%.1f, %.1f)', *size)
return size
def auto_grid(n):
x = int(np.floor(np.sqrt(n)))
y = int(np.ceil(n / x))
return x, y
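# Quick sketch: auto_grid picks a near-square layout, e.g. 7 panels -> 2 x 4.
def _example_auto_grid():
    assert auto_grid(7) == (2, 4)
    assert auto_grid(9) == (3, 3)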
def set_clim_connected(x, y, artist, sliders):
artist.set_clim(*sliders.positions)
return artist
def plot_image_grid(images, layout=(), titles=(), title_kws=None, figsize=None,
plims=None, clim_all=False, **kws):
"""
Parameters
----------
    images
        Sequence of 2D arrays to display
    layout
        (n_rows, n_cols) grid shape; -1 for either entry means "infer it"
    titles
        Per-image title strings
clim_all:
Compute colour limits from the full set of pixel values for all
images. Choose this if your images are all normalised to roughly the
same scale. If False clims will be computed individually and the
colourbar sliders will be disabled.
Returns
-------
"""
# TODO: plot individual histograms - clim_each
# todo: guess fig size
n = len(images)
assert n, 'No images to plot!'
# assert clim_mode in ('all', 'row')
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
# get grid layout
if not layout:
layout = auto_grid(n)
n_rows, n_cols = layout
if n_rows == -1:
n_rows = int(np.ceil(n / n_cols))
if n_cols == -1:
n_cols = int(np.ceil(n / n_rows))
# create figure
fig = plt.figure(figsize=figsize)
# ticks
tick_par = dict(color='w', direction='in',
bottom=1, top=1, left=1, right=1)
# Use gridspec rather than ImageGrid since the latter tends to resize
# the axes
if clim_all:
cbar_size, hist_size = 3, 5
else:
cbar_size = hist_size = 0
gs = GridSpec(n_rows, n_cols * (100 + cbar_size + hist_size),
hspace=0.005,
wspace=0.005,
left=0.03, # fixme: need more for ticks
right=0.97,
bottom=0.03,
top=0.98
) # todo: maybe better with tight layout.
# create colourbar and pixel histogram axes
#
kws = {**dict(origin='lower',
cbar=False, sliders=False, hist=False,
clim=not clim_all,
plims=plims),
**kws}
title_kws = {**dict(color='w',
va='top',
fontweight='bold'),
**(title_kws or {})}
art = []
w = len(str(int(n)))
axes = np.empty((n_rows, n_cols), 'O')
indices = enumerate(np.ndindex(n_rows, n_cols))
for (i, (j, k)), title in itt.zip_longest(indices, titles, fillvalue=''):
if i == n:
break
# last
if (i == n - 1) and clim_all:
# do colourbar + pixel histogram if clim all
kws.update(cbar=True, sliders=True, hist=True,
cax=fig.add_subplot(
gs[:, -(cbar_size + hist_size) * n_cols:]),
hax=fig.add_subplot(gs[:, -hist_size * n_cols:]))
# create axes!
axes[j, k] = ax = fig.add_subplot(
gs[j:j + 1, (100 * k):(100 * (k + 1))])
# plot image
imd = ImageDisplay(images[i], ax=ax, **kws)
art.append(imd.imagePlot)
# do ticks
top = (j == 0)
bot = (j == n_rows - 1)
left = (k == 0) # leftmost
# right = (j == n_cols - 1)
# set the ticks to white and visible on all spines for aesthetic
ax.tick_params('both', **{**dict(labelbottom=bot, labeltop=top,
labelleft=left, labelright=0),
**tick_par})
for lbl, spine in ax.spines.items():
spine.set_color('w')
# add title text
title = title.replace("\n", "\n ")
ax.text(0.025, 0.95, f'{i: <{w}}: {title}',
transform=ax.transAxes, **title_kws)
# Do colorbar
# noinspection PyUnboundLocalVariable
# fig.colorbar(imd.imagePlot, cax)
img = ImageGrid(fig, axes, imd)
if clim_all:
img._clim_all(images, plims)
return img
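# Hedged usage sketch (synthetic data, illustrative only): display a handful of
# random images on a shared colour scale and save the resulting figure.
def _example_plot_image_grid():
    images = [np.random.randn(32, 32) for _ in range(4)]
    grid = plot_image_grid(images, titles=[f'im {i}' for i in range(4)], clim_all=True)
    grid.fig.savefig('grid_example.png')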
class ImageGrid:
def __init__(self, fig, axes, imd):
self.fig = fig
self.axes = axes
self.imd = imd
def __iter__(self):
yield from (self.fig, self.axes, self.imd)
def save(self, filenames):
from matplotlib.transforms import Bbox
fig = self.fig
assert len(filenames) == self.axes.size
ax_per_image = (len(fig.axes) // self.axes.size)
# axit = mit.chunked(self.fig.axes, ax_per_image)
for ax, name in zip(self.axes.ravel(), filenames):
mn, mx = (np.inf, np.inf), (0, 0)
# for ax in axes[::-1]:
# # Save just the portion _inside_ the second axis's boundaries
# mn1, mx1 = ax.get_window_extent().transformed(
# fig.dpi_scale_trans.inverted()).get_points()
# mn = np.min((mn, mn1), 0)
# mx = np.max((mx, mx1), 0)
# ticklabels = ax.xaxis.get_ticklabels() + ax.yaxis.get_ticklabels()
# for txt in ticklabels:
# mn1, mx1 = txt.get_window_extent().transformed(
# fig.dpi_scale_trans.inverted()).get_points()
# mn = np.min((mn, mn1), 0)
# mx = np.max((mx, mx1), 0)
# remove ticks
# ax.set_axis_off()
if len(ax.texts):
ax.texts[0].set_visible(False)
            # Pad the saved area by 20% in the x-direction (Bbox.expanded(1.2, 1))
fig.savefig(name, bbox_inches=ax.get_window_extent().transformed(
fig.dpi_scale_trans.inverted()).expanded(1.2, 1))
@property
def images(self):
return [ax.images[0].get_array() for ax in self.fig.axes]
    def _clim_all(self, images, plims):
        imd = self.imd
        # collect the image artists from every axis in the grid
        art = [ax.images[0] for ax in self.fig.axes if ax.images]
        # connect all image clims to the sliders.
for image in art:
# noinspection PyUnboundLocalVariable
imd.sliders.lower.on_move.add(set_clim_connected, image,
imd.sliders)
imd.sliders.upper.on_move.add(set_clim_connected, image,
imd.sliders)
        # The same as above can be accomplished in pure matplotlib as follows:
# https://matplotlib.org/3.1.1/gallery/images_contours_and_fields/multi_image.html
# Make images respond to changes in the norm of other images (e.g. via
# the "edit axis, curves and images parameters" GUI on Qt), but be
# careful not to recurse infinitely!
# def update(changed_image):
# for im in art:
# if (changed_image.get_cmap() != im.get_cmap()
# or changed_image.get_clim() != im.get_clim()):
# im.set_cmap(changed_image.get_cmap())
# im.set_clim(changed_image.get_clim())
#
# for im in art:
# im.callbacksSM.connect('changed', update)
# update clim for all plots
# for the general case where images are non-uniform shape, we have to
# flatten them all to get the colour percentile values.
# TODO: will be more efficient for large number of images to sample
# evenly from each image
pixels = []
for im in images:
# getattr(im, ('ravel', 'compressed')[np.ma.isMA(im)])()
pixels.extend(im.compressed() if np.ma.isMA(im) else im.ravel())
pixels =
|
np.array(pixels)
|
numpy.array
|
'''
---------------------------
Licensing and Distribution
---------------------------
Program name: Pilgrim
Version : 2021.5
License : MIT/x11
Copyright (c) 2021, <NAME> (<EMAIL>) and
<NAME> (<EMAIL>)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
---------------------------
*----------------------------------*
| Module : common |
| Sub-module : Molecule |
| Last Update: 2020/02/03 (Y/M/D) |
| Main Author: <NAME> |
*----------------------------------*
This module contains the Molecule class
'''
#=============================================#
import os
import numpy as np
#---------------------------------------------#
import common.fncs as fncs
import common.partfns as pf
import common.internal as intl
import common.Exceptions as Exc
from common.criteria import EPS_IC
from common.dicts import dpt_im
from common.files import read_gtsfile
from common.files import write_gtsfile
from common.files import write_xyz, write_molden
from common.pgs import get_pgs
from common.physcons import AMU
from common.physcons import KCALMOL
from common.physcons import EV
from common.physcons import ANGSTROM
from common.physcons import H2CM
from common.gaussian import read_fchk
from common.gaussian import read_gauout
#=============================================#
class Molecule():
# Initialization method
def __init__(self,label=None):
self._label = label
# Unidimensional
self._mform = "-"
self._mu = None
self._ch = None
self._mtp = None
self._V0 = None
self._pgroup = None
self._rotsigma = None
self._natoms = None
self._nel = None # number of electrons
self._rtype = None
self._linear = None
# Multi-dimensional
self._atnums = None
self._symbols = None
self._masses = None
self._les = None # list of electronic states
self._itensor = None
self._imoms = None
self._rotTs = None
# Arrays of importance
self._xcc = None
self._gcc = None
self._Fcc = None
self._xms = None
self._gms = None
self._Fms = None
# related to frequencies
self._fscal = 1.0
self._nvdof = None
self._cczpe = None
self._ccfreqs = None
self._ccFevals = None
self._ccFevecs = None
self._iczpe = None
self._icfreqs = None
self._icFevals = None
self._icFevecs = None
# other stuff for very particular occasion
self._gts = None
def __str__(self): return self._mform
def setvar(self,xcc=None,gcc=None,Fcc=None,\
atonums=None,symbols=None,masses=None,\
ch=None,mtp=None, V0=None,\
pgroup=None,rotsigma=None,\
fscal=None,les=None):
if xcc is not None: self._xcc = xcc
if gcc is not None: self._gcc = gcc
if Fcc is not None: self._Fcc = Fcc
if atonums is not None: self._atnums = atonums
if symbols is not None: self._symbols = symbols
if masses is not None: self._masses = masses
if ch is not None: self._ch = int(ch)
if mtp is not None: self._mtp = int(mtp)
if V0 is not None: self._V0 = V0
if pgroup is not None: self._pgroup = pgroup
if rotsigma is not None: self._rotsigma = rotsigma
if fscal is not None: self._fscal = fscal
if les is not None: self._les = les
def genderivates(self):
self._mform = fncs.get_molformula(self._symbols)
self._natoms = len(self._atnums)
self._mass = sum(self._masses)
self._nel = sum(self._atnums)-self._ch
if self._les is None: self._les = [ (self._mtp,0.0) ]
def prepare(self):
# check atnums
if self._atnums is not None and type(self._atnums[0]) == str:
self._symbols = list(self._atnums)
# check symbols
if self._symbols is not None and type(self._symbols[0]) == int:
self._atnums = list(self._symbols)
# Get both atnums and symbols if None
if self._atnums is None: self._atnums = fncs.symbols2atonums(self._symbols)
if self._symbols is None: self._symbols = fncs.atonums2symbols(self._atnums)
# check masses
if self._masses is None:
self._masses = fncs.atonums2masses(self._atnums)
# derivated magnitudes
self.genderivates()
# check Fcc
if self._Fcc not in (None,[]) and len(self._Fcc) != 3*self._natoms:
self._Fcc = fncs.lowt2matrix(self._Fcc)
def calc_pgroup(self,force=False):
calculate = False
if force : calculate = True
if self._pgroup is None: calculate = True
if self._rotsigma is None: calculate = True
if calculate: self._pgroup,self._rotsigma = get_pgs(self._atnums,self._masses,self._xcc)
def remove_frozen(self):
if self._natoms == 1: return [],[]
frozen = fncs.detect_frozen(self._Fcc,self._natoms)
if len(frozen) == 0: return [],[]
# coordinates and symbols of frozen moiety
bN = [at in frozen for at in range(self._natoms)]
b3N = [at in frozen for at in range(self._natoms) for ii in range(3)]
frozen_xcc = np.array(self._xcc)[b3N]
frozen_symbols = np.array(self._symbols)[bN]
# now system is just the flexible moiety
bN = [at not in frozen for at in range(self._natoms)]
b3N = [at not in frozen for at in range(self._natoms) for ii in range(3)]
self._xcc = np.array(self._xcc)[b3N].tolist()
self._symbols = np.array(self._symbols)[bN].tolist()
self._atnums = np.array(self._atnums)[bN].tolist()
self._masses = np.array(self._masses)[bN].tolist()
self._pgroup = None
self._rotsigma = None
# Gradient and hessian
if self._gcc is not None and len(self._gcc) != 0:
self._gcc = np.array(self._gcc)[b3N].tolist()
if self._Fcc is not None and len(self._Fcc) != 0:
n3 = self._natoms*3
self._Fcc = [[self._Fcc[idx1][idx2] for idx1 in range(n3) if b3N[idx1]]\
for idx2 in range(n3) if b3N[idx2]]
# set origin for frozen moiety
com = fncs.get_com(self._xcc,self._masses)
frozen_xcc = fncs.set_origin(frozen_xcc,com)
# prepare system
self.prepare()
return frozen_xcc, frozen_symbols
def mod_masses(self,masses):
self._masses = list(masses)
self._mass = sum(self._masses)
# re-calculate point group
self.calc_pgroup(force=True)
def apply_imods(self,imods,imasses):
'''
example: imods = ["H2(4,5)","C13(all_C)"]
imasses = {"H2":2.0141/AMU, "C13":13.0034/AMU}
'''
if imods is None: return
for imod in imods:
isymbol = imod.split("(")[0]
if isymbol in imasses.keys(): imass = imasses[isymbol]
elif isymbol in dpt_im.keys(): imass = dpt_im[isymbol]
else:
exception = Exc.WrongInIsomass
exception._var = isymbol
raise exception
atoms = imod.split("(")[1].split(")")[0]
if "all_" in atoms:
atype = atoms.split("all_")[1].strip()
for idx,symbol in enumerate(self._symbols):
if symbol == atype: self._masses[idx] = imass
else:
list_of_atoms = []
for atom in atoms.split(","):
if "-" in atom:
at1,atn = atom.split("-")
list_of_atoms += range(int(at1),int(atn)+1)
else: list_of_atoms.append(int(atom))
list_of_atoms = sorted(list(set(list_of_atoms)))
for idx in list_of_atoms: self._masses[idx-1] = imass
# re-calculate total mass and point group
self.mod_masses(self._masses)
def setup(self,mu=1.0/AMU,projgrad=False):
self._mu = mu
# derivated magnitudes (again, in case sth was modified)
# for example, when set from gts and masses are added latter
self.genderivates()
# shift to center of mass and reorientate molecule
idata = (self._xcc,self._gcc,self._Fcc,self._masses)
self._xcc, self._gcc, self._Fcc = fncs.center_and_orient(*idata)
# symmetry
self.calc_pgroup(force=False)
# Generate mass-scaled arrays
self._xms = fncs.cc2ms_x(self._xcc,self._masses,self._mu)
self._gms = fncs.cc2ms_g(self._gcc,self._masses,self._mu)
self._Fms = fncs.cc2ms_F(self._Fcc,self._masses,self._mu)
#-------------#
# Atomic case #
#-------------#
if self._natoms == 1:
self._nvdof = 0
self._linear = False
#self._xms = list(self._xcc)
#self._gms = list(self._gcc)
#self._Fms = list(self._Fcc)
self._ccfreqs = []
self._ccFevals = []
self._ccFevecs = []
#----------------#
# Molecular case #
#----------------#
else:
# Calculate inertia
self._itensor = fncs.get_itensor_matrix(self._xcc,self._masses)
self._imoms, self._rotTs, self._rtype, self._linear = \
fncs.get_itensor_evals(self._itensor)
# Vibrational degrees of freedom
if self._linear: self._nvdof = 3*self._natoms - 5
else : self._nvdof = 3*self._natoms - 6
# calculate frequencies
if self._Fcc is None : return
if len(self._Fcc) == 0 : return
if self._ccfreqs is not None: return
v0 = self._gms if projgrad else None
data = fncs.calc_ccfreqs(self._Fcc,self._masses,self._xcc,self._mu,v0=v0)
self._ccfreqs, self._ccFevals, self._ccFevecs = data
# Scale frequencies
self._ccfreqs = fncs.scale_freqs(self._ccfreqs,self._fscal)
def get_imag_main_dir(self):
ic, fwsign = intl.ics_idir(self._xcc,self._symbols,\
self._masses,self._ccfreqs,self._ccFevecs)
return ic, fwsign
def icfreqs(self,ics,bool_pg=False):
#----------------#
# Molecular case #
#----------------#
if self._natoms != 1:
ituple = (self._Fcc,self._masses,self._xcc,self._gcc,ics,bool_pg)
self._icfreqs, self._icFevals, self._icFevecs = intl.calc_icfreqs(*ituple)
#-------------#
# Atomic case #
#-------------#
else:
self._icfreqs = []
self._icFevals = []
self._icFevecs = []
# scale frequencies
self._icfreqs = [freq*self._fscal for freq in self._icfreqs]
def ana_freqs(self,case="cc"):
if case == "cc":
# Keep record of imaginary frequencies
if self._ccFevecs is not None:
self._ccimag = [ (frq,self._ccFevecs[idx]) for idx,frq in enumerate(self._ccfreqs)\
if frq < 0.0]
else:
self._ccimag = [ (frq,None) for idx,frq in enumerate(self._ccfreqs)\
if frq < 0.0]
# Calculate zpe
self._cczpes = [fncs.afreq2zpe(frq) for frq in self._ccfreqs]
self._cczpe = sum(self._cczpes)
self._ccV1 = self._V0 + self._cczpe
if case == "ic":
# Keep record of imaginary frequencies
if self._icFevecs is not None:
self._icimag = [ (frq,self._icFevecs[idx]) for idx,frq in enumerate(self._icfreqs)\
if frq < 0.0]
else:
self._icimag = [ (frq,None) for idx,frq in enumerate(self._icfreqs)\
if frq < 0.0]
# Calculate zpe
self._iczpes = [fncs.afreq2zpe(frq) for frq in self._icfreqs]
self._iczpe = sum(self._iczpes)
self._icV1 = self._V0 + self._iczpe
def clean_freqs(self,case="cc"):
# select case
if case == "cc": freqs = self._ccfreqs
else : freqs = self._icfreqs
# keep track of those to save
keep = []
for idx,freq in enumerate(freqs):
if abs(fncs.afreq2cm(freq)) < EPS_IC: continue
keep.append(idx)
# keep only those > EPS_IC
if case == "cc":
self._ccfreqs = [self._ccfreqs[idx] for idx in keep]
if self._ccFevals is not None:
self._ccFevals = [self._ccFevals[idx] for idx in keep]
if self._ccFevecs is not None:
self._ccFevecs = [self._ccFevecs[idx] for idx in keep]
if case == "ic":
self._icfreqs = [self._icfreqs[idx] for idx in keep]
if self._icFevals is not None:
self._icFevals = [self._icFevals[idx] for idx in keep]
if self._icFevecs is not None:
self._icFevecs = [self._icFevecs[idx] for idx in keep]
def deal_lowfq(self,lowfq={},case="cc"):
# for Cartesian Coordinates
if case == "cc":
# frequencies were not projected along MEP
if self._nvdof - len(self._ccfreqs) == 0:
for idx,newfreq in lowfq.items():
self._ccfreqs[idx] = max(self._ccfreqs[idx],newfreq)
# frequencies were projected along MEP
elif self._nvdof - len(self._ccfreqs) == 1:
for idx,newfreq in lowfq.items():
self._ccfreqs[idx-1] = max(self._ccfreqs[idx-1],newfreq)
# for Internal Coordinates
elif case == "ic":
# frequencies were not projected along MEP
if self._nvdof - len(self._icfreqs) == 0:
for idx,newfreq in lowfq.items():
self._icfreqs[idx] = max(self._icfreqs[idx],newfreq)
# frequencies were projected along MEP
elif self._nvdof - len(self._icfreqs) == 1:
for idx,newfreq in lowfq.items():
self._icfreqs[idx-1] = max(self._icfreqs[idx-1],newfreq)
def calc_pfns(self,temps,case="cc",fmode=0,imag=1E10):
'''
fmode = -1 or 0 (0 is default)
'''
# Calculate translational partition function (per unit volume)
ph_tra = np.array([pf.pf_partinbox(self._mass,T) for T in temps])
# Calculate rotational partition function (Rigid-Rotor)
if self._natoms > 1:
pf_rot = np.array([pf.pf_rigidrotor(self._imoms,T,self._rotsigma) for T in temps])
else:
pf_rot =
|
np.array([1.0 for T in temps])
|
numpy.array
|
from __future__ import division, absolute_import, print_function
from builtins import range
import numpy as np
import os
import sys
import esutil
import matplotlib.pyplot as plt
from .fgcmUtilities import expFlagDict
from .fgcmUtilities import retrievalFlagDict
from .sharedNumpyMemManager import SharedNumpyMemManager as snmm
class FgcmParameters(object):
"""
Class to contain FGCM parameters. Initialization should be done via:
newParsWithFits()
newParsWithArrays()
loadParsWithFits()
loadParsWithArrays()
parameters
----------
fgcmConfig: FgcmConfig
Config object
expInfo: numpy recarray, required if New parameters
Exposure info table
fgcmLUT: FgcmLUT, required if New parameters
inParInfo: numpy recarray, required if loading parameters
inParams: numpy recarray, required if loading parameters
inSuperStar: numpy array, required if loading parameters
Config variables
----------------
minExpPerNight: int
        Minimum number of exposures in a night for plotting
freezeStdAtmosphere: bool
Fit atmosphere parameters or freeze at standard values? (good for 0th cycle)
epochMJDs: double array
MJDs which divide observing epochs
washMJDs: double array
MJDs which denote mirror washing dates
useRetrievedPwv: bool
Use Pwv retrieved from colors from previous cycle?
useNightlyRetrievedPwv: bool
Re-fit offsets for each night Pwv variation (if useRetrievedPwv==True)?
useRetrievedTauInit: bool
Use nightly retrieved tau from previous cycle as initial guess? (experimental)
"""
def __init__(self, fgcmConfig, expInfo=None, fgcmLUT=None,
inParInfo=None, inParams=None, inSuperStar=None):
initNew = False
loadOld = False
if (expInfo is not None and fgcmLUT is not None):
initNew = True
if (inParInfo is not None and inParams is not None and inSuperStar is not None):
loadOld = True
if (initNew and loadOld):
raise ValueError("Too many parameters specified: either expInfo/fgcmLUT or inParInof/inParams/inSuperStar")
if (not initNew and not loadOld):
raise ValueError("Too few parameters specificed: either expInfo/fgcmLUT or inParInof/inParams/inSuperStar")
self.hasExternalPwv = False
self.hasExternalTau = False
self.outfileBaseWithCycle = fgcmConfig.outfileBaseWithCycle
self.plotPath = fgcmConfig.plotPath
self.fgcmLog = fgcmConfig.fgcmLog
self.fgcmLog.debug('Initializing FgcmParameters...')
# for plotting
self.minExpPerNight = fgcmConfig.minExpPerNight
# get stuff from config file
self.nCCD = fgcmConfig.nCCD
self.bands = fgcmConfig.bands
self.nBands = len(self.bands)
self.fitBands = fgcmConfig.fitBands
self.nFitBands = len(self.fitBands)
self.notFitBands = fgcmConfig.notFitBands
self.nNotFitBands = len(self.notFitBands)
self.bandFitIndex = fgcmConfig.bandFitIndex
self.bandNotFitIndex = fgcmConfig.bandNotFitIndex
self.bandRequiredIndex = fgcmConfig.bandRequiredIndex
        self.bandNotRequiredIndex = fgcmConfig.bandNotRequiredIndex
self.lutFilterNames = fgcmConfig.lutFilterNames
self.lutStdFilterNames = fgcmConfig.lutStdFilterNames
self.nLUTFilter = len(self.lutFilterNames)
self.filterToBand = fgcmConfig.filterToBand
self.lambdaStdFilter = fgcmConfig.lambdaStdFilter
self.lambdaStdBand = fgcmConfig.lambdaStdBand
self.freezeStdAtmosphere = fgcmConfig.freezeStdAtmosphere
self.alphaStd = fgcmConfig.alphaStd
self.o3Std = fgcmConfig.o3Std
self.tauStd = fgcmConfig.tauStd
self.lnTauStd = fgcmConfig.lnTauStd
self.pwvStd = fgcmConfig.pwvStd
self.lnPwvStd = fgcmConfig.lnPwvStd
self.pmbStd = fgcmConfig.pmbStd
self.zenithStd = fgcmConfig.zenithStd
self.secZenithStd = 1./np.cos(self.zenithStd*np.pi/180.)
self.pmbRange = fgcmConfig.pmbRange
self.pwvRange = fgcmConfig.pwvRange
self.lnPwvRange = np.log(self.pwvRange)
self.O3Range = fgcmConfig.O3Range
self.tauRange = fgcmConfig.tauRange
self.lnTauRange = np.log(self.tauRange)
self.alphaRange = fgcmConfig.alphaRange
self.zenithRange = fgcmConfig.zenithRange
self.nExp = fgcmConfig.nExp
self.seeingField = fgcmConfig.seeingField
self.seeingSubExposure = fgcmConfig.seeingSubExposure
self.deepFlag = fgcmConfig.deepFlag
self.fwhmField = fgcmConfig.fwhmField
self.skyBrightnessField = fgcmConfig.skyBrightnessField
self.expField = fgcmConfig.expField
self.UTBoundary = fgcmConfig.UTBoundary
self.latitude = fgcmConfig.latitude
self.sinLatitude = np.sin(np.radians(self.latitude))
self.cosLatitude = np.cos(np.radians(self.latitude))
self.epochMJDs = fgcmConfig.epochMJDs
self.washMJDs = fgcmConfig.washMJDs
self.coatingMJDs = fgcmConfig.coatingMJDs
self.stepUnitReference = fgcmConfig.stepUnitReference
self.pwvFile = fgcmConfig.pwvFile
self.tauFile = fgcmConfig.tauFile
self.externalPwvDeltaT = fgcmConfig.externalPwvDeltaT
self.externalTauDeltaT = fgcmConfig.externalTauDeltaT
self.useRetrievedPwv = fgcmConfig.useRetrievedPwv
self.useNightlyRetrievedPwv = fgcmConfig.useNightlyRetrievedPwv
self.useQuadraticPwv = fgcmConfig.useQuadraticPwv
self.useRetrievedTauInit = fgcmConfig.useRetrievedTauInit
self.modelMagErrors = fgcmConfig.modelMagErrors
self.instrumentParsPerBand = fgcmConfig.instrumentParsPerBand
self.approxThroughput = fgcmConfig.approxThroughput
self.superStarNPar = ((fgcmConfig.superStarSubCCDChebyshevOrder + 1) *
(fgcmConfig.superStarSubCCDChebyshevOrder + 1))
self.ccdOffsets = fgcmConfig.ccdOffsets
self.superStarSubCCD = fgcmConfig.superStarSubCCD
self.superStarSubCCDTriangular = fgcmConfig.superStarSubCCDTriangular
self.illegalValue = fgcmConfig.illegalValue
self.quietMode = fgcmConfig.quietMode
if fgcmConfig.aperCorrFitNBins == 0 and len(fgcmConfig.aperCorrInputSlopes) > 0:
self.aperCorrInputSlopes = fgcmConfig.aperCorrInputSlopes
else:
self.aperCorrInputSlopes = None
if (initNew):
self._initializeNewParameters(expInfo, fgcmLUT)
else:
self._loadOldParameters(expInfo, inParInfo, inParams, inSuperStar)
@classmethod
def newParsWithFits(cls, fgcmConfig, fgcmLUT):
"""
Make a new FgcmParameters object, loading from fits.
parameters
----------
fgcmConfig: FgcmConfig
fgcmLUT: fgcmLUT
Config variables
----------------
exposureFile: string
File with exposure information
"""
import fitsio
expInfoFile = fgcmConfig.exposureFile
expInfo = fitsio.read(expInfoFile, ext=1)
return cls(fgcmConfig, expInfo=expInfo, fgcmLUT=fgcmLUT)
@classmethod
def newParsWithArrays(cls, fgcmConfig, fgcmLUT, expInfo):
"""
Make a new FgcmParameters object, with input arrays
parameters
----------
fgcmConfig: FgcmConfig
fgcmLUT: FgcmLUT
expInfo: numpy recarray
Exposure info
"""
return cls(fgcmConfig, expInfo=expInfo, fgcmLUT=fgcmLUT)
@classmethod
def loadParsWithFits(cls, fgcmConfig):
"""
Make an FgcmParameters object, loading from old parameters in fits
parameters
----------
fgcmConfig: FgcmConfig
Config variables
----------------
exposureFile: string
File with exposure information
inParameterFile: string
File with input parameters (from previous cycle)
"""
import fitsio
expInfoFile = fgcmConfig.exposureFile
inParFile = fgcmConfig.inParameterFile
expInfo = fitsio.read(expInfoFile, ext=1)
inParInfo = fitsio.read(inParFile, ext='PARINFO')
inParams = fitsio.read(inParFile, ext='PARAMS')
inSuperStar = fitsio.read(inParFile, ext='SUPER')
return cls(fgcmConfig, expInfo=expInfo,
inParInfo=inParInfo, inParams=inParams, inSuperStar=inSuperStar)
@classmethod
def loadParsWithArrays(cls, fgcmConfig, expInfo, inParInfo, inParams, inSuperStar):
"""
Make an FgcmParameters object, loading from old parameters in arrays.
parameters
----------
fgcmConfig: FgcmConfig
expInfo: numpy recarray
Exposure info
inParInfo: numpy recarray
Input parameter information array
inParams: numpy recarray
Input parameters
inSuperStar: numpy array
Input superstar
"""
return cls(fgcmConfig, expInfo=expInfo,
inParInfo=inParInfo, inParams=inParams, inSuperStar=inSuperStar)
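    # Hedged usage sketch (the config and LUT objects are assumed to exist
    # elsewhere; names follow the class docstring above):
    #     pars = FgcmParameters.newParsWithFits(fgcmConfig, fgcmLUT)   # fresh cycle
    #     pars = FgcmParameters.loadParsWithFits(fgcmConfig)           # continue a cycle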
def _initializeNewParameters(self, expInfo, fgcmLUT):
"""
Internal method to initialize new parameters
parameters
----------
        expInfo: numpy recarray
fgcmLUT: FgcmLUT
"""
# link band indices
# self._makeBandIndices()
# load the exposure information
self._loadExposureInfo(expInfo)
# load observing epochs and link indices
self._loadEpochAndWashInfo()
# and make the new parameter arrays
self.parAlpha = np.zeros(self.campaignNights.size,dtype=np.float32) + fgcmLUT.alphaStd
self.parO3 = np.zeros(self.campaignNights.size,dtype=np.float32) + fgcmLUT.o3Std
self.parLnTauIntercept = np.zeros(self.campaignNights.size,dtype=np.float32) + fgcmLUT.lnTauStd
self.parLnTauSlope = np.zeros(self.campaignNights.size,dtype=np.float32)
# these we will always have, won't always fit
self.parLnPwvIntercept = np.zeros(self.campaignNights.size, dtype=np.float32) + fgcmLUT.lnPwvStd
self.parLnPwvSlope = np.zeros(self.campaignNights.size, dtype=np.float32)
self.parLnPwvQuadratic = np.zeros(self.campaignNights.size, dtype=np.float32)
# parameters with per-epoch values
self.parSuperStarFlat = np.zeros((self.nEpochs,self.nLUTFilter,self.nCCD,self.superStarNPar),dtype=np.float64)
# The first term should be 1.0 with new flux units
self.parSuperStarFlat[:, :, :, 0] = 1.0
# parameters with per-wash values
# We always have these parameters, even if we don't fit them
self.parQESysIntercept = np.zeros((self.nBands, self.nWashIntervals), dtype=np.float32)
self.compQESysSlope = np.zeros((self.nBands, self.nWashIntervals), dtype=np.float32)
self.compQESysSlopeApplied = np.zeros_like(self.compQESysSlope)
# parameters for "absolute" offsets (and relative between filters)
# Currently, this only will turn on for when there are multiple filters
# for the same band. In the future we can add "primary absolute calibrators"
# to the fit, and turn these on.
self.parFilterOffset = np.zeros(self.nLUTFilter, dtype=np.float64)
self.parFilterOffsetFitFlag = np.zeros(self.nLUTFilter, dtype=np.bool)
for i, f in enumerate(self.lutFilterNames):
band = self.filterToBand[f]
nBand = 0
for ff in self.filterToBand:
if self.filterToBand[ff] == band:
nBand += 1
# And when there is a duplicate band and it's not the "Standard", fit the offset
if nBand > 1 and f not in self.lutStdFilterNames:
self.parFilterOffsetFitFlag[i] = True
# And absolute offset parameters (used if reference mags are supplied)
self.compAbsThroughput = np.zeros(self.nBands, dtype=np.float64)
if len(self.approxThroughput) == 1:
self.compAbsThroughput[:] = self.approxThroughput[0]
else:
self.compAbsThroughput[:] = np.array(self.approxThroughput)
self.compRefOffset = np.zeros(self.nBands, dtype=np.float64)
self.compRefSigma = np.zeros_like(self.compRefOffset)
# Add in the mirror coating...
self.compMirrorChromaticity = np.zeros((self.nLUTFilter, self.nCoatingIntervals + 1))
## FIXME: need to completely refactor
self.externalPwvFlag = np.zeros(self.nExp,dtype=np.bool)
if (self.pwvFile is not None):
self.fgcmLog.info('Found external PWV file.')
self.pwvFile = self.pwvFile
self.hasExternalPwv = True
self.loadExternalPwv(self.externalPwvDeltaT)
self.externalTauFlag = np.zeros(self.nExp,dtype=np.bool)
if (self.tauFile is not None):
self.fgcmLog.info('Found external tau file.')
self.tauFile = self.tauFile
self.hasExternalTau = True
self.loadExternalTau()
# and the aperture corrections
# These are per-band
self.compAperCorrPivot = np.zeros(self.nBands,dtype='f8')
self.compAperCorrSlope = np.zeros(self.nBands,dtype='f8')
self.compAperCorrSlopeErr = np.zeros(self.nBands,dtype='f8')
self.compAperCorrRange = np.zeros((2,self.nBands),dtype='f8')
if self.aperCorrInputSlopes is not None:
# Set the aperture correction parameters to those that were input
self.compAperCorrSlope[:] = self.aperCorrInputSlopes[:]
self.compAperCorrRange[0, :] = 0.0
self.compAperCorrRange[1, :] = np.inf
for bandIndex in range(len(self.bands)):
use, = np.where((self.expBandIndex == bandIndex) &
(self.expSeeingVariable > 0.0))
# The pivot is somewhat arbitrary and will come out in the wash
# through the fit cycles, but it's good to have it as something
# sensible
if use.size >= 3:
self.compAperCorrPivot[bandIndex] = np.median(self.expSeeingVariable[use])
# The magnitude model parameters
self.compModelErrExptimePivot = np.zeros(self.nBands, dtype='f8')
self.compModelErrFwhmPivot = np.zeros(self.nBands, dtype='f8')
self.compModelErrSkyPivot = np.zeros(self.nBands, dtype='f8')
self.compModelErrPars = np.zeros((7, self.nBands), dtype='f8')
# one of the "parameters" is expGray
self.compExpGray = np.zeros(self.nExp,dtype='f8')
self.compVarGray = np.zeros(self.nExp,dtype='f8')
self.compNGoodStarPerExp = np.zeros(self.nExp,dtype='i4')
# and sigFgcm
self.compSigFgcm = np.zeros(self.nBands,dtype='f8')
self.compSigmaCal = np.zeros(self.nBands, dtype='f8')
self.compReservedRawRepeatability = np.zeros(self.nBands, dtype='f8')
self.compReservedRawCrunchedRepeatability = np.zeros(self.nBands, dtype='f8')
# and the computed retrieved Pwv
# these are set to the standard values to start
self.compRetrievedLnPwv = np.zeros(self.nExp,dtype='f8') + self.lnPwvStd
self.compRetrievedLnPwvInput = self.compRetrievedLnPwv.copy()
self.compRetrievedLnPwvRaw = np.zeros(self.nExp,dtype='f8')
self.compRetrievedLnPwvFlag = np.zeros(self.nExp,dtype='i2') + retrievalFlagDict['EXPOSURE_STANDARD']
self.parRetrievedLnPwvScale = 1.0
self.parRetrievedLnPwvOffset = 0.0
self.parRetrievedLnPwvNightlyOffset = np.zeros(self.nCampaignNights,dtype='f8')
# and retrieved tau nightly start values
self.compRetrievedTauNight = np.zeros(self.campaignNights.size,dtype='f8') + self.tauStd
self.compRetrievedTauNightInput = self.compRetrievedTauNight.copy()
# do lookups on parameter array
self._arrangeParArray()
# and we're done
def _loadOldParameters(self, expInfo, inParInfo, inParams, inSuperStar):
"""
Internal method to load old parameters
parameters
----------
expInfo: numpy recarray
inParInfo: numpy recarray
inParams: numpy recarray
inSuperStar: numpy recarray
"""
# link band indices
# self._makeBandIndices()
self._loadExposureInfo(expInfo)
self._loadEpochAndWashInfo()
# look at external...
self.hasExternalPwv = inParInfo['HASEXTERNALPWV'][0].astype(np.bool)
self.hasExternalTau = inParInfo['HASEXTERNALTAU'][0].astype(np.bool)
## and copy the parameters
self.parAlpha = np.atleast_1d(inParams['PARALPHA'][0])
self.parO3 = np.atleast_1d(inParams['PARO3'][0])
self.parLnTauIntercept = np.atleast_1d(inParams['PARLNTAUINTERCEPT'][0])
self.parLnTauSlope = np.atleast_1d(inParams['PARLNTAUSLOPE'][0])
self.parLnPwvIntercept = np.atleast_1d(inParams['PARLNPWVINTERCEPT'][0])
self.parLnPwvSlope = np.atleast_1d(inParams['PARLNPWVSLOPE'][0])
self.parLnPwvQuadratic = np.atleast_1d(inParams['PARLNPWVQUADRATIC'][0])
self.parQESysIntercept = inParams['PARQESYSINTERCEPT'][0].reshape((self.nBands, self.nWashIntervals))
self.compQESysSlope = inParams['COMPQESYSSLOPE'][0].reshape((self.nBands, self.nWashIntervals))
self.compQESysSlopeApplied = self.compQESysSlope.copy()
self.parFilterOffset = np.atleast_1d(inParams['PARFILTEROFFSET'][0])
self.parFilterOffsetFitFlag = np.atleast_1d(inParams['PARFILTEROFFSETFITFLAG'][0]).astype(np.bool)
self.compAbsThroughput = np.atleast_1d(inParams['COMPABSTHROUGHPUT'][0])
self.compRefOffset = np.atleast_1d(inParams['COMPREFOFFSET'][0])
self.compRefSigma = np.atleast_1d(inParams['COMPREFSIGMA'][0])
self.compMirrorChromaticity = inParams['COMPMIRRORCHROMATICITY'][0].reshape((self.nLUTFilter, self.nCoatingIntervals + 1))
self.mirrorChromaticityPivot = np.atleast_1d(inParams['MIRRORCHROMATICITYPIVOT'][0])
self.externalPwvFlag = np.zeros(self.nExp,dtype=np.bool)
if self.hasExternalPwv:
self.pwvFile = str(inParInfo['PWVFILE'][0]).rstrip()
self.hasExternalPwv = True
self.loadExternalPwv(self.externalPwvDeltaT)
self.parExternalLnPwvScale = inParams['PAREXTERNALLNPWVSCALE'][0]
self.parExternalLnPwvOffset[:] = np.atleast_1d(inParams['PAREXTERNALLNPWVOFFSET'][0])
self.externalTauFlag = np.zeros(self.nExp,dtype=np.bool)
if self.hasExternalTau:
self.tauFile = str(inParInfo['TAUFILE'][0]).rstrip()
self.hasExternalTau = True
self.loadExternalTau()
self.parExternalLnTauScale = inParams['PAREXTERNALLNTAUSCALE'][0]
self.parExternalLnTauOffset[:] = np.atleast_1d(inParams['PAREXTERNALLNTAUOFFSET'][0])
self.compAperCorrPivot = np.atleast_1d(inParams['COMPAPERCORRPIVOT'][0])
self.compAperCorrSlope = np.atleast_1d(inParams['COMPAPERCORRSLOPE'][0])
self.compAperCorrSlopeErr =
|
np.atleast_1d(inParams['COMPAPERCORRSLOPEERR'][0])
|
numpy.atleast_1d
|
import numpy as np
from brl_gym.estimators.bayes_doors_estimator import BayesDoorsEstimator #, LearnableDoorsBF
from brl_gym.envs.mujoco.doors import DoorsEnv
from brl_gym.envs.mujoco.doors_slow import DoorsSlowEnv
from brl_gym.wrapper_envs.explicit_bayes_env import ExplicitBayesEnv
from brl_gym.wrapper_envs.env_sampler import DiscreteEnvSampler
from gym.spaces import Box, Dict
from gym import utils
# Divide regions into 4 regions, L0, L1, L2, L3 from left to right
REGIONS = [0, 1, 2, 3]
CLOSEST_DOORS = {0:dict(), 1:dict(), 2:dict(), 3:dict()}
def map_to_region(xs):
regions = np.zeros(xs.shape[0])
regions[xs <= -0.7] = 0
regions[np.logical_and(xs <=0, xs > -0.7)] = 1
regions[np.logical_and(xs > 0, xs <= 0.7)] = 2
regions[xs >= 0.7] = 3
    return regions.astype(int)
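# Illustrative check (hypothetical x positions): the four regions split at
# x = -0.7, 0 and 0.7, ordered from left to right.
def _example_map_to_region():
    xs = np.array([-1.0, -0.3, 0.3, 1.0])
    assert map_to_region(xs).tolist() == [0, 1, 2, 3]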
CASES = ['{{:0{}b}}'.format(4).format(x) \
for x in range(1, 16)]
for binary in CASES:
# L0
if binary[0] == '1':
CLOSEST_DOORS[0][binary] = 0
elif binary[:2] == '01':
CLOSEST_DOORS[0][binary] = 1
elif binary[:3] == '001':
CLOSEST_DOORS[0][binary] = 2
elif binary == '0001':
CLOSEST_DOORS[0][binary] = 3
    # L3 (rightmost region): scan the doors from right to left
    flip = binary[::-1]
    if flip[0] == '1':
        CLOSEST_DOORS[3][binary] = 3
    elif flip[:2] == '01':
        CLOSEST_DOORS[3][binary] = 2
    elif flip[:3] == '001':
        CLOSEST_DOORS[3][binary] = 1
    elif flip == '0001':
        CLOSEST_DOORS[3][binary] = 0
# L1
if binary[1] == '1':
CLOSEST_DOORS[1][binary] = 1
elif binary[:2] == '10':
CLOSEST_DOORS[1][binary] = 0
elif binary[:3] == '001':
CLOSEST_DOORS[1][binary] = 2
else:
CLOSEST_DOORS[1][binary] = 3
# L2
if binary[2] == '1':
CLOSEST_DOORS[2][binary] = 2
elif binary[1:3] == '10':
CLOSEST_DOORS[2][binary] = 1
elif binary[1:] == '001':
CLOSEST_DOORS[2][binary] = 3
else:
CLOSEST_DOORS[2][binary] = 0
def get_closest_door(open_doors, states):
region = map_to_region(states[:, 0])
    closest_doors = np.zeros(states.shape[0], dtype=int)
for i, binary in enumerate(open_doors):
closest_doors[i] = CLOSEST_DOORS[region[i]][binary]
return closest_doors
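# Hedged sketch (hypothetical state): an agent standing in the leftmost region
# with only door 2 open gets routed to door 2.
def _example_get_closest_door():
    states = np.array([[-1.0, 0.0]])
    assert get_closest_door(['0010'], states).tolist() == [2]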
class SimpleExpert:
def __init__(self):
env = DoorsEnv()
self.target_pos = np.array([0.0, 1.2])
self.door_pos = env.door_pos[:, :2]
self.door_pos[:, 1] = 0.25
def action(self, open_doors, states):
binary = []
if len(open_doors.shape) == 2:
for x in open_doors:
binary += [''.join(str(int(y)) for y in x)]
else:
binary = [CASES[x] for x in open_doors]
open_doors = binary
actions =
|
np.zeros((states.shape[0], 2))
|
numpy.zeros
|
import numpy as np
from ..base import Classifier, Regressor
from ..exceptions import MethodNotSupportedError
from ..metrics.classification import accuracy_score
class NaiveBayes(Classifier, Regressor):
"""Naive Bayes Classifier
P(A|B) = P(B|A) * P(A) / P(B)
Parameters:
-----------
eps : float, laplacian smoothing coefficient, optional
"""
def __init__(self, eps=1e-18):
self.eps = eps
self._X = []
self._y = []
self._labels = []
def fit(self, X, y):
self._X, self._y = super().fit(X, y)
self._labels = np.unique(self._y)
return self
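    # Hedged usage sketch (toy arrays, assumed to be defined by the caller):
    #     clf = NaiveBayes().fit(X_train, y_train)
    #     y_pred = clf.predict(X_test)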
def predict(self, X):
return np.array([
self._labels[
|
np.argmax(proba)
|
numpy.argmax
|
import numbers
import time
import numpy as np
import scipy
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.decomposition.nmf import _beta_divergence, _beta_loss_to_float
from scipy.special import expit
from scipy.sparse import issparse
USE_CYTHON = False # currently, cython is disabled due to unsolved numerical bugs
EPSILON = np.finfo(np.float32).eps
INTEGER_TYPES = (numbers.Integral, np.integer)
# utility functions
def sigmoid(M):
return expit(M)
def d_sigmoid(M):
sgm = sigmoid(M)
return sgm * (1 - sgm)
def inverse(x, link):
if link == "linear":
return x
elif link == "logit":
return sigmoid(x)
else:
raise ValueError("Invalid link function {}".format(link))
def compute_factorization_error(target, left_factor, right_factor, link, beta_loss):
if target is None:
return 0
elif link == "linear":
return _beta_divergence(target, left_factor, right_factor, beta_loss, square_root=True)
elif link == "logit":
return np.linalg.norm(target - sigmoid(np.dot(left_factor, right_factor)))
class _IterativeCMFSolver:
"""Boilerplate for iterative solvers (mu and newton)
Implement the update_step method in concrete subclasses to use.
Parameters
----------
tol : float, default: 1e-4
Tolerance of the stopping condition.
max_iter : integer, default: 200
Maximum number of iterations before timing out.
l1_reg : double, default: 0.
L1 regularization parameter. Currently same for all matrices.
l2_reg : double, default: 0.
L2 regularization parameter
alpha: double, default: 0.5
Determines trade-off between optimizing for X and Y.
The larger the value, the more X is prioritized in optimization.
beta_loss : float or string, default 'frobenius'
Currently disabled. Used only in 'mu' solver.
String must be in {'frobenius', 'kullback-leibler', 'itakura-saito'}.
Beta divergence to be minimized, measuring the distance between X
and the dot product WH. Note that values different from 'frobenius'
(or 2) and 'kullback-leibler' (or 1) lead to significantly slower
fits.
update_H : boolean, default: True
Currently disabled. Need to enable in future to implement transform method in CNMF.
verbose : integer, default: 0
The verbosity level.
U_non_negative: bool, default: True
Whether to enforce non-negativity for U. Only applicable for the newton solver.
V_non_negative: bool, default: True
Whether to enforce non-negativity for V. Only applicable for the newton solver.
Z_non_negative: bool, default: True
Whether to enforce non-negativity for Z. Only applicable for the newton solver.
    x_link: str, default: "linear"
        One of either "logit" or "linear". The link function for transforming UV^T to approximate X
    y_link: str, default: "linear"
        One of either "logit" or "linear". The link function for transforming VZ^T to approximate Y
hessian_pertubation: double, default: 0.2
        The perturbation to the Hessian in the Newton solver to maintain positive definiteness
"""
def __init__(self, max_iter=200, tol=1e-4, beta_loss="frobenius",
l1_reg=0, l2_reg=0, alpha=0.5, verbose=0,
U_non_negative=True, V_non_negative=True, Z_non_negative=True,
update_U=True, update_V=True, update_Z=True,
x_link="linear", y_link="linear", hessian_pertubation=0.2,
sg_sample_ratio=1., random_state=None):
self.max_iter = max_iter
self.tol = tol
self.beta_loss = _beta_loss_to_float(beta_loss)
self.l1_reg = l1_reg
self.l2_reg = l2_reg
self.alpha = alpha
self.verbose = verbose
self.U_non_negative = U_non_negative
self.V_non_negative = V_non_negative
self.Z_non_negative = Z_non_negative
self.update_U = update_U
self.update_V = update_V
self.update_Z = update_Z
self.x_link = x_link
self.y_link = y_link
self.hessian_pertubation = hessian_pertubation
self.sg_sample_ratio = sg_sample_ratio
if random_state is not None:
np.random.seed(random_state)
def update_step(self, X, Y, U, V, Z, l1_reg, l2_reg, alpha):
"""A single update step for all the matrices in the factorization."""
raise NotImplementedError("Implement in concrete subclass to use")
def compute_error(self, X, Y, U, V, Z):
return self.alpha * compute_factorization_error(X, U, V.T, self.x_link, self.beta_loss) + \
(1 - self.alpha) * compute_factorization_error(Y, V, Z.T, self.y_link, self.beta_loss)
def fit_iterative_update(self, X, Y, U, V, Z):
"""Compute CMF with iterative methods.
The objective function is minimized with an alternating minimization of U, V
and Z. Regularly prints error and stops update when improvement stops.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
First data matrix to be decomposed
Y : {array-like, sparse matrix}, shape (n_features, n_labels)
Second data matrix to be decomposed
U : array-like, shape (n_samples, n_components)
V : array-like, shape (n_features, n_components)
Z : array-like, shape (n_labels, n_components)
Returns
-------
U : array, shape (n_samples, n_components)
Transformed data.
V : array, shape (n_features, n_components)
Transformed data.
Z : array, shape (n_labels, n_components)
Transformed data.
n_iter : int
The number of iterations done by the algorithm.
"""
start_time = time.time()
# TODO: handle beta loss other than fnorm
previous_error = error_at_init = self.compute_error(X, Y, U, V, Z)
for n_iter in range(1, self.max_iter + 1):
self.update_step(X, Y, U, V, Z, self.l1_reg, self.l2_reg, self.alpha)
# test convergence criterion every 10 iterations
if self.tol > 0 and n_iter % 10 == 0:
error = self.compute_error(X, Y, U, V, Z)
if self.verbose:
iter_time = time.time()
print("Epoch %02d reached after %.3f seconds, error: %f" %
(n_iter, iter_time - start_time, error))
improvement_stopped = (previous_error - error) / error_at_init < self.tol
if improvement_stopped:
break
previous_error = error
# do not print if we have already printed in the convergence test
if self.verbose and (self.tol == 0 or n_iter % 10 != 0):
end_time = time.time()
print("Epoch %02d reached after %.3f seconds." %
(n_iter, end_time - start_time))
return U, V, Z, n_iter
class MUSolver(_IterativeCMFSolver):
"""Internal solver that solves by iteratively multiplying the matrices element wise.
The multiplying factors are always positive, meaning this solver can only return positive matrices.
References
----------
<NAME>., <NAME>., & <NAME>. (n.d.).
Semi-supervised collective matrix factorization for topic detection and document clustering.
<NAME>., & <NAME>. (2001). Algorithms for non-negative matrix factorization.
Advances in Neural Information Processing Systems, (1), 556–562.
https://doi.org/10.1109/IJCNN.2008.4634046
"""
@classmethod
def _regularized_delta(cls, numerator, denominator, l1_reg, l2_reg, gamma, H):
# Add L1 and L2 regularization
if l1_reg > 0:
denominator += l1_reg
if l2_reg > 0:
denominator = denominator + l2_reg * H
denominator[denominator == 0] = EPSILON
numerator /= denominator
delta = numerator
# gamma is in ]0, 1]
if gamma != 1:
delta **= gamma
return delta
@classmethod
def _multiplicative_update_u(cls, X, U, V, beta_loss, l1_reg, l2_reg, gamma):
numerator = safe_sparse_dot(X, V)
denominator = np.dot(np.dot(U, V.T), V)
return cls._regularized_delta(numerator, denominator, l1_reg, l2_reg, gamma, U)
@classmethod
def _multiplicative_update_z(cls, Y, V, Z, beta_loss, l1_reg, l2_reg, gamma):
numerator = safe_sparse_dot(Y.T, V)
denominator = np.dot(np.dot(Z, V.T), V)
return cls._regularized_delta(numerator, denominator, l1_reg, l2_reg, gamma, Z)
@classmethod
def _multiplicative_update_v(cls, X, Y, U, V, Z, beta_loss, l1_reg, l2_reg, gamma):
numerator = safe_sparse_dot(X.T, U) + safe_sparse_dot(Y, Z)
denominator = np.dot(V, (np.dot(U.T, U) + np.dot(Z.T, Z)))
return cls._regularized_delta(numerator, denominator, l1_reg, l2_reg, gamma, V)
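    # For the linear/Frobenius case the deltas above reproduce the classic NMF
    # multiplicative updates (sketch, ignoring regularisation), applied
    # element-wise so the factors stay non-negative when the data are:
    #     U <- U * (X V)         / (U V^T V)
    #     Z <- Z * (Y^T V)       / (Z V^T V)
    #     V <- V * (X^T U + Y Z) / (V (U^T U + Z^T Z))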
def update_step(self, X, Y, U, V, Z, l1_reg, l2_reg, alpha):
# TODO: Enable specification of gamma
gamma = 1.
if self.update_V:
delta_V = self._multiplicative_update_v(X, Y, U, V, Z, self.beta_loss, l1_reg,
l2_reg, gamma)
V *= delta_V
if self.update_U:
delta_U = self._multiplicative_update_u(X, U, V, self.beta_loss, l1_reg, l2_reg, gamma)
U *= delta_U
if self.update_Z:
delta_Z = self._multiplicative_update_z(Y, V, Z, self.beta_loss, l1_reg, l2_reg, gamma)
Z *= delta_Z
if USE_CYTHON:
class NewtonSolver(_IterativeCMFSolver):
"""Internal solver that solves using the Newton-Raphson method.
Updates each row independently using a Newton-Raphson step. Can handle various link functions and settings.
The gradient and Hessian are computed based on the residual between the target and the estimate.
Computing the entire target/estimate can be memory intensive, so the option to compute the residual
based on a stochastic sample can be enabled by setting sg_sample_ratio < 1.0.
References
----------
<NAME>., & <NAME>. (2008). Relational learning via collective matrix factorization.
Proceeding of the 14th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining
KDD 08, 650. https://doi.org/10.1145/1401890.1401969
"""
def fit_iterative_update(self, X, Y, U, V, Z):
# handle memory ordering and format issues for speed up
X_ = X.tocsr() if issparse(X) else np.ascontiguousarray(X) if X is not None else X
# instead of solving for Y = VZ^T, in order to make access to V continuous
# for approximating both X and Y, we will solve Y^T = ZV^T
Y_ = Y.T.tocsr() if issparse(Y) else np.ascontiguousarray(Y.T) if Y is not None else Y
# U, V, Z must be C-ordered for cython dot product to work
U = np.ascontiguousarray(U)
V = np.ascontiguousarray(V)
Z = np.ascontiguousarray(Z)
return super().fit_iterative_update(X_, Y_, U, V, Z)
def update_step(self, X, Y, U, V, Z, l1_reg, l2_reg, alpha):
if self.update_U:
_newton_update_left(U, V, X, alpha, l1_reg, l2_reg,
self.x_link, self.U_non_negative,
self.sg_sample_ratio,
self.hessian_pertubation)
if self.update_Z:
_newton_update_left(Z, V, Y, 1 - alpha, l1_reg, l2_reg,
self.y_link, self.Z_non_negative,
self.sg_sample_ratio,
self.hessian_pertubation)
if self.update_V:
_newton_update_V(V, U, Z, X, Y, alpha, l1_reg, l2_reg,
self.x_link, self.y_link,
self.V_non_negative,
self.sg_sample_ratio,
self.hessian_pertubation)
def compute_error(self, X, Y, U, V, Z):
# override because we are solving for Y^T = ZV^T
return self.alpha * compute_factorization_error(X, U, V.T, self.x_link, self.beta_loss) + \
(1 - self.alpha) * compute_factorization_error(Y, Z, V.T, self.y_link, self.beta_loss)
else:
class NewtonSolver(_IterativeCMFSolver):
"""Default implementation when Cython cannot be used."""
@classmethod
def _row_newton_update(cls, M, idx, dM, ddM_inv,
eta=1., non_negative=True):
M[idx, :] = M[idx, :] - eta * np.dot(dM, ddM_inv)
if non_negative:
M[idx, :][M[idx, :] < 0] = 0.
def _stochastic_sample(self, features, target, axis=0):
assert(features.shape[axis] == target.shape[axis])
if self.sg_sample_ratio < 1.:
sample_size = int(features.shape[axis] * self.sg_sample_ratio)
sample_mask = np.random.permutation(np.arange(features.shape[axis]))[:sample_size]
if axis == 0:
features_sampled = features[sample_mask, :]
target_sampled = target[sample_mask, :]
elif axis == 1:
features_sampled = features[:, sample_mask]
target_sampled = target[:, sample_mask]
else:
raise ValueError("Axis {} out of bounds".format(axis))
else:
features_sampled = features
target_sampled = target
return features_sampled, target_sampled
def _safe_invert(self, M):
"""Computed according to reccomendations of
http://web.stanford.edu/class/cme304/docs/newton-type-methods.pdf"""
if scipy.sparse.issparse(M):
eigs, V = scipy.sparse.linalg.eigsh(M)
else:
eigs, V = scipy.linalg.eigh(M)
# perturb hessian to be positive definite
eigs = np.abs(eigs)
eigs[eigs < self.hessian_pertubation] = self.hessian_pertubation
return np.dot(np.dot(V, np.diag(1 / eigs)), V.T)
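        # Modified-Newton safeguard: eigendecompose M, take absolute eigenvalues, clamp
        # them below by `hessian_pertubation`, and invert the clamped spectrum. The result
        # is the inverse of a nearby positive-definite matrix, so the row update stays
        # well defined even when the true Hessian is indefinite or near-singular.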
def _force_flatten(self, v):
"""Forcibly flattens an indexed row or column of a matrix or sparse matrix"""
if np.ndim(v) > 1:
if issparse(v):
v_ = v.toarray()
elif isinstance(v, np.matrix):
v_ = np.asarray(v)
else:
raise ValueError("Indexing array returns {} dimensions " +
"but is not sparse or a matrix".format(np.ndim(v)))
return v_.flatten()
else:
return v.flatten()
def _residual(self, left, right, target, link):
"""Computes residual:
inverse(left @ right, link) - target
The number of dimensions of the residual and estimate will be the same.
            This is necessary because the indexing behavior of np.ndarrays and scipy sparse matrices differs.
Specifically, slicing scipy sparse matrices does not return a 1 dimensional vector.
e.g.
>>> import numpy as np; from scipy.sparse import csc_matrix
>>> A = np.array([[1, 2, 3], [4, 5, 6]])
>>> B = csc_matrix(A)
>>> A[:, 0].shape
(2,)
>>> B[:, 0].shape
(2, 1)
"""
estimate = inverse(np.dot(left, right), link)
ground_truth = target
if issparse(target) and np.ndim(estimate) == 1:
return estimate - ground_truth.toarray().flatten()
else:
return estimate - ground_truth
def _newton_update_U(self, U, V, X, alpha, l1_reg, l2_reg,
link="linear", non_negative=True):
precompute_dU = self.sg_sample_ratio == 1.
if precompute_dU:
# dU is constant across samples
res_X = inverse(np.dot(U, V.T), link) - X
dU_full = alpha * np.dot(res_X, V) + l1_reg * np.sign(U) + l2_reg * U
if issparse(dU_full):
dU_full = dU_full.toarray()
elif isinstance(dU_full, np.matrix):
dU_full = np.asarray(dU_full)
# iterate over rows
precompute_ddU_inv = (link == "linear" and self.sg_sample_ratio == 1.)
if precompute_ddU_inv:
# ddU_inv is constant across samples
ddU_inv = self._safe_invert(alpha * np.dot(V.T, V) + l2_reg * np.eye(U.shape[1]))
for i in range(U.shape[0]):
u_i = U[i, :]
V_T_sampled, X_sampled = self._stochastic_sample(V.T, X, axis=1)
if precompute_dU:
dU = dU_full[i, :]
assert(np.ndim(dU) == 1)
else:
res_X = self._residual(u_i, V_T_sampled, X_sampled[i, :], link)
dU = alpha * np.dot(res_X, V_T_sampled.T) + l1_reg * np.sign(u_i) + l2_reg * u_i
if not precompute_ddU_inv:
if link == "linear":
ddU_inv = self._safe_invert(alpha * np.dot(V_T_sampled, V_T_sampled.T) +
l2_reg * np.eye(U.shape[1]))
elif link == "logit":
D = np.diag(d_sigmoid(np.dot(u_i, V_T_sampled)))
ddU_inv = self._safe_invert(alpha * np.dot(np.dot(V_T_sampled, D), V_T_sampled.T))
self._row_newton_update(U, i, dU, ddU_inv, non_negative=non_negative)
def _newton_update_V(self, V, U, Z, X, Y, alpha, l1_reg, l2_reg,
x_link="linear", y_link="linear", non_negative=True):
precompute_dV = (self.sg_sample_ratio == 1.)
if precompute_dV:
res_X_T = inverse(np.dot(U, V.T), x_link) - X
res_Y_T = inverse(np.dot(Z, V.T), y_link) - Y.T
dV_full = alpha * np.dot(res_X_T.T, U) + \
(1 - alpha) * np.dot(res_Y_T.T, Z) + \
l1_reg * np.sign(V) + l2_reg * V
if isinstance(dV_full, np.matrix):
dV_full = np.asarray(dV_full)
precompute_ddV_inv = (x_link == "linear" and y_link == "linear" and self.sg_sample_ratio == 1.)
if precompute_ddV_inv:
# ddV_inv is constant w.r.t. the samples of V, so we precompute it to save computation
ddV_inv = self._safe_invert(alpha * np.dot(U.T, U) +
(1 - alpha) * np.dot(Z.T, Z) +
l2_reg * np.eye(V.shape[1]))
for i in range(V.shape[0]):
v_i = V[i, :]
U_sampled, X_sampled = self._stochastic_sample(U, X)
Z_T_sampled, Y_sampled = self._stochastic_sample(Z.T, Y, axis=1)
if not precompute_dV:
res_X = self._residual(U_sampled, v_i.T, X_sampled[:, i], x_link)
res_Y = self._residual(v_i, Z_T_sampled, Y_sampled[i, :], y_link)
dV = alpha * np.dot(res_X.T, U_sampled) + \
(1 - alpha) * np.dot(res_Y, Z_T_sampled.T) + \
l1_reg * np.sign(v_i) + l2_reg * v_i
else:
dV = dV_full[i, :]
if not precompute_ddV_inv:
if x_link == "logit":
D_u = np.diag(d_sigmoid(np.dot(U_sampled, v_i.T)))
ddV_wrt_U = np.dot(np.dot(U_sampled.T, D_u), U_sampled)
elif x_link == "linear":
ddV_wrt_U = np.dot(U_sampled.T, U_sampled)
if y_link == "logit":
# in the original paper, the equation was v_i.T @ Z,
# which clearly does not work due to the dimensionality
D_z = np.diag(d_sigmoid(np.dot(v_i, Z_T_sampled)))
ddV_wrt_Z = np.dot(
|
np.dot(Z_T_sampled, D_z)
|
numpy.dot
|
# Exercises - Massimo / Tabata
import mayavi.mlab as mlab
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
from matplotlib import cm
L = 5e9/3e8  # seconds
psi = 0
f = .001 #Hz
w = 2*np.pi*f
A = 1
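# With L given in seconds (arm length divided by c) and w = 2*pi*f in rad/s, the products
# w*L and w*np.cos(theta)*L below are dimensionless phases accumulated over one arm
# light-travel time; the one-way antenna patterns below combine these phases with the
# source angles phi and theta.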
def F_mais_oneway(phi, theta):
return ((1 + np.cos(theta))/2)*(A*np.cos(2*phi)*(np.cos(w*L) - np.cos(w*np.cos(theta)*L)) + A*np.sin(2*phi)*(np.sin(w*np.cos(theta)*L) - np.sin(w*L)))
def F_cruzado_oneway(phi, theta):
return ((1 + np.cos(theta))/2)*(A*np.cos(2*phi)*(np.sin(w*L) - np.sin(w*np.cos(theta)*L)) + A*np.sin(2*phi)*(np.cos(w*L)- np.cos(w*np.cos(theta)*L)))
def F_mais_LIGO(phi, theta, psi):
return 0.5*(1+ np.cos(theta)*np.cos(theta))*
|
np.cos(2*phi)
|
numpy.cos
|
from scipy.spatial import Voronoi, voronoi_plot_2d
import numpy as np
import pandas as pd
import matplotlib
from biopandas.mol2 import PandasMol2
import matplotlib.pyplot as plt
from matplotlib import colors as mcolors
from sklearn.cluster import KMeans
from math import sqrt, asin, atan2, log, pi, tan
from alignment import align
import argparse
import os
import skimage
from skimage.io import imshow
import cv2
from skimage.transform import rotate as skrotate
from skimage import img_as_ubyte
import statistics
def k_different_colors(k: int):
colors = dict(**mcolors.CSS4_COLORS)
def rgb(color): return mcolors.to_rgba(color)[:3]
def hsv(color): return mcolors.rgb_to_hsv(color)
col_dict = [(k, rgb(k)) for c, k in colors.items()]
X = np.array([j for i, j in col_dict])
# Perform kmeans on rqb vectors
kmeans = KMeans(n_clusters=k)
kmeans = kmeans.fit(X)
# Getting the cluster labels
labels = kmeans.predict(X)
# Centroid values
C = kmeans.cluster_centers_
# Find one color near each of the k cluster centers
closest_colors = np.array([np.sum((X - C[i]) ** 2, axis=1)
for i in range(C.shape[0])])
keys = sorted(closest_colors.argmin(axis=1))
return [col_dict[i][0] for i in keys]
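# Illustrative usage (assumes matplotlib's CSS4 palette, as imported above):
#   palette = k_different_colors(6)  # six CSS4 color values spread out in RGB space via k-means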
def voronoi_finite_polygons_2d(vor, radius=None):
"""
Reconstruct infinite voronoi regions in a 2D diagram to finite
regions.
Parameters
----------
vor : Voronoi
Input diagram
radius : float, optional
Distance to 'points at infinity'.
Returns
-------
regions : list of tuples
Indices of vertices in each revised Voronoi regions.
vertices : list of tuples
Coordinates for revised Voronoi vertices. Same as coordinates
of input vertices, with 'points at infinity' appended to the
end.
Source
-------
Copied from https://gist.github.com/pv/8036995
"""
if vor.points.shape[1] != 2:
raise ValueError("Requires 2D input")
new_regions = []
new_vertices = vor.vertices.tolist()
center = vor.points.mean(axis=0)
if radius is None:
radius = vor.points.ptp().max() * 2
# Construct a map containing all ridges for a given point
all_ridges = {}
for (p1, p2), (v1, v2) in zip(vor.ridge_points, vor.ridge_vertices):
all_ridges.setdefault(p1, []).append((p2, v1, v2))
all_ridges.setdefault(p2, []).append((p1, v1, v2))
# Reconstruct infinite regions
for p1, region in enumerate(vor.point_region):
vertices = vor.regions[region]
if all(v >= 0 for v in vertices):
# finite region
new_regions.append(vertices)
continue
# reconstruct a non-finite region
ridges = all_ridges[p1]
new_region = [v for v in vertices if v >= 0]
for p2, v1, v2 in ridges:
if v2 < 0:
v1, v2 = v2, v1
if v1 >= 0:
# finite ridge: already in the region
continue
# Compute the missing endpoint of an infinite ridge
t = vor.points[p2] - vor.points[p1] # tangent
t /= np.linalg.norm(t)
n =
|
np.array([-t[1], t[0]])
|
numpy.array
|
from __future__ import absolute_import, division, print_function
import os
import cv2
import numpy as np
import torch
from torch.utils.data import DataLoader
from layers import disp_to_depth
from utils import readlines
from options import MonodepthOptions
import datasets
import networks
cv2.setNumThreads(0) # This speeds up evaluation 5x on our unix systems (OpenCV 3.3.1)
splits_dir = os.path.join(os.path.dirname(__file__), "splits")
def compute_errors(gt, pred):
"""Computation of error metrics between predicted and ground truth depths
"""
thresh = np.maximum((gt / pred), (pred / gt))
a1 = (thresh < 1.25 ).mean()
a2 = (thresh < 1.25 ** 2).mean()
a3 = (thresh < 1.25 ** 3).mean()
log10 = np.mean(np.abs(np.log10(pred / gt)))
rmse = (gt - pred) ** 2
rmse = np.sqrt(rmse.mean())
rmse_log = (np.log(gt) - np.log(pred)) ** 2
rmse_log = np.sqrt(rmse_log.mean())
abs_rel = np.mean(np.abs(gt - pred) / gt)
sq_rel = np.mean(((gt - pred) ** 2) / gt)
return abs_rel, sq_rel, rmse, rmse_log, log10, a1, a2, a3
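# Minimal illustrative call (values are made up); both arrays are expected to be already
# masked to valid, strictly positive depths:
#   gt = np.array([2.0, 4.0, 8.0]); pred = np.array([2.2, 3.6, 8.4])
#   abs_rel, sq_rel, rmse, rmse_log, log10, a1, a2, a3 = compute_errors(gt, pred)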
def batch_post_process_disparity(l_disp, r_disp):
"""Apply the disparity post-processing method as introduced in Monodepthv1
"""
_, h, w = l_disp.shape
m_disp = 0.5 * (l_disp + r_disp)
l, _ = np.meshgrid(np.linspace(0, 1, w), np.linspace(0, 1, h))
l_mask = (1.0 - np.clip(20 * (l - 0.05), 0, 1))[None, ...]
r_mask = l_mask[:, :, ::-1]
return r_mask * l_disp + l_mask * r_disp + (1.0 - l_mask - r_mask) * m_disp
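# l_mask is 1 over roughly the left 5% of the width and ramps to 0 by ~10%; r_mask is its
# horizontal mirror. The blend therefore takes r_disp near the left edge, l_disp near the
# right edge, and the simple average m_disp in between.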
def evaluate(opt):
"""Evaluates a pretrained model using a specified test set
"""
MIN_DEPTH = 1e-2
MAX_DEPTH = 10
if opt.ext_disp_to_eval is None:
opt.load_weights_folder = os.path.expanduser(opt.load_weights_folder)
assert os.path.isdir(opt.load_weights_folder), \
"Cannot find a folder at {}".format(opt.load_weights_folder)
print("-> Loading weights from {}".format(opt.load_weights_folder))
filenames = readlines(os.path.join(splits_dir, opt.eval_split, "test_files.txt"))
encoder_path = os.path.join(opt.load_weights_folder, "encoder.pth")
decoder_path = os.path.join(opt.load_weights_folder, "depth.pth")
encoder_dict = torch.load(encoder_path)
dataset = datasets.NYUDataset(opt.data_path, filenames, encoder_dict['height'], encoder_dict['width'],
[0], 1, is_test=True, return_plane=True, num_plane_keysets=0,
return_line=True, num_line_keysets=0)
dataloader = DataLoader(dataset, opt.batch_size, shuffle=False, num_workers=opt.num_workers,
pin_memory=True, drop_last=False)
encoder = networks.ResnetEncoder(opt.num_layers, False)
depth_decoder = networks.DepthDecoder(encoder.num_ch_enc, [0])
model_dict = encoder.state_dict()
encoder.load_state_dict({k: v for k, v in encoder_dict.items() if k in model_dict})
model_dict = depth_decoder.state_dict()
decoder_dict = torch.load(decoder_path)
depth_decoder.load_state_dict({k: v for k, v in decoder_dict.items() if k in model_dict})
encoder.cuda()
encoder.eval()
depth_decoder.cuda()
depth_decoder.eval()
gt_depths = []
planes = []
lines = []
pred_disps = []
print("-> Computing predictions with size {}x{}".format(
encoder_dict['width'], encoder_dict['height']))
with torch.no_grad():
for data in dataloader:
input_color = data[("color", 0, 0)].cuda()
norm_pix_coords = [data[("norm_pix_coords", s)].cuda() for s in opt.scales]
gt_depth = data["depth_gt"][:, 0].numpy()
gt_depths.append(gt_depth)
plane = data[("plane", 0, -1)][:, 0].numpy()
planes.append(plane)
line = data[("line", 0, -1)][:, 0].numpy()
lines.append(line)
if opt.post_process:
# Post-processed results require each image to have two forward passes
input_color = torch.cat((input_color, torch.flip(input_color, [3])), 0)
norm_pix_coords = [torch.cat((pc, torch.flip(pc, [3])), 0) for pc in norm_pix_coords]
norm_pix_coords[0][norm_pix_coords[0].shape[0] // 2:, 0] *= -1
output = depth_decoder(encoder(input_color), norm_pix_coords)
pred_disp, _ = disp_to_depth(output[("disp", 0)], opt.min_depth, opt.max_depth)
pred_disp = pred_disp.cpu()[:, 0].numpy()
if opt.post_process:
N = pred_disp.shape[0] // 2
pred_disp = batch_post_process_disparity(pred_disp[:N], pred_disp[N:, :, ::-1])
pred_disps.append(pred_disp)
gt_depths = np.concatenate(gt_depths)
planes = np.concatenate(planes)
lines = np.concatenate(lines)
pred_disps = np.concatenate(pred_disps)
else:
filenames = readlines(os.path.join(splits_dir, opt.eval_split, "test_files.txt"))
        dataset = datasets.NYUDataset(opt.data_path, filenames, opt.height, opt.width,
[0], 1, is_test=True, return_plane=True, num_plane_keysets=0,
return_line=True, num_line_keysets=0)
dataloader = DataLoader(dataset, opt.batch_size, shuffle=False, num_workers=opt.num_workers,
pin_memory=True, drop_last=False)
gt_depths = []
planes = []
lines = []
for data in dataloader:
gt_depth = data["depth_gt"][:, 0].numpy()
gt_depths.append(gt_depth)
plane = data[("plane", 0, -1)][:, 0].numpy()
planes.append(plane)
line = data[("line", 0, -1)][:, 0].numpy()
lines.append(line)
gt_depths = np.concatenate(gt_depths)
planes = np.concatenate(planes)
lines = np.concatenate(lines)
# Load predictions from file
print("-> Loading predictions from {}".format(opt.ext_disp_to_eval))
pred_disps = np.load(opt.ext_disp_to_eval)
if opt.save_pred_disps:
output_path = os.path.join(
opt.load_weights_folder, "disps_{}_split.npy".format(opt.eval_split))
print("-> Saving predicted disparities to ", output_path)
np.save(output_path, pred_disps)
if opt.no_eval:
print("-> Evaluation disabled. Done.")
quit()
print("-> Evaluating")
print(" Mono evaluation - using median scaling")
errors = []
ratios = []
gt_plane_pixel_deviations = []
gt_plane_instance_max_deviations = []
gt_flatness_ratios = []
gt_line_pixel_deviations = []
gt_line_instance_max_deviations = []
gt_straightness_ratios = []
pred_plane_pixel_deviations = []
pred_plane_instance_max_deviations = []
pred_flatness_ratios = []
pred_line_pixel_deviations = []
pred_line_instance_max_deviations = []
pred_straightness_ratios = []
norm_pix_coords = dataset.get_norm_pix_coords()
for i in range(pred_disps.shape[0]):
gt_depth = gt_depths[i]
gt_height, gt_width = gt_depth.shape[:2]
pred_disp = pred_disps[i]
pred_disp = cv2.resize(pred_disp, (gt_width, gt_height))
pred_depth = 1 / pred_disp
mask = np.logical_and(gt_depth > MIN_DEPTH, gt_depth < MAX_DEPTH)
crop_mask = np.zeros(mask.shape)
crop_mask[dataset.default_crop[2]:dataset.default_crop[3], \
dataset.default_crop[0]:dataset.default_crop[1]] = 1
mask = np.logical_and(mask, crop_mask)
mask_pred_depth = pred_depth[mask]
mask_gt_depth = gt_depth[mask]
mask_pred_depth *= opt.pred_depth_scale_factor
if not opt.disable_median_scaling:
ratio = np.median(mask_gt_depth) / np.median(mask_pred_depth)
ratios.append(ratio)
mask_pred_depth *= ratio
else:
ratio = 1
ratios.append(ratio)
mask_pred_depth[mask_pred_depth < MIN_DEPTH] = MIN_DEPTH
mask_pred_depth[mask_pred_depth > MAX_DEPTH] = MAX_DEPTH
errors.append(compute_errors(mask_gt_depth, mask_pred_depth))
# compute the flatness and straightness
plane_seg = planes[i]
line_seg = lines[i]
X = norm_pix_coords[0] * gt_depth
Y = norm_pix_coords[1] * gt_depth
Z = gt_depth
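        # For each GT plane/line instance the back-projected 3D points are analysed with a
        # PCA of their covariance: for planes, deviations are distances along the smallest
        # eigenvector (the fitted normal) and "flatness" is the variance fraction in that
        # direction; for lines, deviations are distances from the first principal axis and
        # "straightness" is the variance fraction off that axis. The same statistics are
        # recomputed further down on the (median-scaled) predicted depth.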
for j in range(plane_seg.max()):
seg_x = X[plane_seg == (j + 1)]
seg_y = Y[plane_seg == (j + 1)]
seg_z = Z[plane_seg == (j + 1)]
P = np.stack((seg_x, seg_y, seg_z), axis=1)
mean_P = P.mean(axis=0)
cent_P = P - mean_P
conv_P = cent_P.T.dot(cent_P) / seg_x.shape[0]
e_vals, e_vecs = np.linalg.eig(conv_P)
idx = e_vals.argsort()[::-1]
e_vals = e_vals[idx]
e_vecs = e_vecs[:, idx]
deviations = np.abs(cent_P.dot(e_vecs[:, 2]))
variance_ratios = e_vals / e_vals.sum()
gt_plane_instance_max_deviations.append(np.max(deviations))
gt_plane_pixel_deviations.append(deviations)
gt_flatness_ratios.append(variance_ratios[2])
for j in range(line_seg.max()):
seg_x = X[line_seg == (j + 1)]
seg_y = Y[line_seg == (j + 1)]
seg_z = Z[line_seg == (j + 1)]
P = np.stack((seg_x, seg_y, seg_z), axis=1)
mean_P = P.mean(axis=0)
cent_P = P - mean_P
conv_P = cent_P.T.dot(cent_P) / seg_x.shape[0]
e_vals, e_vecs = np.linalg.eig(conv_P)
idx = e_vals.argsort()[::-1]
e_vals = e_vals[idx]
e_vecs = e_vecs[:, idx]
dev2 = np.sum(cent_P ** 2, 1) - (cent_P.dot(e_vecs[:, 0])) ** 2
dev2[dev2 < 0] = 0
deviations = np.sqrt(dev2)
gt_line_instance_max_deviations.append(np.max(deviations))
gt_line_pixel_deviations.append(deviations)
variance_ratios = e_vals / e_vals.sum()
gt_straightness_ratios.append(variance_ratios[1] + variance_ratios[2])
pred_depth *= ratio
X = norm_pix_coords[0] * pred_depth
Y = norm_pix_coords[1] * pred_depth
Z = pred_depth
for j in range(plane_seg.max()):
seg_x = X[plane_seg == (j + 1)]
seg_y = Y[plane_seg == (j + 1)]
seg_z = Z[plane_seg == (j + 1)]
P = np.stack((seg_x, seg_y, seg_z), axis=1)
mean_P = P.mean(axis=0)
cent_P = P - mean_P
conv_P = cent_P.T.dot(cent_P) / seg_x.shape[0]
e_vals, e_vecs = np.linalg.eig(conv_P)
idx = e_vals.argsort()[::-1]
e_vals = e_vals[idx]
e_vecs = e_vecs[:, idx]
deviations = np.abs(cent_P.dot(e_vecs[:, 2]))
variance_ratios = e_vals / e_vals.sum()
pred_plane_instance_max_deviations.append(np.max(deviations))
pred_plane_pixel_deviations.append(deviations)
pred_flatness_ratios.append(variance_ratios[2])
for j in range(line_seg.max()):
seg_x = X[line_seg == (j + 1)]
seg_y = Y[line_seg == (j + 1)]
seg_z = Z[line_seg == (j + 1)]
P = np.stack((seg_x, seg_y, seg_z), axis=1)
mean_P = P.mean(axis=0)
cent_P = P - mean_P
conv_P = cent_P.T.dot(cent_P) / seg_x.shape[0]
e_vals, e_vecs =
|
np.linalg.eig(conv_P)
|
numpy.linalg.eig
|
from __future__ import print_function, division, absolute_import
import functools
import sys
import warnings
# unittest.TestCase.subTest() was only added in Python 3.4
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug import random as iarandom
from imgaug.testutils import (array_equal_lists, keypoints_equal, reseed,
runtest_pickleable_uint8_img)
import imgaug.augmenters.arithmetic as arithmetic_lib
import imgaug.augmenters.contrast as contrast_lib
class Test_cutout(unittest.TestCase):
@mock.patch("imgaug.augmenters.arithmetic.cutout_")
def test_mocked(self, mock_inplace):
image = np.mod(np.arange(100*100*3), 255).astype(np.uint8).reshape(
(100, 100, 3))
mock_inplace.return_value = "foo"
rng = iarandom.RNG(0)
image_aug = iaa.cutout(image,
x1=10,
y1=20,
x2=30,
y2=40,
fill_mode="gaussian",
cval=1,
fill_per_channel=0.5,
seed=rng)
assert mock_inplace.call_count == 1
assert image_aug == "foo"
args = mock_inplace.call_args_list[0][0]
assert args[0] is not image
assert np.array_equal(args[0], image)
assert np.isclose(args[1], 10)
assert np.isclose(args[2], 20)
assert np.isclose(args[3], 30)
assert np.isclose(args[4], 40)
assert args[5] == "gaussian"
assert args[6] == 1
assert np.isclose(args[7], 0.5)
assert args[8] is rng
class Test_cutout_(unittest.TestCase):
def test_with_simple_image(self):
image = np.mod(np.arange(100*100*3), 255).astype(np.uint8).reshape(
(100, 100, 3))
image = 1 + image
image_aug = iaa.cutout_(image,
x1=10,
y1=20,
x2=30,
y2=40,
fill_mode="constant",
cval=0,
fill_per_channel=False,
seed=None)
mask = np.zeros(image.shape, dtype=bool)
mask[20:40, 10:30, :] = True
overlap_inside = np.sum(image_aug[mask] == 0) / np.sum(mask)
overlap_outside = np.sum(image_aug[~mask] > 0) / np.sum(~mask)
assert image_aug is image
assert overlap_inside >= 1.0 - 1e-4
assert overlap_outside >= 1.0 - 1e-4
@mock.patch("imgaug.augmenters.arithmetic._fill_rectangle_constant_")
def test_fill_mode_constant_mocked(self, mock_fill):
self._test_with_fill_mode_mocked("constant", mock_fill)
@mock.patch("imgaug.augmenters.arithmetic._fill_rectangle_gaussian_")
def test_fill_mode_gaussian_mocked(self, mock_fill):
self._test_with_fill_mode_mocked("gaussian", mock_fill)
@classmethod
def _test_with_fill_mode_mocked(cls, fill_mode, mock_fill):
image = np.mod(np.arange(100*100*3), 256).astype(np.uint8).reshape(
(100, 100, 3))
mock_fill.return_value = image
seed = iarandom.RNG(0)
image_aug = iaa.cutout_(image,
x1=10,
y1=20,
x2=30,
y2=40,
fill_mode=fill_mode,
cval=0,
fill_per_channel=False,
seed=seed)
assert mock_fill.call_count == 1
args = mock_fill.call_args_list[0][0]
kwargs = mock_fill.call_args_list[0][1]
assert image_aug is image
assert args[0] is image
assert kwargs["x1"] == 10
assert kwargs["y1"] == 20
assert kwargs["x2"] == 30
assert kwargs["y2"] == 40
assert kwargs["cval"] == 0
assert kwargs["per_channel"] is False
assert kwargs["random_state"] is seed
def test_zero_height(self):
image = np.mod(np.arange(100*100*3), 255).astype(np.uint8).reshape(
(100, 100, 3))
image = 1 + image
image_cp = np.copy(image)
image_aug = iaa.cutout_(image,
x1=10,
y1=20,
x2=30,
y2=20,
fill_mode="constant",
cval=0,
fill_per_channel=False,
seed=None)
assert np.array_equal(image_aug, image_cp)
def test_zero_height_width(self):
image = np.mod(np.arange(100*100*3), 255).astype(np.uint8).reshape(
(100, 100, 3))
image = 1 + image
image_cp = np.copy(image)
image_aug = iaa.cutout_(image,
x1=10,
y1=20,
x2=10,
y2=40,
fill_mode="constant",
cval=0,
fill_per_channel=False,
seed=None)
assert np.array_equal(image_aug, image_cp)
def test_position_outside_of_image_rect_fully_outside(self):
image = np.mod(np.arange(100*100*3), 255).astype(np.uint8).reshape(
(100, 100, 3))
image = 1 + image
image_cp = np.copy(image)
image_aug = iaa.cutout_(image,
x1=-50,
y1=150,
x2=-1,
y2=200,
fill_mode="constant",
cval=0,
fill_per_channel=False,
seed=None)
assert np.array_equal(image_aug, image_cp)
def test_position_outside_of_image_rect_partially_inside(self):
image = np.mod(np.arange(100*100*3), 255).astype(np.uint8).reshape(
(100, 100, 3))
image = 1 + image
image_aug = iaa.cutout_(image,
x1=-25,
y1=-25,
x2=25,
y2=25,
fill_mode="constant",
cval=0,
fill_per_channel=False,
seed=None)
assert np.all(image_aug[0:25, 0:25] == 0)
assert np.all(image_aug[0:25, 25:] > 0)
assert np.all(image_aug[25:, :] > 0)
def test_zero_sized_axes(self):
shapes = [(0, 0, 0),
(1, 0, 0),
(0, 1, 0),
(0, 1, 1),
(1, 1, 0),
(1, 0, 1),
(1, 0),
(0, 1),
(0, 0)]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
image_cp = np.copy(image)
image_aug = iaa.cutout_(image,
x1=-5,
y1=-5,
x2=5,
y2=5,
fill_mode="constant",
cval=0)
assert np.array_equal(image_aug, image_cp)
class Test_fill_rectangle_gaussian_(unittest.TestCase):
def test_simple_image(self):
image = np.mod(np.arange(100*100*3), 256).astype(np.uint8).reshape(
(100, 100, 3))
image_cp = np.copy(image)
rng = iarandom.RNG(0)
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
image,
x1=10,
y1=20,
x2=60,
y2=70,
cval=0,
per_channel=False,
random_state=rng)
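        # The tolerances below assume the gaussian fill draws roughly N(127.5, 255/6) for
        # uint8, i.e. centered on the dtype midpoint with ~99.7% of samples inside [0, 255].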
assert np.array_equal(image_aug[:20, :],
image_cp[:20, :])
assert not np.array_equal(image_aug[20:70, 10:60],
image_cp[20:70, 10:60])
assert np.isclose(np.average(image_aug[20:70, 10:60]), 127.5,
rtol=0, atol=5.0)
assert np.isclose(np.std(image_aug[20:70, 10:60]), 255.0/2.0/3.0,
rtol=0, atol=2.5)
def test_per_channel(self):
image = np.uint8([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
image = np.tile(image.reshape((1, 10, 1)), (1, 1, 3))
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
per_channel=False,
random_state=iarandom.RNG(0))
image_aug_pc = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
per_channel=True,
random_state=iarandom.RNG(0))
diff11 = (image_aug[..., 0] != image_aug[..., 1])
diff12 = (image_aug[..., 0] != image_aug[..., 2])
diff21 = (image_aug_pc[..., 0] != image_aug_pc[..., 1])
diff22 = (image_aug_pc[..., 0] != image_aug_pc[..., 2])
assert not np.any(diff11)
assert not np.any(diff12)
assert np.any(diff21)
assert np.any(diff22)
def test_deterministic_with_same_seed(self):
image = np.uint8([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
image = np.tile(image.reshape((1, 10, 1)), (1, 1, 3))
image_aug_pc1 = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
per_channel=True,
random_state=iarandom.RNG(0))
image_aug_pc2 = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
per_channel=True,
random_state=iarandom.RNG(0))
image_aug_pc3 = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
per_channel=True,
random_state=iarandom.RNG(1))
assert np.array_equal(image_aug_pc1, image_aug_pc2)
assert not np.array_equal(image_aug_pc2, image_aug_pc3)
def test_no_channels(self):
for per_channel in [False, True]:
with self.subTest(per_channel=per_channel):
image = np.uint8([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
image = image.reshape((1, 10))
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
per_channel=per_channel,
random_state=iarandom.RNG(0))
assert not np.array_equal(image_aug, image)
def test_unusual_channel_numbers(self):
for nb_channels in [1, 2, 3, 4, 5, 511, 512, 513]:
for per_channel in [False, True]:
with self.subTest(nb_channels=nb_channels,
per_channel=per_channel):
image = np.uint8([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
image = np.tile(image.reshape((1, 10, 1)),
(1, 1, nb_channels))
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
                        per_channel=per_channel,
random_state=iarandom.RNG(0))
assert not np.array_equal(image_aug, image)
def test_other_dtypes_bool(self):
for per_channel in [False, True]:
with self.subTest(per_channel=per_channel):
image = np.array([0, 1], dtype=bool)
image = np.tile(image, (int(3*300*300/2),))
image = image.reshape((300, 300, 3))
image_cp = np.copy(image)
rng = iarandom.RNG(0)
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
image,
x1=10,
y1=10,
x2=300-10,
y2=300-10,
cval=0,
per_channel=per_channel,
random_state=rng)
rect = image_aug[10:-10, 10:-10]
p_true = np.sum(rect) / rect.size
assert np.array_equal(image_aug[:10, :], image_cp[:10, :])
assert not np.array_equal(rect, image_cp[10:-10, 10:-10])
assert np.isclose(p_true, 0.5, rtol=0, atol=0.1)
if per_channel:
for c in np.arange(1, image.shape[2]):
assert not np.array_equal(image_aug[..., 0],
image_aug[..., c])
def test_other_dtypes_int_uint(self):
dtypes = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dtype in dtypes:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
dynamic_range = int(max_value) - int(min_value)
gaussian_min = iarandom.RNG(0).normal(min_value, 0.0001,
size=(1,))
gaussian_max = iarandom.RNG(0).normal(max_value, 0.0001,
size=(1,))
assert min_value - 1.0 <= gaussian_min <= min_value + 1.0
assert max_value - 1.0 <= gaussian_max <= max_value + 1.0
for per_channel in [False, True]:
with self.subTest(dtype=dtype, per_channel=per_channel):
                    # don't generate the image from choice() here; that seems
                    # to not support uint64 (max value not in result)
image = np.array([min_value, min_value+1,
int(center_value),
max_value-1, max_value], dtype=dtype)
image = np.tile(image, (int(3*300*300/5),))
image = image.reshape((300, 300, 3))
assert min_value in image
assert max_value in image
image_cp = np.copy(image)
rng = iarandom.RNG(0)
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
image, x1=10, y1=10, x2=300-10, y2=300-10,
cval=0, per_channel=per_channel, random_state=rng)
rect = image_aug[10:-10, 10:-10]
mean = np.average(np.float128(rect))
std = np.std(np.float128(rect) - center_value)
assert np.array_equal(image_aug[:10, :], image_cp[:10, :])
assert not np.array_equal(rect,
image_cp[10:-10, 10:-10])
assert np.isclose(mean, center_value, rtol=0,
atol=0.05*dynamic_range)
assert np.isclose(std, dynamic_range/2.0/3.0, rtol=0,
atol=0.05*dynamic_range/2.0/3.0)
assert np.min(rect) < min_value + 0.2 * dynamic_range
assert np.max(rect) > max_value - 0.2 * dynamic_range
if per_channel:
for c in np.arange(1, image.shape[2]):
assert not np.array_equal(image_aug[..., 0],
image_aug[..., c])
def test_other_dtypes_float(self):
dtypes = ["float16", "float32", "float64", "float128"]
for dtype in dtypes:
min_value = 0.0
center_value = 0.5
max_value = 1.0
dynamic_range = np.float128(max_value) - np.float128(min_value)
gaussian_min = iarandom.RNG(0).normal(min_value, 0.0001,
size=(1,))
gaussian_max = iarandom.RNG(0).normal(max_value, 0.0001,
size=(1,))
assert min_value - 1.0 <= gaussian_min <= min_value + 1.0
assert max_value - 1.0 <= gaussian_max <= max_value + 1.0
for per_channel in [False, True]:
with self.subTest(dtype=dtype, per_channel=per_channel):
                    # don't generate the image from choice() here; that seems
                    # to not support uint64 (max value not in result)
image = np.array([min_value, min_value+1,
int(center_value),
max_value-1, max_value], dtype=dtype)
image = np.tile(image, (int(3*300*300/5),))
image = image.reshape((300, 300, 3))
assert np.any(np.isclose(image, min_value,
rtol=0, atol=1e-4))
assert np.any(np.isclose(image, max_value,
rtol=0, atol=1e-4))
image_cp = np.copy(image)
rng = iarandom.RNG(0)
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
image, x1=10, y1=10, x2=300-10, y2=300-10,
cval=0, per_channel=per_channel, random_state=rng)
rect = image_aug[10:-10, 10:-10]
mean = np.average(np.float128(rect))
std = np.std(np.float128(rect) - center_value)
assert np.allclose(image_aug[:10, :], image_cp[:10, :],
rtol=0, atol=1e-4)
assert not np.allclose(rect, image_cp[10:-10, 10:-10],
rtol=0, atol=1e-4)
assert np.isclose(mean, center_value, rtol=0,
atol=0.05*dynamic_range)
assert np.isclose(std, dynamic_range/2.0/3.0, rtol=0,
atol=0.05*dynamic_range/2.0/3.0)
assert np.min(rect) < min_value + 0.2 * dynamic_range
assert np.max(rect) > max_value - 0.2 * dynamic_range
if per_channel:
for c in np.arange(1, image.shape[2]):
assert not np.allclose(image_aug[..., 0],
image_aug[..., c],
rtol=0, atol=1e-4)
class Test_fill_rectangle_constant_(unittest.TestCase):
def test_simple_image(self):
image = np.mod(np.arange(100*100*3), 256).astype(np.uint8).reshape(
(100, 100, 3))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=17, per_channel=False, random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60] == 17)
def test_iterable_cval_but_per_channel_is_false(self):
image = np.mod(np.arange(100*100*3), 256).astype(np.uint8).reshape(
(100, 100, 3))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=[17, 21, 25], per_channel=False, random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60] == 17)
def test_iterable_cval_with_per_channel_is_true(self):
image = np.mod(np.arange(100*100*3), 256).astype(np.uint8).reshape(
(100, 100, 3))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=[17, 21, 25], per_channel=True, random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60, 0] == 17)
assert np.all(image_aug[20:70, 10:60, 1] == 21)
assert np.all(image_aug[20:70, 10:60, 2] == 25)
def test_iterable_cval_with_per_channel_is_true_channel_mismatch(self):
image = np.mod(np.arange(100*100*5), 256).astype(np.uint8).reshape(
(100, 100, 5))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=[17, 21], per_channel=True, random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60, 0] == 17)
assert np.all(image_aug[20:70, 10:60, 1] == 21)
assert np.all(image_aug[20:70, 10:60, 2] == 17)
assert np.all(image_aug[20:70, 10:60, 3] == 21)
assert np.all(image_aug[20:70, 10:60, 4] == 17)
def test_single_cval_with_per_channel_is_true(self):
image = np.mod(np.arange(100*100*3), 256).astype(np.uint8).reshape(
(100, 100, 3))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=17, per_channel=True, random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60, 0] == 17)
assert np.all(image_aug[20:70, 10:60, 1] == 17)
assert np.all(image_aug[20:70, 10:60, 2] == 17)
def test_no_channels_single_cval(self):
for per_channel in [False, True]:
with self.subTest(per_channel=per_channel):
image = np.mod(
np.arange(100*100), 256
).astype(np.uint8).reshape((100, 100))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=17, per_channel=per_channel, random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60] == 17)
def test_no_channels_iterable_cval(self):
for per_channel in [False, True]:
with self.subTest(per_channel=per_channel):
image = np.mod(
np.arange(100*100), 256
).astype(np.uint8).reshape((100, 100))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=[17, 21, 25], per_channel=per_channel,
random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60] == 17)
def test_unusual_channel_numbers(self):
for nb_channels in [1, 2, 4, 5, 511, 512, 513]:
for per_channel in [False, True]:
with self.subTest(per_channel=per_channel):
image = np.mod(
np.arange(100*100*nb_channels), 256
).astype(np.uint8).reshape((100, 100, nb_channels))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=[17, 21], per_channel=per_channel,
random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
if per_channel:
for c in np.arange(nb_channels):
val = 17 if c % 2 == 0 else 21
assert np.all(image_aug[20:70, 10:60, c] == val)
else:
assert np.all(image_aug[20:70, 10:60, :] == 17)
def test_other_dtypes_bool(self):
for per_channel in [False, True]:
with self.subTest(per_channel=per_channel):
image = np.array([0, 1], dtype=bool)
image = np.tile(image, (int(3*300*300/2),))
image = image.reshape((300, 300, 3))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=10, x2=300-10, y2=300-10,
cval=[0, 1], per_channel=per_channel,
random_state=None)
rect = image_aug[10:-10, 10:-10]
assert np.array_equal(image_aug[:10, :], image_cp[:10, :])
if per_channel:
assert np.all(image_aug[10:-10, 10:-10, 0] == 0)
assert np.all(image_aug[10:-10, 10:-10, 1] == 1)
assert np.all(image_aug[10:-10, 10:-10, 2] == 0)
else:
assert np.all(image_aug[20:70, 10:60] == 0)
def test_other_dtypes_uint_int(self):
dtypes = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dtype in dtypes:
for per_channel in [False, True]:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
with self.subTest(dtype=dtype, per_channel=per_channel):
image = np.array([min_value, min_value+1,
int(center_value),
max_value-1, max_value], dtype=dtype)
image = np.tile(image, (int(3*300*300/5),))
image = image.reshape((300, 300, 3))
assert min_value in image
assert max_value in image
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=10, x2=300-10, y2=300-10,
cval=[min_value, 10, max_value],
per_channel=per_channel,
random_state=None)
assert np.array_equal(image_aug[:10, :], image_cp[:10, :])
if per_channel:
assert np.all(image_aug[10:-10, 10:-10, 0]
== min_value)
assert np.all(image_aug[10:-10, 10:-10, 1]
== 10)
assert np.all(image_aug[10:-10, 10:-10, 2]
== max_value)
else:
                        assert np.all(image_aug[10:-10, 10:-10] == min_value)
def test_other_dtypes_float(self):
dtypes = ["float16", "float32", "float64", "float128"]
for dtype in dtypes:
for per_channel in [False, True]:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
with self.subTest(dtype=dtype, per_channel=per_channel):
image = np.array([min_value, min_value+1,
int(center_value),
max_value-1, max_value], dtype=dtype)
image = np.tile(image, (int(3*300*300/5),))
image = image.reshape((300, 300, 3))
# Use this here instead of any(isclose(...)) because
# the latter one leads to overflow warnings.
assert image.flat[0] <= np.float128(min_value) + 1.0
assert image.flat[4] >= np.float128(max_value) - 1.0
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=10, x2=300-10, y2=300-10,
cval=[min_value, 10, max_value],
per_channel=per_channel,
random_state=None)
assert image_aug.dtype.name == dtype
assert np.allclose(image_aug[:10, :], image_cp[:10, :],
rtol=0, atol=1e-4)
if per_channel:
assert np.allclose(image_aug[10:-10, 10:-10, 0],
np.float128(min_value),
rtol=0, atol=1e-4)
assert np.allclose(image_aug[10:-10, 10:-10, 1],
np.float128(10),
rtol=0, atol=1e-4)
assert np.allclose(image_aug[10:-10, 10:-10, 2],
np.float128(max_value),
rtol=0, atol=1e-4)
else:
                        assert np.allclose(image_aug[10:-10, 10:-10],
np.float128(min_value),
rtol=0, atol=1e-4)
class TestAdd(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.Add(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.Add(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
        # no add, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [float, int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
aug = iaa.Add(value=value_type(value), per_channel=per_channel)
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
def test_add_floats(self):
# specific tests with floats
aug = iaa.Add(value=0.75)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
aug = iaa.Add(value=0.45)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
aug = iaa.Add(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
def test_keypoints_dont_change(self):
        # keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Add(value=1)
aug_det = iaa.Add(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_value(self):
# varying values
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
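        # With value=(0, 10) a fresh addend is sampled per call, so two consecutive
        # augmentations coincide only about 1 time in 11; >= 70% changed iterations is a
        # loose lower bound, while the deterministic copy must never change.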
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
def test_per_channel(self):
# test channelwise
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((1, 1, 100), dtype=np.uint8))
uq = np.unique(observed)
assert observed.shape == (1, 1, 100)
assert 0 in uq
assert 1 in uq
assert len(uq) == 2
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((1, 1, 20), dtype=np.uint8))
assert observed.shape == (1, 1, 20)
uq = np.unique(observed)
per_channel = (len(uq) == 2)
if per_channel:
seen[0] += 1
else:
seen[1] += 1
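        # With per_channel=0.5 over 400 draws, the per-channel branch count is roughly
        # Binomial(400, 0.5) (mean 200, std 10), so 150..250 is a generous ~5-sigma window.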
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Add(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Add(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.Add(value=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps(self):
# test heatmaps (not affected by augmenter)
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
aug = iaa.Add(value=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
image = np.zeros((3, 3), dtype=bool)
aug = iaa.Add(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 20, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
image = np.full((1, 1, 3), 20, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
def test_pickleable(self):
aug = iaa.Add((0, 50), per_channel=True, seed=1)
runtest_pickleable_uint8_img(aug, iterations=10)
class TestAddElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.AddElementwise(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.AddElementwise(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
        # no add, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_add_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
aug = iaa.AddElementwise(value=value_type(value), per_channel=per_channel)
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 + 1
assert np.max(observed) <= 100 + 10
aug = iaa.AddElementwise(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 + 1
assert np.max(observed) <= 100 + 10
aug = iaa.AddElementwise(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 - 3
assert np.max(observed) <= 100 + 3
aug = iaa.AddElementwise(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 - 3
assert np.max(observed) <= 100 + 3
def test_keypoints_dont_change(self):
        # keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.AddElementwise(value=1)
aug_det = iaa.AddElementwise(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_value(self):
# varying values
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
def test_samples_change_by_spatial_location(self):
# values should change between pixels
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=(-50, 50))
nb_same = 0
nb_different = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_flat = observed_aug.flatten()
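            # walk over the flattened pixels and compare each value with its
            # predecessor; elementwise noise in [-50, 50] should make almost
            # every neighbouring pair differ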
last = None
for j in sm.xrange(observed_aug_flat.size):
if last is not None:
v = observed_aug_flat[j]
if v - 0.0001 <= last <= v + 0.0001:
nb_same += 1
else:
nb_different += 1
last = observed_aug_flat[j]
assert nb_different > 0.9 * (nb_different + nb_same)
def test_per_channel(self):
# test channelwise
aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((20, 20, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.AddElementwise(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.AddElementwise(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.AddElementwise(value=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.AddElementwise(value=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.AddElementwise(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
for _ in sm.xrange(10):
image = np.full((5, 5, 3), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
image = np.full((5, 5, 3), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
def test_pickleable(self):
aug = iaa.AddElementwise((0, 50), per_channel=True, seed=1)
runtest_pickleable_uint8_img(aug, iterations=2)
class TestAdditiveGaussianNoise(unittest.TestCase):
def setUp(self):
reseed()
def test_loc_zero_scale_zero(self):
        # no noise, shouldn't change anything
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
images = np.array([base_img])
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
def test_loc_zero_scale_nonzero(self):
# zero-centered noise
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert not np.array_equal(observed, images)
observed = aug_det.augment_images(images)
assert not np.array_equal(observed, images)
observed = aug.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
observed = aug_det.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
def test_std_dev_of_added_noise_matches_scale(self):
# std correct?
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
nb_iterations = 1000
values = []
for i in sm.xrange(nb_iterations):
images_aug = aug.augment_images(images)
values.append(images_aug[0, 0, 0, 0])
values = np.array(values)
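        # the sampled std should be close to scale/255 = 0.2; the bounds are
        # loose to allow for sampling noise and uint8 clipping at 0 and 255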
assert np.min(values) == 0
assert 0.1 < np.std(values) / 255.0 < 0.4
def test_nonzero_loc(self):
# non-zero loc
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0.25 * 255, scale=0.01 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
nb_iterations = 1000
values = []
for i in sm.xrange(nb_iterations):
images_aug = aug.augment_images(images)
values.append(images_aug[0, 0, 0, 0] - 128)
values = np.array(values)
assert 54 < np.average(values) < 74 # loc=0.25 should be around 255*0.25=64 average
def test_tuple_as_loc(self):
# varying locs
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=(0, 0.5 * 255), scale=0.0001 * 255)
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_stochastic_parameter_as_loc(self):
# varying locs by stochastic param
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=iap.Choice([-20, 20]), scale=0.0001 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
seen = [0, 0]
for i in sm.xrange(200):
observed = aug.augment_images(images)
mean = np.mean(observed)
diff_m20 = abs(mean - (128-20))
diff_p20 = abs(mean - (128+20))
if diff_m20 <= 1:
seen[0] += 1
elif diff_p20 <= 1:
seen[1] += 1
else:
assert False
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_tuple_as_scale(self):
# varying stds
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=(0.01 * 255, 0.2 * 255))
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_stochastic_parameter_as_scale(self):
# varying stds by stochastic param
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=iap.Choice([1, 20]))
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 128
seen = [0, 0, 0]
for i in sm.xrange(200):
observed = aug.augment_images(images)
std = np.std(observed.astype(np.int32) - 128)
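            # the observed std should cluster near 1 or 20, depending on which
            # scale was sampled for this batch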
diff_1 = abs(std - 1)
diff_20 = abs(std - 20)
if diff_1 <= 2:
seen[0] += 1
elif diff_20 <= 5:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 5
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.AdditiveGaussianNoise(loc="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.AdditiveGaussianNoise(scale="test")
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0.5, scale=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.AdditiveGaussianNoise(scale=(0.1, 10), per_channel=True,
seed=1)
runtest_pickleable_uint8_img(aug, iterations=2)
class TestCutout(unittest.TestCase):
def setUp(self):
reseed()
def test___init___defaults(self):
aug = iaa.Cutout()
assert aug.nb_iterations.value == 1
assert isinstance(aug.position[0], iap.Uniform)
assert isinstance(aug.position[1], iap.Uniform)
assert np.isclose(aug.size.value, 0.2)
assert aug.squared.value == 1
assert aug.fill_mode.value == "constant"
assert aug.cval.value == 128
assert aug.fill_per_channel.value == 0
def test___init___custom(self):
aug = iaa.Cutout(
nb_iterations=1,
position=(0.5, 0.5),
size=0.1,
squared=0.6,
fill_mode=["gaussian", "constant"],
cval=(0, 255),
fill_per_channel=0.5
)
assert aug.nb_iterations.value == 1
assert np.isclose(aug.position[0].value, 0.5)
assert np.isclose(aug.position[1].value, 0.5)
assert np.isclose(aug.size.value, 0.1)
assert np.isclose(aug.squared.p.value, 0.6)
assert aug.fill_mode.a == ["gaussian", "constant"]
assert np.isclose(aug.cval.a.value, 0)
assert np.isclose(aug.cval.b.value, 255)
assert np.isclose(aug.fill_per_channel.p.value, 0.5)
def test___init___fill_mode_is_stochastic_param(self):
param = iap.Deterministic("constant")
aug = iaa.Cutout(fill_mode=param)
assert aug.fill_mode is param
@mock.patch("imgaug.augmenters.arithmetic.cutout_")
def test_mocked__squared_false(self, mock_apply):
aug = iaa.Cutout(nb_iterations=2,
position=(0.5, 0.6),
size=iap.DeterministicList([0.1, 0.2]),
squared=False,
fill_mode="gaussian",
cval=1,
fill_per_channel=True)
image = np.zeros((10, 30, 3), dtype=np.uint8)
        # don't return the image itself, otherwise the loop below would fail
        # at its second iteration: the method is expected to work on an
        # internal copy of the image, not on the image itself
mock_apply.return_value = np.copy(image)
_ = aug(image=image)
assert mock_apply.call_count == 2
for call_idx in np.arange(2):
args = mock_apply.call_args_list[call_idx][0]
kwargs = mock_apply.call_args_list[call_idx][1]
assert args[0] is not image
assert np.array_equal(args[0], image)
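            # expected rectangle is centered at (pos_x*W, pos_y*H) = (15, 6);
            # with squared=False the two size samples [0.1, 0.2] set the
            # half-extents: 0.5*(0.1*H) vertically and 0.5*(0.2*W) horizontally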
assert np.isclose(kwargs["x1"], 0.5*30 - 0.5 * (0.2*30))
assert np.isclose(kwargs["y1"], 0.6*10 - 0.5 * (0.1*10))
assert np.isclose(kwargs["x2"], 0.5*30 + 0.5 * (0.2*30))
assert np.isclose(kwargs["y2"], 0.6*10 + 0.5 * (0.1*10))
assert kwargs["fill_mode"] == "gaussian"
assert np.array_equal(kwargs["cval"], [1, 1, 1])
assert np.isclose(kwargs["fill_per_channel"], 1.0)
assert isinstance(kwargs["seed"], iarandom.RNG)
@mock.patch("imgaug.augmenters.arithmetic.cutout_")
def test_mocked__squared_true(self, mock_apply):
aug = iaa.Cutout(nb_iterations=2,
position=(0.5, 0.6),
size=iap.DeterministicList([0.1, 0.2]),
squared=True,
fill_mode="gaussian",
cval=1,
fill_per_channel=True)
image = np.zeros((10, 30, 3), dtype=np.uint8)
        # don't return the image itself, otherwise the loop below would fail
        # at its second iteration: the method is expected to work on an
        # internal copy of the image, not on the image itself
mock_apply.return_value = np.copy(image)
_ = aug(image=image)
assert mock_apply.call_count == 2
for call_idx in np.arange(2):
args = mock_apply.call_args_list[call_idx][0]
kwargs = mock_apply.call_args_list[call_idx][1]
assert args[0] is not image
assert np.array_equal(args[0], image)
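            # with squared=True the patch is expected to be square: both
            # half-extents derive from the height-based size sample, 0.5*(0.1*H)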
assert np.isclose(kwargs["x1"], 0.5*30 - 0.5 * (0.1*10))
assert np.isclose(kwargs["y1"], 0.6*10 - 0.5 * (0.1*10))
assert np.isclose(kwargs["x2"], 0.5*30 + 0.5 * (0.1*10))
assert np.isclose(kwargs["y2"], 0.6*10 + 0.5 * (0.1*10))
assert kwargs["fill_mode"] == "gaussian"
assert np.array_equal(kwargs["cval"], [1, 1, 1])
assert np.isclose(kwargs["fill_per_channel"], 1.0)
assert isinstance(kwargs["seed"], iarandom.RNG)
def test_simple_image(self):
aug = iaa.Cutout(nb_iterations=2,
position=(
iap.DeterministicList([0.2, 0.8]),
iap.DeterministicList([0.2, 0.8])
),
size=0.2,
fill_mode="constant",
cval=iap.DeterministicList([0, 0, 0, 1, 1, 1]))
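        # two patches per image: the sampled cval fills one patch with 0 and
        # the other with 1, so each augmented image should contain exactly the
        # pixel values {0, 1, 255}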
image = np.full((100, 100, 3), 255, dtype=np.uint8)
for _ in np.arange(3):
images_aug = aug(images=[image, image])
for image_aug in images_aug:
values = np.unique(image_aug)
assert len(values) == 3
assert 0 in values
assert 1 in values
assert 255 in values
def test_batch_contains_only_non_image_data(self):
aug = iaa.Cutout()
segmap_arr = np.ones((3, 3, 1), dtype=np.int32)
segmap = ia.SegmentationMapsOnImage(segmap_arr, shape=(3, 3, 3))
segmap_aug = aug.augment_segmentation_maps(segmap)
assert np.array_equal(segmap.get_arr(), segmap_aug.get_arr())
def test_sampling_when_position_is_stochastic_parameter(self):
# sampling of position works slightly differently when it is a single
        # parameter instead of a tuple (paramX, paramY), so we have an extra
# test for that situation here
param = iap.DeterministicList([0.5, 0.6])
aug = iaa.Cutout(position=param)
samples = aug._draw_samples([
np.zeros((3, 3, 3), dtype=np.uint8),
np.zeros((3, 3, 3), dtype=np.uint8)
], iarandom.RNG(0))
assert np.allclose(samples.pos_x, [0.5, 0.5])
assert np.allclose(samples.pos_y, [0.6, 0.6])
def test_by_comparison_to_official_implementation(self):
image = np.ones((10, 8, 2), dtype=np.uint8)
aug = iaa.Cutout(1, position="uniform", size=0.2, squared=True,
cval=0)
aug_official = _CutoutOfficial(n_holes=1, length=int(10*0.2))
dropped = np.zeros((10, 8, 2), dtype=np.int32)
dropped_official = np.copy(dropped)
height = np.zeros((10, 8, 2), dtype=np.int32)
width = np.copy(height)
height_official = np.copy(height)
width_official = np.copy(width)
nb_iterations = 3 * 1000
images_aug = aug(images=[image] * nb_iterations)
for image_aug in images_aug:
image_aug_off = aug_official(image)
mask = (image_aug == 0)
mask_off = (image_aug_off == 0)
dropped += mask
dropped_official += mask_off
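            # recover the bounding box of the dropped region: ydrop/xdrop mark
            # the rows/columns containing at least one dropped pixel; their
            # first and last indices give the patch borders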
ydrop = np.max(mask, axis=(2, 1))
xdrop = np.max(mask, axis=(2, 0))
wx = np.where(xdrop)
wy = np.where(ydrop)
x1 = wx[0][0]
x2 = wx[0][-1]
y1 = wy[0][0]
y2 = wy[0][-1]
ydrop_off = np.max(mask_off, axis=(2, 1))
xdrop_off = np.max(mask_off, axis=(2, 0))
wx_off = np.where(xdrop_off)
wy_off = np.where(ydrop_off)
x1_off = wx_off[0][0]
x2_off = wx_off[0][-1]
y1_off = wy_off[0][0]
y2_off = wy_off[0][-1]
height += (
np.full(height.shape, 1 + (y2 - y1), dtype=np.int32)
* mask)
width += (
np.full(width.shape, 1 + (x2 - x1), dtype=np.int32)
* mask)
height_official += (
np.full(height_official.shape, 1 + (y2_off - y1_off),
dtype=np.int32)
* mask_off)
width_official += (
np.full(width_official.shape, 1 + (x2_off - x1_off),
dtype=np.int32)
* mask_off)
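        # aggregate statistics over all iterations: per-pixel drop probability
        # and the average patch height/width observed at each pixel (the 1e-4
        # avoids division by zero for pixels that were never dropped)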
dropped_prob = dropped / nb_iterations
dropped_prob_off = dropped_official / nb_iterations
height_avg = height / (dropped + 1e-4)
height_avg_off = height_official / (dropped_official + 1e-4)
width_avg = width / (dropped + 1e-4)
width_avg_off = width_official / (dropped_official + 1e-4)
prob_max_diff = np.max(np.abs(dropped_prob - dropped_prob_off))
height_avg_max_diff = np.max(np.abs(height_avg - height_avg_off))
width_avg_max_diff = np.max(np.abs(width_avg - width_avg_off))
assert prob_max_diff < 0.04
assert height_avg_max_diff < 0.3
assert width_avg_max_diff < 0.3
def test_determinism(self):
aug = iaa.Cutout(nb_iterations=(1, 3),
size=(0.1, 0.2),
fill_mode=["gaussian", "constant"],
cval=(0, 255))
image = np.mod(
np.arange(100*100*3), 256
).reshape((100, 100, 3)).astype(np.uint8)
sums = []
for _ in np.arange(10):
aug_det = aug.to_deterministic()
image_aug1 = aug_det(image=image)
image_aug2 = aug_det(image=image)
assert np.array_equal(image_aug1, image_aug2)
sums.append(np.sum(image_aug1))
assert len(np.unique(sums)) > 1
def test_get_parameters(self):
aug = iaa.Cutout(
nb_iterations=1,
position=(0.5, 0.5),
size=0.1,
squared=0.6,
fill_mode=["gaussian", "constant"],
cval=(0, 255),
fill_per_channel=0.5
)
params = aug.get_parameters()
assert params[0] is aug.nb_iterations
assert params[1] is aug.position
assert params[2] is aug.size
assert params[3] is aug.squared
assert params[4] is aug.fill_mode
assert params[5] is aug.cval
assert params[6] is aug.fill_per_channel
def test_pickleable(self):
aug = iaa.Cutout(
nb_iterations=1,
position=(0.5, 0.5),
size=0.1,
squared=0.6,
fill_mode=["gaussian", "constant"],
cval=(0, 255),
fill_per_channel=0.5
)
runtest_pickleable_uint8_img(aug)
# This is mostly copy-pasted Cutout code from
# https://github.com/uoguelph-mlrg/Cutout/blob/master/util/cutout.py.
# We use it as a reference to compare our implementation against;
# some PyTorch operations were converted to NumPy.
class _CutoutOfficial(object):
"""Randomly mask out one or more patches from an image.
Args:
n_holes (int): Number of patches to cut out of each image.
length (int): The length (in pixels) of each square patch.
"""
def __init__(self, n_holes, length):
self.n_holes = n_holes
self.length = length
def __call__(self, img):
"""
Args:
img (Tensor): Tensor image of size (C, H, W).
Returns:
Tensor: Image with n_holes of dimension length x length cut out of
it.
"""
# h = img.size(1)
# w = img.size(2)
h = img.shape[0]
w = img.shape[1]
mask = np.ones((h, w), np.float32)
for n in range(self.n_holes):
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
# note that in the paper they normalize to 0-mean,
# i.e. 0 here is actually not black but grayish pixels
mask[y1: y2, x1: x2] = 0
# mask = torch.from_numpy(mask)
# mask = mask.expand_as(img)
if img.ndim != 2:
mask = np.tile(mask[:, :, np.newaxis], (1, 1, img.shape[-1]))
img = img * mask
return img
class TestDropout(unittest.TestCase):
def setUp(self):
reseed()
    def test_p_is_zero(self):
        # no dropout, shouldn't change anything
        base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
        images = np.array([base_img])
        images_list = [base_img]
        aug = iaa.Dropout(p=0)
        observed = aug.augment_images(images)
        expected = images
        assert np.array_equal(observed, expected)
        observed = aug.augment_images(images_list)
        expected = images_list
        assert array_equal_lists(observed, expected)
    def test_p_is_one(self):
        # 100% dropout, should drop everything
        base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
        images = np.array([base_img])
        images_list = [base_img]
        aug = iaa.Dropout(p=1.0)
        observed = aug.augment_images(images)
        expected = np.zeros((1, 512, 512, 1), dtype=np.uint8)
        assert np.array_equal(observed, expected)
        observed = aug.augment_images(images_list)
        expected = [np.zeros((512, 512, 1), dtype=np.uint8)]
        assert array_equal_lists(observed, expected)
def test_p_is_50_percent(self):
# 50% dropout
base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Dropout(p=0.5)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert not np.array_equal(observed, images)
percent_nonzero = len(observed.flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug_det.augment_images(images)
assert not np.array_equal(observed, images)
percent_nonzero = len(observed.flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
percent_nonzero = len(observed[0].flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug_det.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
percent_nonzero = len(observed[0].flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
def test_tuple_as_p(self):
# varying p
aug = iaa.Dropout(p=(0.0, 1.0))
aug_det = aug.to_deterministic()
images = np.ones((1, 8, 8, 1), dtype=np.uint8) * 255
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_list_as_p(self):
aug = iaa.Dropout(p=[0.0, 0.5, 1.0])
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 255
nb_seen = [0, 0, 0, 0]
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
n_dropped = np.sum(observed_aug == 0)
p_observed = n_dropped / observed_aug.size
if 0 <= p_observed <= 0.01:
nb_seen[0] += 1
elif 0.5 - 0.05 <= p_observed <= 0.5 + 0.05:
nb_seen[1] += 1
elif 1.0-0.01 <= p_observed <= 1.0:
nb_seen[2] += 1
else:
nb_seen[3] += 1
assert np.allclose(nb_seen[0:3], nb_iterations*0.33, rtol=0, atol=75)
assert nb_seen[3] < 30
def test_stochastic_parameter_as_p(self):
# varying p by stochastic parameter
aug = iaa.Dropout(p=iap.Binomial(1-iap.Choice([0.0, 0.5])))
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 255
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_images(images)
p = np.mean(observed == 0)
if 0.4 < p < 0.6:
seen[0] += 1
elif p < 0.1:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exception for wrong parameter datatype
got_exception = False
try:
_aug = iaa.Dropout(p="test")
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.Dropout(p=1.0)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.Dropout(p=0.5, per_channel=True, seed=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarseDropout(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_zero(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0, size_px=4, size_percent=None, per_channel=False, min_size=4)
observed = aug.augment_image(base_img)
expected = base_img
assert np.array_equal(observed, expected)
def test_p_is_one(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=1.0, size_px=4, size_percent=None, per_channel=False, min_size=4)
observed = aug.augment_image(base_img)
expected = np.zeros_like(base_img)
assert np.array_equal(observed, expected)
def test_p_is_50_percent(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0.5, size_px=1, size_percent=None, per_channel=False, min_size=1)
averages = []
for _ in sm.xrange(50):
observed = aug.augment_image(base_img)
averages.append(np.average(observed))
        # pixel values should only ever be 0 (dropped) or 100 (kept)
        assert all([v in [0, 100] for v in np.unique(observed)])
assert 50 - 20 < np.average(averages) < 50 + 20
def test_size_percent(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0.5, size_px=None, size_percent=0.001, per_channel=False, min_size=1)
averages = []
for _ in sm.xrange(50):
observed = aug.augment_image(base_img)
averages.append(np.average(observed))
        # pixel values should only ever be 0 (dropped) or 100 (kept)
        assert all([v in [0, 100] for v in np.unique(observed)])
assert 50 - 20 < np.average(averages) < 50 + 20
def test_per_channel(self):
aug = iaa.CoarseDropout(p=0.5, size_px=1, size_percent=None, per_channel=True, min_size=1)
base_img = np.ones((4, 4, 3), dtype=np.uint8) * 100
found = False
for _ in sm.xrange(100):
observed = aug.augment_image(base_img)
avgs = np.average(observed, axis=(0, 1))
if len(set(avgs)) >= 2:
found = True
break
assert found
def test_stochastic_parameter_as_p(self):
# varying p by stochastic parameter
aug = iaa.CoarseDropout(p=iap.Binomial(1-iap.Choice([0.0, 0.5])), size_px=50)
images = np.ones((1, 100, 100, 1), dtype=np.uint8) * 255
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_images(images)
p = np.mean(observed == 0)
if 0.4 < p < 0.6:
seen[0] += 1
elif p < 0.1:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exception for bad parameters
got_exception = False
try:
_ = iaa.CoarseDropout(p="test")
except Exception:
got_exception = True
assert got_exception
def test___init___size_px_and_size_percent_both_none(self):
got_exception = False
try:
_ = iaa.CoarseDropout(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarseDropout(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarseDropout(p=0.5, size_px=10, per_channel=True,
seed=1)
runtest_pickleable_uint8_img(aug, iterations=10, shape=(40, 40, 3))
class TestDropout2d(unittest.TestCase):
def setUp(self):
reseed()
def test___init___defaults(self):
aug = iaa.Dropout2d(p=0)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
assert aug.nb_keep_channels == 1
def test___init___p_is_float(self):
aug = iaa.Dropout2d(p=0.7)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 0.3)
assert aug.nb_keep_channels == 1
def test___init___nb_keep_channels_is_int(self):
aug = iaa.Dropout2d(p=0, nb_keep_channels=2)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
assert aug.nb_keep_channels == 2
def test_no_images_in_batch(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
heatmaps = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=heatmaps)
assert np.allclose(heatmaps_aug.arr_0to1, heatmaps.arr_0to1)
def test_p_is_1(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_heatmaps(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, 0.0)
def test_p_is_1_segmentation_maps(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, 0.0)
def test_p_is_1_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert cbaoi_aug.items == []
def test_p_is_1_heatmaps__keep_one_channel(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_1_segmentation_maps__keep_one_channel(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_1_cbaois__keep_one_channel(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_0(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.array_equal(image_aug, image)
def test_p_is_0_heatmaps(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_0_segmentation_maps(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_0_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_075(self):
image = np.full((1, 1, 3000), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=0.75, nb_keep_channels=0)
image_aug = aug(image=image)
nb_kept = np.sum(image_aug == 255)
nb_dropped = image.shape[2] - nb_kept
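        # with p=0.75 roughly 2250 of the 3000 channels should be dropped;
        # atol=75 is about three standard deviations of the corresponding binomial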
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.isclose(nb_dropped, image.shape[2]*0.75, atol=75)
def test_force_nb_keep_channels(self):
image = np.full((1, 1, 3), 255, dtype=np.uint8)
images = np.array([image] * 1000)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
images_aug = aug(images=images)
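        # record which channel index survived in each image; with
        # nb_keep_channels=1 exactly one channel per image stays at 255, and
        # over 1000 images every channel index should occur at least once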
ids_kept = [np.nonzero(image[0, 0, :]) for image in images_aug]
ids_kept_uq = np.unique(ids_kept)
nb_kept = np.sum(images_aug == 255)
nb_dropped = (len(images) * images.shape[3]) - nb_kept
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
# on average, keep 1 of 3 channels
# due to p=1.0 we expect to get exactly 2/3 dropped
assert np.isclose(nb_dropped,
(len(images)*images.shape[3])*(2/3), atol=1)
# every channel dropped at least once, i.e. which one is kept is random
assert sorted(ids_kept_uq.tolist()) == [0, 1, 2]
def test_some_images_below_nb_keep_channels(self):
image_2c = np.full((1, 1, 2), 255, dtype=np.uint8)
image_3c = np.full((1, 1, 3), 255, dtype=np.uint8)
images = [image_2c if i % 2 == 0 else image_3c
for i in sm.xrange(100)]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=2)
images_aug = aug(images=images)
for i, image_aug in enumerate(images_aug):
assert np.sum(image_aug == 255) == 2
if i % 2 == 0:
assert np.sum(image_aug == 0) == 0
else:
assert np.sum(image_aug == 0) == 1
def test_all_images_below_nb_keep_channels(self):
image = np.full((1, 1, 2), 255, dtype=np.uint8)
images = np.array([image] * 100)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
images_aug = aug(images=images)
nb_kept = np.sum(images_aug == 255)
nb_dropped = (len(images) * images.shape[3]) - nb_kept
assert nb_dropped == 0
def test_get_parameters(self):
aug = iaa.Dropout2d(p=0.7, nb_keep_channels=2)
params = aug.get_parameters()
assert isinstance(params[0], iap.Binomial)
assert np.isclose(params[0].p.value, 0.3)
assert params[1] == 2
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.full(shape, 255, dtype=np.uint8)
aug = iaa.Dropout2d(1.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_other_dtypes_bool(self):
image = np.full((1, 1, 10), 1, dtype=bool)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == "bool"
assert np.sum(image_aug == 1) == 3
assert np.sum(image_aug == 0) == 7
def test_other_dtypes_uint_int(self):
dts = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, int(center_value), max_value]
for value in values:
with self.subTest(dtype=dt, value=value):
image = np.full((1, 1, 10), value, dtype=dt)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == dt
if value == 0:
assert np.sum(image_aug == value) == 10
else:
assert np.sum(image_aug == value) == 3
assert np.sum(image_aug == 0) == 7
def test_other_dtypes_float(self):
dts = ["float16", "float32", "float64", "float128"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, -10.0, center_value, 10.0, max_value]
atol = 1e-3*max_value if dt == "float16" else 1e-9 * max_value
_isclose = functools.partial(np.isclose, atol=atol, rtol=0)
for value in values:
with self.subTest(dtype=dt, value=value):
image = np.full((1, 1, 10), value, dtype=dt)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == dt
if _isclose(value, 0.0):
assert np.sum(_isclose(image_aug, value)) == 10
else:
assert (
np.sum(_isclose(image_aug, np.float128(value)))
== 3)
assert np.sum(image_aug == 0) == 7
def test_pickleable(self):
aug = iaa.Dropout2d(p=0.5, seed=1)
runtest_pickleable_uint8_img(aug, iterations=3, shape=(1, 1, 50))
class TestTotalDropout(unittest.TestCase):
def setUp(self):
reseed()
def test___init___p(self):
aug = iaa.TotalDropout(p=0)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
def test_p_is_1(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=1.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_multiple_images_list(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = [image, image, image]
aug = iaa.TotalDropout(p=1.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_multiple_images_array(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = np.array([image, image, image], dtype=np.uint8)
aug = iaa.TotalDropout(p=1.0)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
assert np.sum(images_aug) == 0
def test_p_is_1_heatmaps(self):
aug = iaa.TotalDropout(p=1.0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, 0.0)
def test_p_is_1_segmentation_maps(self):
aug = iaa.TotalDropout(p=1.0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, 0.0)
def test_p_is_1_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.TotalDropout(p=1.0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert cbaoi_aug.items == []
def test_p_is_0(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=0.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.array_equal(image_aug, image)
def test_p_is_0_multiple_images_list(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = [image, image, image]
aug = iaa.TotalDropout(p=0.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.array_equal(image_aug, image_)
def test_p_is_0_multiple_images_array(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = np.array([image, image, image], dtype=np.uint8)
aug = iaa.TotalDropout(p=0.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.array_equal(image_aug, image_)
def test_p_is_0_heatmaps(self):
aug = iaa.TotalDropout(p=0.0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_0_segmentation_maps(self):
aug = iaa.TotalDropout(p=0.0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_0_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.TotalDropout(p=0.0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_075_multiple_images_list(self):
images = [np.full((1, 1, 1), 255, dtype=np.uint8)] * 3000
aug = iaa.TotalDropout(p=0.75)
images_aug = aug(images=images)
nb_kept = np.sum([np.sum(image_aug == 255) for image_aug in images_aug])
nb_dropped = len(images) - nb_kept
for image_aug in images_aug:
assert image_aug.shape == images[0].shape
assert image_aug.dtype.name == images[0].dtype.name
assert np.isclose(nb_dropped, len(images)*0.75, atol=75)
def test_p_is_075_multiple_images_array(self):
images = np.full((3000, 1, 1, 1), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=0.75)
images_aug = aug(images=images)
nb_kept = np.sum(images_aug == 255)
nb_dropped = len(images) - nb_kept
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
assert np.isclose(nb_dropped, len(images)*0.75, atol=75)
def test_get_parameters(self):
aug = iaa.TotalDropout(p=0.0)
params = aug.get_parameters()
assert params[0] is aug.p
def test_unusual_channel_numbers(self):
shapes = [
(5, 1, 1, 4),
(5, 1, 1, 5),
(5, 1, 1, 512),
(5, 1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
images = np.zeros(shape, dtype=np.uint8)
aug = iaa.TotalDropout(1.0)
images_aug = aug(images=images)
assert np.all(images_aug == 0)
assert images_aug.dtype.name == "uint8"
assert images_aug.shape == shape
def test_zero_sized_axes(self):
shapes = [
(5, 0, 0),
(5, 0, 1),
(5, 1, 0),
(5, 0, 1, 0),
(5, 1, 0, 0),
(5, 0, 1, 1),
(5, 1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
images = np.full(shape, 255, dtype=np.uint8)
aug = iaa.TotalDropout(1.0)
images_aug = aug(images=images)
assert images_aug.dtype.name == "uint8"
assert images_aug.shape == images.shape
def test_other_dtypes_bool(self):
image = np.full((1, 1, 10), 1, dtype=bool)
aug = iaa.TotalDropout(p=1.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == "bool"
assert np.sum(image_aug == 1) == 0
def test_other_dtypes_uint_int(self):
dts = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, int(center_value), max_value]
for value in values:
for p in [1.0, 0.0]:
with self.subTest(dtype=dt, value=value, p=p):
images = np.full((5, 1, 1, 3), value, dtype=dt)
aug = iaa.TotalDropout(p=p)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == dt
if np.isclose(p, 1.0) or value == 0:
assert np.sum(images_aug == 0) == 5*3
else:
assert np.sum(images_aug == value) == 5*3
def test_other_dtypes_float(self):
dts = ["float16", "float32", "float64", "float128"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, -10.0, center_value, 10.0, max_value]
atol = 1e-3*max_value if dt == "float16" else 1e-9 * max_value
_isclose = functools.partial(np.isclose, atol=atol, rtol=0)
for value in values:
for p in [1.0, 0.0]:
with self.subTest(dtype=dt, value=value, p=p):
images = np.full((5, 1, 1, 3), value, dtype=dt)
aug = iaa.TotalDropout(p=p)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == dt
if np.isclose(p, 1.0):
assert np.sum(_isclose(images_aug, 0.0)) == 5*3
else:
assert (
np.sum(_isclose(images_aug, np.float128(value)))
== 5*3)
def test_pickleable(self):
aug = iaa.TotalDropout(p=0.5, seed=1)
runtest_pickleable_uint8_img(aug, iterations=30, shape=(4, 4, 2))
class TestMultiply(unittest.TestCase):
def setUp(self):
reseed()
def test_mul_is_one(self):
        # no multiply, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Multiply(mul=1.0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_mul_is_above_one(self):
# multiply >1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Multiply(mul=1.2)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
def test_mul_is_below_one(self):
# multiply <1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Multiply(mul=0.8)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
def test_keypoints_dont_change(self):
        # keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Multiply(mul=1.2)
aug_det = iaa.Multiply(mul=1.2).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_mul(self):
# varying multiply factors
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Multiply(mul=(0, 2.0))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_per_channel(self):
aug = iaa.Multiply(mul=iap.Choice([0, 2]), per_channel=True)
observed = aug.augment_image(np.ones((1, 1, 100), dtype=np.uint8))
uq = np.unique(observed)
assert observed.shape == (1, 1, 100)
assert 0 in uq
assert 2 in uq
assert len(uq) == 2
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.Multiply(mul=iap.Choice([0, 2]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.ones((1, 1, 20), dtype=np.uint8))
assert observed.shape == (1, 1, 20)
uq = np.unique(observed)
per_channel = (len(uq) == 2)
if per_channel:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.Multiply(mul="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.Multiply(mul=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
                aug = iaa.Multiply(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.Multiply(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.Multiply(mul=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.Multiply(mul=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.Multiply(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(2.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(-1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 10)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 100)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 5)
image = np.full((3, 3), 0, dtype=dtype)
aug = iaa.Multiply(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
if np.dtype(dtype).kind == "u":
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
else:
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == -10)
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.Multiply(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == int(center_value))
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.Multiply(1.2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == int(1.2 * int(center_value)))
if np.dtype(dtype).kind == "u":
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.Multiply(100)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
# non-uint8 currently don't increase the itemsize
if dtype.name == "uint8":
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# non-uint8 currently don't increase the itemsize
if dtype.name == "uint8":
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
# non-uint8 currently don't increase the itemsize
if dtype.name == "uint8":
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 10, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(0.5, 1.5))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(0.5, 1.5), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
image = np.full((1, 1, 3), 10, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(1, 3))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(1, 3), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
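            # float16 offers only ~3 decimal digits of precision, so a much
            # looser absolute tolerance (scaled by max_value) is used for it
            # than for float32.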
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 10.0)
image = np.full((3, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(2.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 20.0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.Multiply(-10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, min_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.5*max_value)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), min_value, dtype=dtype)
# aug = iaa.Multiply(-2.0)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Multiply(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
# using tolerances of -100 - 1e-2 and 100 + 1e-2 is not enough for float16, had to be increased to -/+ 1e-1
# deactivated, because itemsize increase was deactivated
"""
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((1, 1, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
"""
def test_pickleable(self):
aug = iaa.Multiply((0.5, 1.5), per_channel=True, seed=1)
runtest_pickleable_uint8_img(aug, iterations=20)
class TestMultiplyElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test_mul_is_one(self):
        # no multiply, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.MultiplyElementwise(mul=1.0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_mul_is_above_one(self):
# multiply >1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.MultiplyElementwise(mul=1.2)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
def test_mul_is_below_one(self):
# multiply <1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.MultiplyElementwise(mul=0.8)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
def test_keypoints_dont_change(self):
        # keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.MultiplyElementwise(mul=1.2)
        aug_det = iaa.MultiplyElementwise(mul=1.2).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_mul(self):
# varying multiply factors
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.MultiplyElementwise(mul=(0, 2.0))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_samples_change_by_spatial_location(self):
# values should change between pixels
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.MultiplyElementwise(mul=(0.5, 1.5))
nb_same = 0
nb_different = 0
nb_iterations = 1000
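        # MultiplyElementwise samples one factor per pixel, so consecutive
        # values in the flattened output should differ almost everywhere;
        # the 0.95 bound below leaves slack for coincidentally equal results.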
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_flat = observed_aug.flatten()
last = None
for j in sm.xrange(observed_aug_flat.size):
if last is not None:
v = observed_aug_flat[j]
if v - 0.0001 <= last <= v + 0.0001:
nb_same += 1
else:
nb_different += 1
last = observed_aug_flat[j]
assert nb_different > 0.95 * (nb_different + nb_same)
def test_per_channel(self):
# test channelwise
aug = iaa.MultiplyElementwise(mul=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.ones((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
assert observed.shape == (100, 100, 3)
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.MultiplyElementwise(mul=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.ones((20, 20, 3), dtype=np.uint8))
assert observed.shape == (20, 20, 3)
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.MultiplyElementwise(mul="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.MultiplyElementwise(mul=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.MultiplyElementwise(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.MultiplyElementwise(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.MultiplyElementwise(mul=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.MultiplyElementwise(mul=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(2.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(-1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 10)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), 10, dtype=dtype)
# aug = iaa.MultiplyElementwise(10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == 100)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 5)
image = np.full((3, 3), 0, dtype=dtype)
aug = iaa.MultiplyElementwise(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# partially deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
if dtype.kind == "u":
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
else:
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == -10)
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == int(center_value))
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), int(center_value), dtype=dtype)
# aug = iaa.MultiplyElementwise(1.2)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == int(1.2 * int(center_value)))
# deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
if dtype.kind == "u":
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.MultiplyElementwise(100)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(-2)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == min_value)
# partially deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
for _ in sm.xrange(10):
image = np.full((5, 5, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(0.5, 1.5))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(0.5, 1.5), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
image = np.full((5, 5, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(1, 3))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(1, 3), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 10.0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), 10.0, dtype=dtype)
# aug = iaa.MultiplyElementwise(2.0)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, 20.0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(-10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, min_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.5*max_value)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), min_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(-2.0)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
# using tolerances of -100 - 1e-2 and 100 + 1e-2 is not enough for float16, had to be increased to -/+ 1e-1
# deactivated, because itemsize increase was deactivated
"""
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
"""
def test_pickleable(self):
aug = iaa.MultiplyElementwise((0.5, 1.5), per_channel=True,
seed=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestReplaceElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test_mask_is_always_zero(self):
        # no replace, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) + 99
images = np.array([base_img])
images_list = [base_img]
aug = iaa.ReplaceElementwise(mask=0, replacement=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_mask_is_always_one(self):
# replace at 100 percent prob., should change everything
base_img = np.ones((3, 3, 1), dtype=np.uint8) + 99
images = np.array([base_img])
images_list = [base_img]
aug = iaa.ReplaceElementwise(mask=1, replacement=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.zeros((1, 3, 3, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.zeros((3, 3, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.zeros((1, 3, 3, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.zeros((3, 3, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
def test_mask_is_stochastic_parameter(self):
# replace half
aug = iaa.ReplaceElementwise(mask=iap.Binomial(p=0.5), replacement=0)
img = np.ones((100, 100, 1), dtype=np.uint8)
nb_iterations = 100
nb_diff_all = 0
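        # A Binomial(p=0.5) mask replaces each pixel independently with
        # probability 0.5, so the fraction of changed pixels should settle
        # near 0.5 over many iterations.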
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(img)
nb_diff = np.sum(img != observed)
nb_diff_all += nb_diff
p = nb_diff_all / (nb_iterations * 100 * 100)
assert 0.45 <= p <= 0.55
def test_mask_is_list(self):
# mask is list
aug = iaa.ReplaceElementwise(mask=[0.2, 0.7], replacement=1)
img = np.zeros((20, 20, 1), dtype=np.uint8)
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_image(img)
p = np.mean(observed)
if 0.1 < p < 0.3:
seen[0] += 1
elif 0.6 < p < 0.8:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_keypoints_dont_change(self):
        # keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) + 99
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.ReplaceElementwise(mask=iap.Binomial(p=0.5), replacement=0)
aug_det = iaa.ReplaceElementwise(mask=iap.Binomial(p=0.5), replacement=0).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_replacement_is_stochastic_parameter(self):
# different replacements
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.Choice([100, 200]))
img = np.zeros((1000, 1000, 1), dtype=np.uint8)
img100 = img + 100
img200 = img + 200
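        # Each pixel is replaced by either 100 or 200 with equal probability,
        # so roughly half of the pixels differ from the all-100 image and
        # roughly half from the all-200 image.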
observed = aug.augment_image(img)
nb_diff_100 = np.sum(img100 != observed)
nb_diff_200 = np.sum(img200 != observed)
p100 = nb_diff_100 / (1000 * 1000)
p200 = nb_diff_200 / (1000 * 1000)
assert 0.45 <= p100 <= 0.55
assert 0.45 <= p200 <= 0.55
        # test channelwise: with a per-channel mask, replacements happen
        # independently in each channel, so channel sums of 0, 1, 2 and 3
        # should all occur
        aug = iaa.ReplaceElementwise(mask=iap.Choice([0, 1]), replacement=1, per_channel=True)
        observed = aug.augment_image(np.zeros((100, 100, 3), dtype=np.uint8))
        sums = np.sum(observed, axis=2)
        values = np.unique(sums)
        assert all([(value in values) for value in [0, 1, 2, 3]])
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.ReplaceElementwise(mask=iap.Choice([0, 1]), replacement=1, per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((20, 20, 3), dtype=np.uint8))
assert observed.shape == (20, 20, 3)
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.ReplaceElementwise(mask="test", replacement=1)
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.ReplaceElementwise(mask=1, replacement=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.ReplaceElementwise(1.0, 1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.ReplaceElementwise(1.0, 1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.ReplaceElementwise(mask=0.5, replacement=2, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Binomial)
assert isinstance(params[0].p, iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert isinstance(params[2], iap.Deterministic)
assert 0.5 - 1e-6 < params[0].p.value < 0.5 + 1e-6
assert params[1].value == 2
assert params[2].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.ReplaceElementwise(mask=1, replacement=0.5)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
aug = iaa.ReplaceElementwise(mask=1, replacement=0)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
aug = iaa.ReplaceElementwise(mask=1, replacement=1)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=0)
image = np.full((3, 3), True, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
aug = iaa.ReplaceElementwise(mask=1, replacement=1)
image = np.full((3, 3), True, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=0.7)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=0.2)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.uint32, np.int8, np.int16, np.int32]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
aug = iaa.ReplaceElementwise(mask=1, replacement=1)
image = np.full((3, 3), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=2)
image = np.full((3, 3), 1, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 2)
# deterministic stochastic parameters are by default int32 for
# any integer value and hence cannot cover the full uint32 value
# range
if dtype.name != "uint32":
aug = iaa.ReplaceElementwise(mask=1, replacement=max_value)
image = np.full((3, 3), min_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=min_value)
image = np.full((3, 3), max_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.Uniform(1.0, 10.0))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert len(np.unique(image_aug)) > 1
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.DiscreteUniform(1, 10))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert len(np.unique(image_aug)) > 1
aug = iaa.ReplaceElementwise(mask=0.5, replacement=iap.DiscreteUniform(1, 10), per_channel=True)
image = np.full((1, 1, 100), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(0 <= image_aug, image_aug <= 10))
assert len(np.unique(image_aug)) > 2
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32, np.float64]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
atol = 1e-3*max_value if dtype == np.float16 else 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
aug = iaa.ReplaceElementwise(mask=1, replacement=1.0)
image = np.full((3, 3), 0.0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.allclose(image_aug, 1.0)
aug = iaa.ReplaceElementwise(mask=1, replacement=2.0)
image = np.full((3, 3), 1.0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.allclose(image_aug, 2.0)
# deterministic stochastic parameters are by default float32 for
# any float value and hence cannot cover the full float64 value
# range
if dtype.name != "float64":
aug = iaa.ReplaceElementwise(mask=1, replacement=max_value)
image = np.full((3, 3), min_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=min_value)
image = np.full((3, 3), max_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.Uniform(1.0, 10.0))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert not np.allclose(image_aug[1:, :], image_aug[:-1, :], atol=0.01)
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.DiscreteUniform(1, 10))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert not np.allclose(image_aug[1:, :], image_aug[:-1, :], atol=0.01)
aug = iaa.ReplaceElementwise(mask=0.5, replacement=iap.DiscreteUniform(1, 10), per_channel=True)
image = np.full((1, 1, 100), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(0 <= image_aug, image_aug <= 10))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1], atol=0.01)
def test_pickleable(self):
aug = iaa.ReplaceElementwise(mask=0.5, replacement=(0, 255),
per_channel=True, seed=1)
runtest_pickleable_uint8_img(aug, iterations=3)
# no more tests necessary here as SaltAndPepper is just a tiny wrapper around
# ReplaceElementwise
class TestSaltAndPepper(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.SaltAndPepper(p=0.5)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_p_is_one(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.SaltAndPepper(p=1.0)
observed = aug.augment_image(base_img)
nb_pepper = np.sum(observed < 40)
nb_salt = np.sum(observed > 255 - 40)
assert nb_pepper > 200
assert nb_salt > 200
def test_pickleable(self):
aug = iaa.SaltAndPepper(p=0.5, per_channel=True, seed=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarseSaltAndPepper(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.CoarseSaltAndPepper(p=0.5, size_px=100)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_size_px(self):
aug1 = iaa.CoarseSaltAndPepper(p=0.5, size_px=100)
aug2 = iaa.CoarseSaltAndPepper(p=0.5, size_px=10)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
ps1 = []
ps2 = []
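        # size_px=100 gives a fine-grained, effectively per-pixel mask, while
        # size_px=10 produces large upscaled blocks; the block-wise mask makes
        # the per-image replacement fraction fluctuate more, hence the std
        # comparison below.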
for _ in sm.xrange(100):
observed1 = aug1.augment_image(base_img)
observed2 = aug2.augment_image(base_img)
p1 = np.mean(observed1 != 128)
p2 = np.mean(observed2 != 128)
ps1.append(p1)
ps2.append(p2)
assert 0.4 < np.mean(ps2) < 0.6
assert np.std(ps1)*1.5 < np.std(ps2)
def test_p_is_list(self):
aug = iaa.CoarseSaltAndPepper(p=[0.2, 0.5], size_px=100)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
seen = [0, 0, 0]
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
diff_020 = abs(0.2 - p)
diff_050 = abs(0.5 - p)
if diff_020 < 0.025:
seen[0] += 1
elif diff_050 < 0.025:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] < 10
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_p_is_tuple(self):
aug = iaa.CoarseSaltAndPepper(p=(0.0, 1.0), size_px=50)
base_img = np.zeros((50, 50, 1), dtype=np.uint8) + 128
ps = []
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
ps.append(p)
nb_bins = 5
hist, _ = np.histogram(ps, bins=nb_bins, range=(0.0, 1.0), density=False)
tolerance = 0.05
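        # p is drawn uniformly from (0.0, 1.0), so the observed replacement
        # fractions should spread roughly evenly over the 5 bins, i.e. each
        # bin should hold close to 1/5 = 0.2 of the samples.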
for nb_seen in hist:
density = nb_seen / len(ps)
            assert 0.2 - tolerance < density < 0.2 + tolerance  # 0.2 = 1/nb_bins
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.CoarseSaltAndPepper(p="test", size_px=100)
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.CoarseSaltAndPepper(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarseSaltAndPepper(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarseSaltAndPepper(p=0.5, size_px=(4, 15),
per_channel=True, seed=1)
runtest_pickleable_uint8_img(aug, iterations=20)
# no more tests necessary here as Salt is just a tiny wrapper around
# ReplaceElementwise
class TestSalt(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Salt(p=0.5)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
# Salt() occasionally replaces with 127, which probably should be the center-point here anyways
assert np.all(observed >= 127)
def test_p_is_one(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Salt(p=1.0)
observed = aug.augment_image(base_img)
nb_pepper = np.sum(observed < 40)
nb_salt = np.sum(observed > 255 - 40)
assert nb_pepper == 0
assert nb_salt > 200
def test_pickleable(self):
aug = iaa.Salt(p=0.5, per_channel=True, seed=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarseSalt(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.CoarseSalt(p=0.5, size_px=100)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_size_px(self):
aug1 = iaa.CoarseSalt(p=0.5, size_px=100)
aug2 = iaa.CoarseSalt(p=0.5, size_px=10)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
ps1 = []
ps2 = []
for _ in sm.xrange(100):
observed1 = aug1.augment_image(base_img)
observed2 = aug2.augment_image(base_img)
p1 = np.mean(observed1 != 128)
p2 = np.mean(observed2 != 128)
ps1.append(p1)
ps2.append(p2)
assert 0.4 < np.mean(ps2) < 0.6
assert np.std(ps1)*1.5 < np.std(ps2)
def test_p_is_list(self):
aug = iaa.CoarseSalt(p=[0.2, 0.5], size_px=100)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
seen = [0, 0, 0]
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
diff_020 = abs(0.2 - p)
diff_050 = abs(0.5 - p)
if diff_020 < 0.025:
seen[0] += 1
elif diff_050 < 0.025:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] < 10
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_p_is_tuple(self):
aug = iaa.CoarseSalt(p=(0.0, 1.0), size_px=50)
base_img = np.zeros((50, 50, 1), dtype=np.uint8) + 128
ps = []
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
ps.append(p)
nb_bins = 5
hist, _ = np.histogram(ps, bins=nb_bins, range=(0.0, 1.0), density=False)
tolerance = 0.05
for nb_seen in hist:
density = nb_seen / len(ps)
            assert 0.2 - tolerance < density < 0.2 + tolerance  # 0.2 = 1/nb_bins
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.CoarseSalt(p="test", size_px=100)
except Exception:
got_exception = True
assert got_exception
def test_size_px_or_size_percent_not_none(self):
got_exception = False
try:
_ = iaa.CoarseSalt(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarseSalt(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarseSalt(p=0.5, size_px=(4, 15),
per_channel=True, seed=1)
runtest_pickleable_uint8_img(aug, iterations=20)
# no more tests necessary here as Pepper is just a tiny wrapper around
# ReplaceElementwise
class TestPepper(unittest.TestCase):
def setUp(self):
reseed()
def test_probability_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Pepper(p=0.5)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
assert np.all(observed <= 128)
def test_probability_is_one(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Pepper(p=1.0)
observed = aug.augment_image(base_img)
nb_pepper = np.sum(observed < 40)
nb_salt = np.sum(observed > 255 - 40)
assert nb_pepper > 200
assert nb_salt == 0
def test_pickleable(self):
aug = iaa.Pepper(p=0.5, per_channel=True, seed=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarsePepper(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.CoarsePepper(p=0.5, size_px=100)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_size_px(self):
aug1 = iaa.CoarsePepper(p=0.5, size_px=100)
aug2 = iaa.CoarsePepper(p=0.5, size_px=10)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
ps1 = []
ps2 = []
for _ in sm.xrange(100):
observed1 = aug1.augment_image(base_img)
observed2 = aug2.augment_image(base_img)
p1 = np.mean(observed1 != 128)
p2 = np.mean(observed2 != 128)
ps1.append(p1)
ps2.append(p2)
assert 0.4 < np.mean(ps2) < 0.6
assert np.std(ps1)*1.5 < np.std(ps2)
def test_p_is_list(self):
aug = iaa.CoarsePepper(p=[0.2, 0.5], size_px=100)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
seen = [0, 0, 0]
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
diff_020 = abs(0.2 - p)
diff_050 = abs(0.5 - p)
if diff_020 < 0.025:
seen[0] += 1
elif diff_050 < 0.025:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] < 10
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_p_is_tuple(self):
aug = iaa.CoarsePepper(p=(0.0, 1.0), size_px=50)
base_img = np.zeros((50, 50, 1), dtype=np.uint8) + 128
ps = []
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
ps.append(p)
nb_bins = 5
hist, _ = np.histogram(ps, bins=nb_bins, range=(0.0, 1.0), density=False)
tolerance = 0.05
for nb_seen in hist:
density = nb_seen / len(ps)
            assert 0.2 - tolerance < density < 0.2 + tolerance  # 0.2 = 1/nb_bins
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.CoarsePepper(p="test", size_px=100)
except Exception:
got_exception = True
assert got_exception
def test_size_px_or_size_percent_not_none(self):
got_exception = False
try:
_ = iaa.CoarsePepper(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarsePepper(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarsePepper(p=0.5, size_px=(4, 15),
per_channel=True, seed=1)
runtest_pickleable_uint8_img(aug, iterations=20)
class Test_invert(unittest.TestCase):
@mock.patch("imgaug.augmenters.arithmetic.invert_")
def test_mocked_defaults(self, mock_invert):
mock_invert.return_value = "foo"
arr = np.zeros((1,), dtype=np.uint8)
observed = iaa.invert(arr)
assert observed == "foo"
args = mock_invert.call_args_list[0]
assert np.array_equal(mock_invert.call_args_list[0][0][0], arr)
assert args[1]["min_value"] is None
assert args[1]["max_value"] is None
assert args[1]["threshold"] is None
assert args[1]["invert_above_threshold"] is True
@mock.patch("imgaug.augmenters.arithmetic.invert_")
def test_mocked(self, mock_invert):
mock_invert.return_value = "foo"
arr = np.zeros((1,), dtype=np.uint8)
observed = iaa.invert(arr, min_value=1, max_value=10, threshold=5,
invert_above_threshold=False)
assert observed == "foo"
args = mock_invert.call_args_list[0]
assert np.array_equal(mock_invert.call_args_list[0][0][0], arr)
assert args[1]["min_value"] == 1
assert args[1]["max_value"] == 10
assert args[1]["threshold"] == 5
assert args[1]["invert_above_threshold"] is False
def test_uint8(self):
values = np.array([0, 20, 45, 60, 128, 255], dtype=np.uint8)
expected = np.array([
255,
255-20,
255-45,
255-60,
255-128,
255-255
], dtype=np.uint8)
observed = iaa.invert(values)
assert np.array_equal(observed, expected)
assert observed is not values
# most parts of this function are tested via Invert
class Test_invert_(unittest.TestCase):
def test_arr_is_noncontiguous_uint8(self):
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
max_vr_flipped = np.fliplr(np.copy(zeros + 255))
observed = iaa.invert_(max_vr_flipped)
expected = zeros
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_arr_is_view_uint8(self):
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
max_vr_view = np.copy(zeros + 255)[:, :, [0, 2]]
observed = iaa.invert_(max_vr_view)
expected = zeros[:, :, [0, 2]]
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_uint(self):
dtypes = ["uint8", "uint16", "uint32", "uint64"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
expected = np.array([
max_value - 0,
max_value - 20,
max_value - 45,
max_value - 60,
max_value - center_value,
min_value
], dtype=dt)
observed = iaa.invert_(np.copy(values))
assert np.array_equal(observed, expected)
def test_uint_with_threshold_50_inv_above(self):
threshold = 50
dtypes = ["uint8", "uint16", "uint32", "uint64"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
expected = np.array([
0,
20,
45,
max_value - 60,
max_value - center_value,
min_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=True)
assert np.array_equal(observed, expected)
def test_uint_with_threshold_0_inv_above(self):
threshold = 0
dtypes = ["uint8", "uint16", "uint32", "uint64"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
expected = np.array([
max_value - 0,
max_value - 20,
max_value - 45,
max_value - 60,
max_value - center_value,
min_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=True)
assert np.array_equal(observed, expected)
def test_uint8_with_threshold_255_inv_above(self):
threshold = 255
dtypes = ["uint8"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
expected = np.array([
0,
20,
45,
60,
center_value,
min_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=True)
assert np.array_equal(observed, expected)
def test_uint8_with_threshold_256_inv_above(self):
threshold = 256
dtypes = ["uint8"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
expected = np.array([
0,
20,
45,
60,
center_value,
max_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=True)
assert np.array_equal(observed, expected)
def test_uint_with_threshold_50_inv_below(self):
threshold = 50
dtypes = ["uint8", "uint16", "uint32", "uint64"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
expected = np.array([
max_value - 0,
max_value - 20,
max_value - 45,
60,
center_value,
max_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=False)
assert np.array_equal(observed, expected)
def test_uint_with_threshold_50_inv_above_with_min_max(self):
threshold = 50
# uint64 does not support custom min/max, hence removed it here
dtypes = ["uint8", "uint16", "uint32"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
expected = np.array([
0, # not clipped to 10 as only >thresh affected
20,
45,
100 - 50,
100 - 90,
100 - 90
], dtype=dt)
observed = iaa.invert_(np.copy(values),
min_value=10,
max_value=100,
threshold=threshold,
invert_above_threshold=True)
assert np.array_equal(observed, expected)
def test_int_with_threshold_50_inv_above(self):
threshold = 50
dtypes = ["int8", "int16", "int32", "int64"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([-45, -20, center_value, 20, 45, max_value],
dtype=dt)
expected = np.array([
-45,
-20,
center_value,
20,
45,
min_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=True)
assert np.array_equal(observed, expected)
def test_int_with_threshold_50_inv_below(self):
threshold = 50
dtypes = ["int8", "int16", "int32", "int64"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([-45, -20, center_value, 20, 45, max_value],
dtype=dt)
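                # for signed dtypes min_value + max_value == -1, so inverting
                # a value v below the threshold yields -1 - v, matching the
                # (-1) * v - 1 terms below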
expected = np.array([
(-1) * (-45) - 1,
(-1) * (-20) - 1,
(-1) * center_value - 1,
(-1) * 20 - 1,
(-1) * 45 - 1,
max_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=False)
assert np.array_equal(observed, expected)
def test_float_with_threshold_50_inv_above(self):
threshold = 50
dtypes = ["float16", "float32", "float64", "float128"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = center_value
values = np.array([-45.5, -20.5, center_value, 20.5, 45.5,
max_value],
dtype=dt)
expected = np.array([
-45.5,
-20.5,
center_value,
20.5,
45.5,
min_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=True)
assert np.allclose(observed, expected, rtol=0, atol=1e-4)
def test_float_with_threshold_50_inv_below(self):
threshold = 50
dtypes = ["float16", "float32", "float64", "float128"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = center_value
values = np.array([-45.5, -20.5, center_value, 20.5, 45.5,
max_value],
dtype=dt)
expected = np.array([
(-1) * (-45.5),
(-1) * (-20.5),
(-1) * center_value,
(-1) * 20.5,
(-1) * 45.5,
max_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=False)
assert np.allclose(observed, expected, rtol=0, atol=1e-4)
class Test_solarize(unittest.TestCase):
@mock.patch("imgaug.augmenters.arithmetic.solarize_")
def test_mocked_defaults(self, mock_sol):
arr = np.zeros((1,), dtype=np.uint8)
mock_sol.return_value = "foo"
observed = iaa.solarize(arr)
args = mock_sol.call_args_list[0][0]
kwargs = mock_sol.call_args_list[0][1]
assert args[0] is not arr
assert np.array_equal(args[0], arr)
assert kwargs["threshold"] == 128
assert observed == "foo"
@mock.patch("imgaug.augmenters.arithmetic.solarize_")
def test_mocked(self, mock_sol):
arr = np.zeros((1,), dtype=np.uint8)
mock_sol.return_value = "foo"
observed = iaa.solarize(arr, threshold=5)
args = mock_sol.call_args_list[0][0]
kwargs = mock_sol.call_args_list[0][1]
assert args[0] is not arr
assert np.array_equal(args[0], arr)
assert kwargs["threshold"] == 5
assert observed == "foo"
def test_uint8(self):
arr = np.array([0, 10, 50, 150, 200, 255], dtype=np.uint8)
arr = arr.reshape((2, 3, 1))
observed = iaa.solarize(arr)
expected = np.array([0, 10, 50, 255-150, 255-200, 255-255],
dtype=np.uint8).reshape((2, 3, 1))
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
class Test_solarize_(unittest.TestCase):
@mock.patch("imgaug.augmenters.arithmetic.invert_")
def test_mocked_defaults(self, mock_sol):
arr = np.zeros((1,), dtype=np.uint8)
mock_sol.return_value = "foo"
observed = iaa.solarize_(arr)
args = mock_sol.call_args_list[0][0]
kwargs = mock_sol.call_args_list[0][1]
assert args[0] is arr
assert kwargs["threshold"] == 128
assert observed == "foo"
@mock.patch("imgaug.augmenters.arithmetic.invert_")
def test_mocked(self, mock_sol):
arr = np.zeros((1,), dtype=np.uint8)
mock_sol.return_value = "foo"
observed = iaa.solarize_(arr, threshold=5)
args = mock_sol.call_args_list[0][0]
kwargs = mock_sol.call_args_list[0][1]
assert args[0] is arr
assert kwargs["threshold"] == 5
assert observed == "foo"
class TestInvert(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_one(self):
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
observed = iaa.Invert(p=1.0).augment_image(zeros + 255)
expected = zeros
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_p_is_zero(self):
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
observed = iaa.Invert(p=0.0).augment_image(zeros + 255)
expected = zeros + 255
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_max_value_set(self):
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
observed = iaa.Invert(p=1.0, max_value=200).augment_image(zeros + 200)
expected = zeros
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_min_value_and_max_value_set(self):
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
observed = iaa.Invert(p=1.0, max_value=200, min_value=100).augment_image(zeros + 200)
expected = zeros + 100
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
observed = iaa.Invert(p=1.0, max_value=200, min_value=100).augment_image(zeros + 100)
expected = zeros + 200
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_min_value_and_max_value_set_with_float_image(self):
# with min/max and float inputs
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
zeros_f32 = zeros.astype(np.float32)
observed = iaa.Invert(p=1.0, max_value=200, min_value=100).augment_image(zeros_f32 + 200)
expected = zeros_f32 + 100
assert observed.dtype.name == "float32"
assert np.array_equal(observed, expected)
observed = iaa.Invert(p=1.0, max_value=200, min_value=100).augment_image(zeros_f32 + 100)
expected = zeros_f32 + 200
assert observed.dtype.name == "float32"
assert np.array_equal(observed, expected)
def test_p_is_80_percent(self):
nb_iterations = 1000
nb_inverted = 0
aug = iaa.Invert(p=0.8)
img = np.zeros((1, 1, 1), dtype=np.uint8) + 255
expected = np.zeros((1, 1, 1), dtype=np.uint8)
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(img)
if np.array_equal(observed, expected):
nb_inverted += 1
pinv = nb_inverted / nb_iterations
assert 0.75 <= pinv <= 0.85
nb_iterations = 1000
nb_inverted = 0
aug = iaa.Invert(p=iap.Binomial(0.8))
img = np.zeros((1, 1, 1), dtype=np.uint8) + 255
expected = np.zeros((1, 1, 1), dtype=np.uint8)
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(img)
if np.array_equal(observed, expected):
nb_inverted += 1
pinv = nb_inverted / nb_iterations
assert 0.75 <= pinv <= 0.85
def test_per_channel(self):
aug = iaa.Invert(p=0.5, per_channel=True)
img = np.zeros((1, 1, 100), dtype=np.uint8) + 255
observed = aug.augment_image(img)
assert len(np.unique(observed)) == 2
# TODO split into two tests
def test_p_is_stochastic_parameter_per_channel_is_probability(self):
nb_iterations = 1000
aug = iaa.Invert(p=iap.Binomial(0.8), per_channel=0.7)
img = np.zeros((1, 1, 20), dtype=np.uint8) + 255
seen = [0, 0]
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(img)
uq = np.unique(observed)
if len(uq) == 1:
seen[0] += 1
elif len(uq) == 2:
seen[1] += 1
else:
assert False
assert 300 - 75 < seen[0] < 300 + 75
assert 700 - 75 < seen[1] < 700 + 75
def test_threshold(self):
arr = np.array([0, 10, 50, 150, 200, 255], dtype=np.uint8)
arr = arr.reshape((2, 3, 1))
aug = iaa.Invert(p=1.0, threshold=128, invert_above_threshold=True)
observed = aug.augment_image(arr)
expected = np.array([0, 10, 50, 255-150, 255-200, 255-255],
dtype=np.uint8).reshape((2, 3, 1))
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_threshold_inv_below(self):
arr = np.array([0, 10, 50, 150, 200, 255], dtype=np.uint8)
arr = arr.reshape((2, 3, 1))
aug = iaa.Invert(p=1.0, threshold=128, invert_above_threshold=False)
observed = aug.augment_image(arr)
expected = np.array([255-0, 255-10, 255-50, 150, 200, 255],
dtype=np.uint8).reshape((2, 3, 1))
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_keypoints_dont_change(self):
# keypoints shouldnt be changed
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=zeros.shape)]
aug = iaa.Invert(p=1.0)
aug_det = iaa.Invert(p=1.0).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.Invert(p="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.Invert(p=0.5, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Invert(1.0)
image_aug = aug(image=image)
assert np.all(image_aug == 255)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Invert(1.0)
image_aug = aug(image=image)
assert np.all(image_aug == 255)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.Invert(p=0.5, per_channel=False, min_value=10, max_value=20)
params = aug.get_parameters()
assert params[0] is aug.p
assert params[1] is aug.per_channel
assert params[2] == 10
assert params[3] == 20
assert params[4] is aug.threshold
assert params[5] is aug.invert_above_threshold
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.Invert(p=1.0)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_p_is_zero(self):
# with p=0.0
aug = iaa.Invert(p=0.0)
dtypes = [bool,
np.uint8, np.uint16, np.uint32, np.uint64,
np.int8, np.int16, np.int32, np.int64,
np.float16, np.float32, np.float64, np.float128]
for dtype in dtypes:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
kind = np.dtype(dtype).kind
image_min = np.full((3, 3), min_value, dtype=dtype)
if dtype is not bool:
image_center = np.full((3, 3), center_value if kind == "f" else int(center_value), dtype=dtype)
image_max = np.full((3, 3), max_value, dtype=dtype)
image_min_aug = aug.augment_image(image_min)
image_center_aug = None
if dtype is not bool:
image_center_aug = aug.augment_image(image_center)
image_max_aug = aug.augment_image(image_max)
            assert image_min_aug.dtype == np.dtype(dtype)
# -*- coding: utf-8 -*-
"""
Function to save the results of multi-panel optimisations
- save_result_BELLAs
saves the results for the design of a multipanel structure
- save_result_BELLA_one_pdl
saves the results at one iteration of the design of a multipanel
structure
"""
import sys
import numpy as np
import pandas as pd
sys.path.append(r'C:\BELLA')
from src.divers.excel import append_df_to_excel
from src.buckling.buckling import buckling_factor
from src.guidelines.ipo_oopo import calc_penalty_ipo_oopo_mp
from src.guidelines.contiguity import calc_penalty_contig_mp
from src.guidelines.disorientation import calc_number_violations_diso_mp
from src.guidelines.ten_percent_rule import calc_penalty_10_ss
from src.CLA.lampam_functions import calc_lampam
from src.CLA.ABD import D_from_lampam
def save_result_BELLAs(filename, multipanel, constraints, parameters,
obj_func_param, pdl, output, mat=None, only_best=False):
"""
saves the results for the design of a multipanel structure
"""
if only_best:
table_res = pd.DataFrame()
if hasattr(output, 'time'):
table_res.loc[0, 'time (s)'] = output.time
table_res = save_result_BELLA_one_pdl(
table_res,
multipanel,
constraints,
parameters,
obj_func_param,
output,
None,
0,
mat)
append_df_to_excel(
filename, table_res, 'Best Result', index=False, header=True)
return 0
for ind in range(parameters.n_ini_ply_drops):
table_res = pd.DataFrame()
table_res = save_result_BELLA_one_pdl(
table_res,
multipanel,
constraints,
parameters,
obj_func_param,
output,
pdl[ind],
ind,
mat)
append_df_to_excel(
filename, table_res, 'Results', index=False, header=True)
# to save best result
table_res = pd.DataFrame()
table_res.loc[0, 'time (s)'] = output.time
table_res = save_result_BELLA_one_pdl(
table_res,
multipanel,
constraints,
parameters,
obj_func_param,
output,
pdl[output.ind_mini],
0,
mat)
append_df_to_excel(
filename, table_res, 'Best Result', index=False, header=True)
return 0
def save_result_BELLA_one_pdl(
table_res, multipanel, constraints, parameters, obj_func_param,
output, pdl=None, ind=0, mat=None):
"""
saves the results at one iteration of the design of a multipanel
structure
"""
if parameters is None:
if multipanel.panels[0].N_x == 0 and multipanel.panels[0].N_y == 0:
save_buckling = False
else:
save_buckling = True
else:
save_buckling = parameters.save_buckling
if hasattr(output, 'obj_constraints'):
table_res.loc[ind, 'obj_constraints'] \
= output.obj_constraints_tab[ind]
if hasattr(output, 'n_obj_func_calls_tab'):
table_res.loc[ind, 'n_obj_func_calls'] \
= output.n_obj_func_calls_tab[ind]
# table_res.loc[ind, 'n_designs_last_level'] \
# = output.n_designs_last_level_tab[ind]
# table_res.loc[ind, 'n_designs_after_ss_ref_repair'] \
# = output.n_designs_after_ss_ref_repair_tab[ind]
# table_res.loc[ind, 'n_designs_after_thick_to_thin'] \
# = output.n_designs_after_thick_to_thin_tab[ind]
# table_res.loc[ind, 'n_designs_after_thin_to_thick'] \
# = output.n_designs_after_thin_to_thick_tab[ind]
# table_res.loc[ind, 'n_designs_repaired_unique'] \
# = output.n_designs_repaired_unique_tab[ind]
table_res.loc[ind, 'penalty_spacing'] = output.penalty_spacing_tab[ind]
ss = output.ss
norm_diso_contig = np.array(
[panel.n_plies for panel in multipanel.panels])
n_diso = calc_number_violations_diso_mp(ss, constraints)
penalty_diso = np.zeros((multipanel.n_panels,))
if constraints.diso and n_diso.any():
penalty_diso = n_diso / norm_diso_contig
else:
penalty_diso = np.zeros((multipanel.n_panels,))
n_contig = calc_penalty_contig_mp(ss, constraints)
penalty_contig = np.zeros((multipanel.n_panels,))
if constraints.contig and n_contig.any():
penalty_contig = n_contig / norm_diso_contig
else:
        penalty_contig = np.zeros((multipanel.n_panels,))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 6 11:06:33 2018
@author: jeremiasknoblauch
Description: Get plots for EU1880 data
"""
import pickle
import numpy as np
from Evaluation_tool import EvaluationTool
from matplotlib import pyplot as plt
import matplotlib.dates as mdates
import csv
import datetime
import matplotlib
import math
#ensure that we have type 1 fonts (for ICML publishing guidelines)
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
"""STEP 1: Get file names"""
data_file = ("///Users//jeremiasknoblauch//Documents////OxWaSP//BOCPDMS//Code//" +
"SpatialBOCD//Data//EuropeanTemperatureData//1880//1880_temperatures.csv")
nbhs_file = ("//Users//jeremiasknoblauch//Documents////OxWaSP//BOCPDMS//Code//" +
"SpatialBOCD//Data//EuropeanTemperatureData//1880//1880_pw_distances.csv")
results_file = ("///Users//jeremiasknoblauch//Documents////OxWaSP//BOCPDMS//Code//" +
"SpatialBOCD//Paper//EUTemperature1880//" +
"results_EUTemp1880_medium_range_models.txt")
storage_folder = ("//Users//jeremiasknoblauch//Documents////OxWaSP//BOCPDMS//Code//" +
"SpatialBOCD//Paper//EUTemperature1880//")
"""STEP 2: Read in the data"""
"""Get p.w. distances"""
""" Read in (as strings)"""
pw_distances = []
station_IDs = []
count = 0
with open(nbhs_file) as csvfile:
reader = csv.reader(csvfile)
for row in reader:
if count > 0:
pw_distances += row
else:
station_IDs += row
count += 1
num_stations = int(np.sqrt(len(pw_distances)))
"""Convert into floats"""
pwd = []
stat_IDs = []
for entry in pw_distances:
pwd += [float(entry)]
count2 = 0
for entry in station_IDs:
stat_IDs += [float(entry)]
pw_distances = np.array(pwd, dtype=float)
import numpy as np
from numba import jit
import cv2
from stytra.tracking import ParametrizedImageproc
class TailTrackingMethod(ParametrizedImageproc):
"""General tail tracking method."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
# TODO maybe getting default values here:
self.add_params(
n_segments=dict(value=10, type="int", limits=(2, 50)),
tail_start=dict(value=(440, 225), visible=False),
tail_length=dict(value=(-250, 30), visible=False),
)
self.accumulator_headers = ["tail_sum"] + [
"theta_{:02}".format(i) for i in range(self.params["n_segments"])
]
self.monitored_headers = ["tail_sum"]
self.data_log_name = "behaviour_tail_log"
class CentroidTrackingMethod(TailTrackingMethod):
"""Center-of-mass method to find consecutive segments."""
name = "tracking_tail_params"
def __init__(self):
super().__init__()
self.add_params(
window_size=dict(value=30, suffix=" pxs", type="float", limits=(2, 100))
)
@classmethod
def detect(
cls,
im,
tail_start=(0, 0),
tail_length=(1, 1),
n_segments=12,
window_size=7,
image_scale=1,
**extraparams
):
"""Finds the tail for an embedded fish, given the starting point and
the direction of the tail. Alternative to the sequential circular arches.
Parameters
----------
im :
image to process
        tail_start :
            starting point (x, y) (Default value = (0, 0))
        tail_length :
            tail length (x, y) (Default value = (1, 1))
        n_segments :
            number of desired segments (Default value = 12)
        window_size :
            window size in pixel for center-of-mass calculation (Default value = 7)
        color_invert :
            True for inverting luminosity of the image (Default value = False)
        filter_size :
            Size of the box filter to low-pass filter the image (Default value = 0)
        image_scale :
            the amount of downscaling of the image (Default value = 1)
Returns
-------
type
list of cumulative sum + list of angles
"""
start_y, start_x = tail_start
tail_length_y, tail_length_x = tail_length
n_segments += 1
# Calculate tail length:
length_tail = np.sqrt(tail_length_x ** 2 + tail_length_y ** 2) * image_scale
# Segment length from tail length and n of segments:
seg_length = length_tail / n_segments
# Initial displacements in x and y:
disp_x = tail_length_x * image_scale / n_segments
disp_y = tail_length_y * image_scale / n_segments
angles = []
start_x *= image_scale
start_y *= image_scale
halfwin = window_size / 2
for i in range(1, n_segments):
# Use next segment function for find next point
# with center-of-mass displacement:
start_x, start_y, disp_x, disp_y, acc = _next_segment(
im, start_x, start_y, disp_x, disp_y, halfwin, seg_length
)
abs_angle = np.arctan2(disp_x, disp_y)
angles.append(abs_angle)
        return [reduce_to_pi(angles[-1] + angles[-2] - angles[0] - angles[1])] + angles[:]
class AnglesTrackingMethod(TailTrackingMethod):
"""Angular sweep method to find consecutive segments."""
name = "tracking_tail_params"
def __init__(self):
super().__init__()
self.add_params(dark_tail=False)
@classmethod
def detect(
cls,
im,
tail_start=(0, 0),
n_segments=7,
tail_length=(1, 1),
dark_tail=False,
image_scale=1,
**extraparams
):
"""Tail tracing based on min (or max) detection on arches. Wraps
_tail_trace_core_ls. Speed testing: 20 us for a 514x640 image without
smoothing, 300 us with smoothing.
Parameters
----------
        im :
            input image
        tail_start :
            tail starting point (x, y) (Default value = (0, 0))
        tail_length :
            tail length (x, y) (Default value = (1, 1))
        n_segments :
            number of segments (Default value = 7)
        filter_size :
            Box for smoothing the image (Default value = 0)
        dark_tail :
            True for inverting image colors (Default value = False)
        image_scale :
            (Default value = 1)
Returns
-------
"""
start_y, start_x = tail_start
tail_length_y, tail_length_x = tail_length
# Calculate tail length:
length_tail = np.sqrt(tail_length_x ** 2 + tail_length_y ** 2) * image_scale
# Initial displacements in x and y:
disp_x = tail_length_x * image_scale / n_segments
disp_y = tail_length_y * image_scale / n_segments
start_x *= image_scale
start_y *= image_scale
# Use jitted function for the actual calculation:
angle_list = _tail_trace_core_ls(
im, start_x, start_y, disp_x, disp_y, n_segments, length_tail, dark_tail
)
return angle_list
@jit(nopython=True)
def reduce_to_pi(angle):
"""
Parameters
----------
angle :
Returns
-------
"""
return np.mod(angle + np.pi, 2 * np.pi) - np.pi
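# Hypothetical sanity check for reduce_to_pi (illustrative only, not part of the
# original module): an angle of 3*pi/2 should wrap to -pi/2 inside (-pi, pi].
def _demo_reduce_to_pi():
    wrapped = reduce_to_pi(3.0 * np.pi / 2.0)
    assert abs(wrapped - (-np.pi / 2.0)) < 1e-9
    return wrapped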
@jit(nopython=True)
def find_direction(start, image, seglen):
"""
Parameters
----------
start :
image :
seglen :
Returns
-------
"""
n_angles = 20
angles = np.arange(n_angles) * np.pi * 2 / n_angles
detect_angles = angles
weighted_vector = np.zeros(2)
for i in range(detect_angles.shape[0]):
coord = (
int(start[0] + seglen * np.cos(detect_angles[i])),
int(start[1] + seglen * np.sin(detect_angles[i])),
)
if (
(coord[0] > 0)
& (coord[0] < image.shape[1])
& (coord[1] > 0)
& (coord[1] < image.shape[0])
):
brg = image[coord[1], coord[0]]
weighted_vector += brg * np.array(
[np.cos(detect_angles[i]), np.sin(detect_angles[i])]
)
return np.arctan2(weighted_vector[1], weighted_vector[0])
@jit(nopython=True, cache=True)
def angle(dx1, dy1, dx2, dy2):
"""Calculate angle between two segments d1 and d2
Parameters
----------
dx1 :
x length for first segment
dy1 :
y length for first segment
dx2 :
param dy2: -
dy2 :
Returns
-------
type
angle between -pi and +pi
"""
alph1 = np.arctan2(dy1, dx1)
alph2 = np.arctan2(dy2, dx2)
diff = alph2 - alph1
if diff >= np.pi:
diff -= 2 * np.pi
if diff <= -np.pi:
diff += 2 * np.pi
return diff
def bp_filter_img(img, small_square=3, large_square=50):
"""Bandpass filter for images.
Parameters
----------
img :
input image
small_square :
small square for low-pass smoothing (Default value = 3)
large_square :
big square for high pass smoothing (subtraction of background shades) (Default value = 50)
Returns
-------
type
filtered image
"""
img_filt_lower = cv2.boxFilter(img, -1, (large_square, large_square))
img_filt_low = cv2.boxFilter(img, -1, (small_square, small_square))
return cv2.absdiff(img_filt_low, img_filt_lower)
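# Hypothetical usage sketch for bp_filter_img (the synthetic image below is an
# assumption made for illustration, not data from the original pipeline).
def _demo_bp_filter_img():
    img = (np.random.rand(64, 64) * 255).astype(np.uint8)
    # small box filter smooths noise, large box filter estimates background shading
    filtered = bp_filter_img(img, small_square=3, large_square=31)
    return filtered  # same shape and dtype as the input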
@jit(nopython=True)
def _next_segment(fc, xm, ym, dx, dy, halfwin, next_point_dist):
"""Find the endpoint of the next tail segment
by calculating the moments in a look-ahead area
Parameters
----------
fc :
image to find tail
xm :
starting point x
ym :
starting point y
dx :
initial displacement x
dy :
initial displacement y
    halfwin :
        half size of the window used to estimate the next tail point
    next_point_dist :
        distance to the next tail point
Returns
-------
"""
# Generate square window for center of mass
halfwin2 = halfwin ** 2
y_max, x_max = fc.shape
xs = min(max(int(round(xm + dx - halfwin)), 0), x_max)
xe = min(max(int(round(xm + dx + halfwin)), 0), x_max)
ys = min(max(int(round(ym + dy - halfwin)), 0), y_max)
ye = min(max(int(round(ym + dy + halfwin)), 0), y_max)
# at the edge returns invalid data
if xs == xe and ys == ye:
return -1, -1, 0, 0, 0
# accumulators
acc = 0.0
acc_x = 0.0
acc_y = 0.0
for x in range(xs, xe):
for y in range(ys, ye):
lx = (xs + halfwin - x) ** 2
ly = (ys + halfwin - y) ** 2
if lx + ly <= halfwin2:
acc_x += x * fc[y, x]
acc_y += y * fc[y, x]
acc += fc[y, x]
if acc == 0:
return -1, -1, 0, 0, 0
# center of mass relative to the starting points
mn_y = acc_y / acc - ym
mn_x = acc_x / acc - xm
# normalise to segment length
a = np.sqrt(mn_y ** 2 + mn_x ** 2) / next_point_dist
# check center of mass validity
if a == 0:
return -1, -1, 0, 0, 0
# Use normalization factor
dx = mn_x / a
dy = mn_y / a
return xm + dx, ym + dy, dx, dy, acc
@jit(nopython=True)
def _tail_trace_core_ls(
img, start_x, start_y, disp_x, disp_y, num_points, tail_length, color_invert
):
"""Tail tracing based on min (or max) detection on arches. Wrapped by
trace_tail_angular_sweep.
Parameters
----------
img :
start_x :
start_y :
disp_x :
disp_y :
num_points :
tail_length :
color_invert :
Returns
-------
"""
# Define starting angle based on tail dimensions:
start_angle = np.arctan2(disp_x, disp_y)
# Initialise first angle arch, tail sum and angle list:
pi2 = np.pi / 2
lin = np.linspace(-pi2 + start_angle, pi2 + start_angle, 25)
tail_sum = 0.
    angles = np.zeros(num_points + 1)
"""!@package func_utils
Multiple nonconvex loss functions.
"""
import numpy as np
import scipy
import random
import math
import time
## constant indicating total available memory when calculating full gradient
total_mem_full = 3.0e10
## constant indicating total available memory when calculating batch gradient
total_mem_batch = 2.0e10
def prox_l1_norm( w, lamb=1 ):
"""! Compute the proximal operator of the \f$\ell_1\f$ - norm
    \f$ \mathrm{prox}_{\lambda \|\cdot\|_1}(w) = \arg\min_x\left\{\|x\|_1 + \frac{1}{2\lambda}\|x - w\|^2\right\} \f$
    Parameters
    ----------
    @param w : input vector
    @param lamb : penalty parameter
Returns
----------
@retval : output vector
"""
return np.sign( w ) * np.maximum( np.abs( w ) - lamb, 0 )
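# Hypothetical soft-thresholding check (illustrative only): entries with
# |w_i| <= lamb are set to zero, the others are shrunk towards zero by lamb.
def _demo_prox_l1_norm():
    w = np.array([-3.0, -0.5, 0.0, 0.5, 3.0])
    return prox_l1_norm(w, lamb=1.0)   # expected: [-2., 0., 0., 0., 2.]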
def prox_l2_norm( w, lamb=1 ):
"""! Compute the proximal operator of the \f$\ell_2\f$ - norm
Parameters
----------
@param w : input vector
    @param lamb : penalty parameter
Returns
----------
@retval : output vector
"""
norm_w = np.linalg.norm(w, ord=2)
return np.maximum(1 - lamb/norm_w, 0) * w
def prox_linf_norm( w, lamb=1 ):
"""! Compute the proximal operator of the \f$\ell_2\f$ - norm
\f$ prox_{\lambda \|.\|_2} = {arg\min_x}\left\{\|.\|_1^2 + \frac{1}{2\lambda}\|x - w\|^2\right\} \f$
Parameters
----------
@param w : input vector
@param lamb : penalty paramemeter
Returns
----------
@retval : output vector
"""
return w - lamb * proj_l1_ball(w / lamb)
def proj_l2_ball( w, lamb=1 ):
"""! Compute the projection onto \f$\ell_2\f$-ball
Parameters
----------
@param w : input vector
    @param lamb : penalty parameter
Returns
----------
@retval : output vector
"""
norm_w = np.linalg.norm(w, ord=2)
if norm_w > lamb:
return lamb * w / norm_w
else:
return w
def proj_l1_ball( w, lamb=1 ):
"""! Compute the projection onto \f$\ell_1\f$-ball
Parameters
----------
@param w : input vector
    @param lamb : penalty parameter
Returns
----------
@retval : output vector
"""
norm_w = np.linalg.norm(w, ord=1)
if norm_w <= 1:
return w
else:
        # find the soft-thresholding level by sorting the absolute values
        # (standard projection onto the unit l1-ball)
        sort_w = np.sort(np.abs(w))[::-1]
        tmp_sum = np.cumsum(sort_w)
        ind = np.arange(1, len(sort_w) + 1)
        rho = np.nonzero(sort_w * ind > (tmp_sum - 1.0))[0][-1]
        lamb = np.maximum((tmp_sum[rho] - 1.0) / (rho + 1.0), 0)
        return prox_l1_norm(w, lamb)
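# Hypothetical sanity check for proj_l1_ball (illustrative only): a point
# outside the unit l1-ball is mapped onto its boundary.
def _demo_proj_l1_ball():
    w = np.array([0.8, -0.6, 0.4])           # ||w||_1 = 1.8 > 1
    p = proj_l1_ball(w)
    return p, np.linalg.norm(p, ord=1)       # the returned l1 norm should be 1.0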
def proj_linf_ball( w, lamb=1 ):
"""! Compute the projection onto \f$\ell_{\infty}\f$-ball
Parameters
----------
@param w : input vector
    @param lamb : penalty parameter
    Returns
    ----------
    @retval : output vector, the projection of w onto the $\ell_{\infty}$-ball
"""
norm_w = np.linalg.norm(w, ord=np.inf)
if norm_w > lamb:
return lamb * w / norm_w
else:
return w
def ind_l2_ball( w, lamb=1 ):
"""! Compute the indication function of the \f$\ell_{2}\f$-ball
Parameters
----------
@param w : input vector
    @param lamb : penalty parameter
Returns
----------
@retval : whether the input is in $\ell_{2}$-ball
"""
# norm_w = np.linalg.norm(w, ord=2)
# if norm_w > lamb:
# return np.inf
# else:
# return 0.0
return 0.0
def ind_l1_ball( w, lamb=1 ):
"""! Compute the indication function of the \f$\ell_1\f$-ball
Parameters
----------
@param w : input vector
    @param lamb : penalty parameter
Returns
----------
@retval : whether the input is in $\ell_1$-ball
"""
# norm_w = np.linalg.norm(w, ord=1)
# if norm_w > lamb:
# return np.inf
# else:
# return 0.0
return 0.0
def ind_linf_ball( w, lamb=1 ):
"""! Compute the indication function of the \f$\ell_{\infty}\f$-ball
Parameters
----------
@param w : input vector
    @param lamb : penalty parameter
Returns
----------
@retval : whether the input is in $\ell_{\infty}$-ball
"""
# norm_w = np.linalg.norm(w, ord=np.inf)
# if norm_w > lamb:
# return np.inf
# else:
# return 0.0
return 0.0
def func_val_l1_norm( w, lamb=1 ):
"""! Compute \f$\ell_1\f$ - norm of a vector
Parameters
----------
@param w : input vector
Returns
----------
@retval : \f$ \|w\|_1 \f$
"""
return lamb * np.linalg.norm( w,ord = 1 )
def func_val_l2_norm( w, lamb=1 ):
"""! Compute \f$\ell_1\f$ - norm of a vector
Parameters
----------
@param w : input vector
Returns
----------
@retval : \f$ \|w\|_1 \f$
"""
return lamb * np.linalg.norm( w,ord = 2 )
def func_val_linf_norm( w, lamb=1 ):
"""! Compute \f$\ell_1\f$ - norm of a vector
Parameters
----------
@param w : input vector
Returns
----------
@retval : \f$ \|w\|_1 \f$
"""
return lamb * np.linalg.norm( w, ord=np.inf )
def func_val_huber( w, lamb=1.0, dt=1.0 ):
"""! Compute huber loss of a vector
Parameters
----------
@param w : input vector
Returns
----------
@retval : huber loss
"""
abs_w = np.abs(w)
return np.sum((abs_w <= dt)*0.5*w*w + (abs_w > dt) * dt*(abs_w - 0.5*dt))
def grad_eval_huber( w, lamb=1.0, dt=1.0 ):
"""! Compute gradient of huber loss of a vector
Parameters
----------
@param w : input vector
Returns
----------
@retval : grad huber loss
"""
abs_w = np.abs(w)
return (abs_w <= dt)*w + (abs_w > dt) * dt*np.sign(w)
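# Hypothetical finite-difference check (illustrative only): grad_eval_huber
# should match the numerical gradient of func_val_huber entry by entry.
def _demo_check_huber_gradient(eps=1e-6):
    w = np.array([-2.0, -0.3, 0.0, 0.7, 1.5])
    g = grad_eval_huber(w, dt=1.0)
    g_num = np.zeros_like(w)
    for i in range(w.size):
        e = np.zeros_like(w)
        e[i] = eps
        g_num[i] = (func_val_huber(w + e, dt=1.0) - func_val_huber(w - e, dt=1.0)) / (2 * eps)
    return np.max(np.abs(g - g_num))   # should be roughly 1e-9 or smaller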
def prox_huber(w, lamb=1.0, dt=1.0):
"""! Compute proximal operator of huber loss
Parameters
----------
@param w : input vector
    @param lamb : penalty parameter
Returns
----------
@retval : proximal operator of huber loss
"""
abs_w = np.abs(w)
return (abs_w <= dt)*(1.0/(1.0 + lamb))*w + (abs_w > dt) * prox_l1_norm(w,dt*lamb)
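# Hypothetical sketch for prox_huber (illustrative only): small entries are
# scaled by 1/(1 + lamb), large entries are soft-thresholded by dt * lamb.
def _demo_prox_huber():
    w = np.array([-0.4, 0.4, 3.0])
    return prox_huber(w, lamb=1.0, dt=1.0)   # expected: [-0.2, 0.2, 2.0]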
def func_val_huber_conj( w, lamb=1.0, dt=1.0 ):
"""! Compute conjugate of huber loss
Parameters
----------
@param w : input vector
    @param lamb : penalty parameter
@param dt : delta
Returns
----------
@retval : conjugate of huber function
"""
abs_w = np.abs(w)
return np.sum((abs_w <= dt)*0.5*w*w + (abs_w > dt)*0.5*dt*dt )
###################################################################
def func_val_bin_class_loss_1( n, d, b, X, Y, bias, w, lamb = None, XYw_bias = None, nnzX = None, index=None):
"""! Compute the objective value of loss function 1
\f$\ell_1( Y( Xw + b )) := 1 - \tanh( \omega Y( Xw + b )) \f$
for a given \f$ \omega > 0\f$. Here \f$ \omega = 1\f$.
Parameters
----------
@param n : sample size
@param d : number of features
@param b : mini - batch size
b = 1: single stochastic gradient
1 < b < n: mini - batch stochastic gradient
b = n: full gradient
@param X : input data
@param Y : input label
@param bias : input bias
@param w : input vector
@param lamb: penalty parameters
@param XYw_bias : precomputed Y(Xw + b) if available
@param nnzX : average number of non - zero elements for each sample
@param index : index set for mini-batch calculation
Returns
----------
@retval : \f$\ell_1( Y( Xw + b ))\f$
"""
omega = 1.0
if b == 1:
# get a random sample
if index is None:
index = np.random.randint( 0, n )
Xi = X[index,:]
        expt = np.exp( 2.0 * omega * Y[index] * ( Xi.dot( w ) + bias[index] ))
return ( 1.0/float( b )) * np.sum( 2.0 / ( expt + 1.0 ))
# mini-batch
elif b < n:
# get a random batch of size b
if index is None:
index = random.sample( range( n ), b )
# calculate number of batches
if nnzX is None:
nnzX = d
batch_size = np.maximum( int( total_mem_full // nnzX ), 1 )
num_batches = math.ceil( b / batch_size )
batch_loss = 0.0
for j in range( num_batches ):
# calculate start/end indices for each batch
startIdx = batch_size * j
endIdx = np.minimum( batch_size * ( j + 1 ), b - 1 )
batch_X = X[index[startIdx:endIdx],:]
batch_Y = Y[index[startIdx:endIdx]]
batch_bias = bias[index[startIdx:endIdx]]
expt = np.exp( 2.0 * omega * batch_Y * ( batch_X.dot( w ) + batch_bias ))
batch_loss += np.sum( 2.0 / ( expt + 1.0 ))
return batch_loss / float( b )
# full
else:
if XYw_bias is not None:
expt = np.exp( 2.0 * omega * XYw_bias )
return ( 1.0/float( n )) * np.sum( 2.0 / ( expt + 1.0 ))
else:
# calculate number of batches
if nnzX is None:
nnzX = d
batch_size = np.maximum( int( total_mem_full // nnzX ), 1 )
num_batches = math.ceil( n / batch_size )
full_loss = 0.0
for j in range( num_batches ):
# calculate start/end indices for each batch
startIdx = batch_size * j
endIdx = np.minimum( batch_size * ( j + 1 ), n - 1 )
batch_X = X[startIdx:endIdx,:]
batch_Y = Y[startIdx:endIdx]
batch_bias = bias[startIdx:endIdx]
expt = np.exp( 2.0 * omega * batch_Y * ( batch_X.dot( w ) + batch_bias ))
full_loss += np.sum( 2.0 / ( expt + 1.0 ))
return full_loss / float( n )
def func_diff_eval_bin_class_loss_1( n, d, b, X, Y, bias, w1, w2, lamb = None, XYw_bias = None, nnzX = None, index=None ):
"""! Compute the objective value of loss function 1,
    \f$\ell_1( Y( Xw_2 + b )) - \ell_1( Y( Xw_1 + b )) \f$
for a given \f$ \omega > 0\f$. Here \f$ \omega = 1\f$.
Parameters
----------
@param n : sample size
@param d : number of features
@param b : mini - batch size
b = 1: single stochastic gradient
1 < b < n: mini - batch stochastic gradient
b = n: full gradient
@param X : input data
@param Y : input label
@param bias : input bias
@param w1 : 1st input vector
    @param w2 : 2nd input vector
@param lamb : penalty parameters
@param XYw_bias : precomputed Y(Xw + b) if available
@param nnzX : average number of non - zero elements for each sample
@param index : index set for mini-batch calculation
Returns
----------
    @retval : \f$\ell_1( Y( Xw_2 + b )) - \ell_1( Y( Xw_1 + b ))\f$
"""
omega = 1.0
if b == 1:
# get a random sample
i = np.random.randint( 0, n )
Xi = X[i,:]
expt1 = np.exp( 2.0 * omega * Y[i] * ( Xi.dot( w1 ) + bias[i] ))
expt2 = np.exp( 2.0 * omega * Y[i] * ( Xi.dot( w2 ) + bias[i] ))
return ( 1.0/float( b )) * np.sum( 2.0 / ( expt2 + 1.0 ) - 2.0 / ( expt1 + 1.0 ))
# mini-batch
elif b < n:
# get a random batch of size b
index = random.sample( range( n ), b )
# calculate number of batches
if nnzX is None:
nnzX = d
batch_size = np.maximum( int( total_mem_full // nnzX ), 1 )
num_batches = math.ceil( b / batch_size )
batch_loss_diff = 0.0
for j in range( num_batches ):
# calculate start/end indices for each batch
startIdx = batch_size * j
endIdx = np.minimum( batch_size * ( j + 1 ), b - 1 )
batch_X = X[index[startIdx:endIdx],:]
batch_Y = Y[index[startIdx:endIdx]]
batch_bias = bias[index[startIdx:endIdx]]
expt1 = np.exp( 2.0 * omega * batch_Y * ( batch_X.dot( w1 ) + batch_bias ))
expt2 = np.exp( 2.0 * omega * batch_Y * ( batch_X.dot( w2 ) + batch_bias ))
batch_loss_diff += np.sum( 2.0 / ( expt2 + 1.0 ) - 2.0 / ( expt1 + 1.0 ))
return batch_loss_diff / float( b )
# full
else:
if XYw_bias is not None:
expt = np.exp( 2.0 * omega * XYw_bias )
return ( 1.0/float( b )) * np.sum( 2.0 / ( expt + 1.0 ))
else:
# calculate number of batches
if nnzX is None:
nnzX = d
batch_size = np.maximum( int( total_mem_full // nnzX ), 1 )
num_batches = math.ceil( n / batch_size )
full_loss_diff = 0.0
for j in range( num_batches ):
# calculate start/end indices for each batch
startIdx = batch_size * j
endIdx = np.minimum( batch_size * ( j + 1 ), n - 1 )
batch_X = X[startIdx:endIdx,:]
batch_Y = Y[startIdx:endIdx]
batch_bias = bias[startIdx:endIdx]
expt1 = np.exp( 2.0 * omega * batch_Y * ( batch_X.dot( w1 ) + batch_bias ))
expt2 = np.exp( 2.0 * omega * batch_Y * ( batch_X.dot( w2 ) + batch_bias ))
full_loss_diff += np.sum( 2.0 / ( expt2 + 1.0 )- 2.0 / ( expt1 + 1.0 ) )
return full_loss_diff / float( n )
def grad_eval_bin_class_loss_1( n, d, b, X, Y, bias, w, lamb = None, nnzX = None, index=None ):
"""! Compute the ( full/stochastic ) gradient of loss function 1.
where \f$\ell_1( Y( Xw + b )) := 1 - \tanh( \omega Y( Xw + b )) \f$
    for a given \f$ \omega > 0\f$. Here \f$ \omega = 1\f$.
Parameters
----------
@param n : sample size
@param d : number of features
@param b : mini - batch size
b = 1: single stochastic gradient
1 < b < n: mini - batch stochastic gradient
b = n: full gradient
@param X : input data
@param Y : input label
@param bias : input bias
@param w : input vector
@param lamb: penalty parameters
@param nnzX : average number of non - zero elements for each sample
@param index : index set for mini-batch calculation
Returns
----------
@retval : computed full/stochastic gradient
@retval XYw_bias: The precomputed \f$ Y( Xw + bias )\f$
"""
if nnzX is None:
nnzX = d
omega = 1.0
# single sample
if b == 1:
# get a random sample
i = np.random.randint( 0, n )
Xi = X[i,:]
expt = np.exp( 2.0 * omega * Y[i] * ( Xi.dot( w ) + bias[i] ))
return - 4.0 * omega * ( (expt/( expt + 1.0 ))/( expt + 1.0 )) * Y[i] * Xi
# mini-batch
elif b < n:
# get a random batch of size b
index = random.sample( range( n ), b )
# calculate number of batches
if nnzX == 0:
nnzX = d
batch_size = np.maximum( int( total_mem_full // nnzX ), 1 )
num_batches = math.ceil( b / batch_size )
batch_grad = np.zeros( d )
for j in range( num_batches ):
# calculate start/end indices for each batch
startIdx = batch_size * j
endIdx = np.minimum( batch_size * ( j + 1 ), b - 1 )
batch_X = X[index[startIdx:endIdx],:]
batch_Y = Y[index[startIdx:endIdx]]
batch_bias = bias[index[startIdx:endIdx]]
expt = np.exp( 2.0 * omega * batch_Y * ( batch_X.dot( w ) + batch_bias ))
batch_grad -= 4.0 * omega * batch_X.transpose().dot( batch_Y * ( (expt/( expt + 1.0 ))/( expt + 1.0 )) )
return batch_grad / float( b )
# full
else:
# calculate number of batches
if nnzX == 0:
nnzX = d
batch_size = np.maximum( int( total_mem_full // nnzX ), 1 )
num_batches = math.ceil( b / batch_size )
full_grad = np.zeros( d )
XYw_bias = np.zeros( n )
for j in range( num_batches ):
# calculate start/end indices for each batch
startIdx = batch_size * j
endIdx = np.minimum( batch_size * ( j + 1 ), n - 1 )
batch_X = X[startIdx:endIdx,:]
batch_Y = Y[startIdx:endIdx]
batch_bias = bias[startIdx:endIdx]
batch_XYw_bias = batch_Y * ( batch_X.dot( w ) + batch_bias )
XYw_bias[startIdx:endIdx] = batch_XYw_bias
expt = np.exp( 2.0 * omega * batch_XYw_bias )
full_grad -= 4.0 * omega * batch_X.transpose().dot( batch_Y * ( (expt/( expt + 1.0 ))/( expt + 1.0 )) )
return full_grad / float( n ), XYw_bias
def grad_diff_eval_bin_class_loss_1( n, d, b, X, Y, bias, w1, w2, lamb = None, nnzX = None, index=None ):
"""! Compute the ( full/stochastic ) gradient difference of loss function 1
\f$\displaystyle\frac{1}{b}\left( \sum_{i \in \mathcal{B}_t}( \nabla f_i( w_2 ) - \nabla f_i( w_1 )) \right ) \f$
Parameters
----------
@param n : sample size
@param d : number of features
@param b : mini - batch size
b = 1: single stochastic gradient
1 < b < n: mini - batch stochastic gradient
b = n: full gradient
@param X : input data
@param Y : input label
@param bias : input bias
@param w1 : input vector
@param w2 : input vector
@param nnzX : average number of non - zero elements for each sample
Returns
----------
@retval : computed full/stochastic gradient
"""
if nnzX is None:
nnzX = d
omega = 1.0
# single sample
if b == 1:
# get a random sample
i = np.random.randint( 0,n )
Xi = X[i, :]
expt1 = np.exp( 2.0 * omega * Y[i] * ( Xi.dot( w1 ) + bias[i] ))
expt2 = np.exp( 2.0 * omega * Y[i] * ( Xi.dot( w2 ) + bias[i] ))
diff_expt = (expt2 / ( expt2 + 1.0 )) / ( expt2 + 1.0 ) - expt1 / ( expt1 + 1.0 ) / ( expt1 + 1.0 )
return - ( 4.0 * omega * diff_expt * Y[i] ) * Xi
# batch
elif b < n:
# get a random batch of size b
index = random.sample( range( n ), b )
# calculate number of batches
if nnzX == 0:
nnzX = d
batch_size = np.maximum( int( total_mem_full // nnzX ), 1 )
num_batches = math.ceil( b / batch_size )
batch_grad_diff = np.zeros( d )
for j in range( num_batches ):
# calculate start/end indices for each batch
startIdx = batch_size * j
endIdx = np.minimum( batch_size * ( j + 1 ), b - 1 )
batch_X = X[index[startIdx:endIdx],:]
batch_Y = Y[index[startIdx:endIdx]]
batch_bias = bias[index[startIdx:endIdx]]
expt1 = np.exp( 2.0 * omega * batch_Y * ( batch_X.dot( w1 ) + batch_bias ))
expt2 = np.exp( 2.0 * omega * batch_Y * ( batch_X.dot( w2 ) + batch_bias ))
diff_expt = (expt2/( expt2 + 1.0 ))/( expt2 + 1.0 ) - (expt1/( expt1 + 1.0 ))/( expt1 + 1.0 )
batch_grad_diff -= 4.0 * omega * batch_X.transpose().dot( batch_Y * diff_expt )
return batch_grad_diff / float( b )
# full
else:
# calculate number of batches
if nnzX == 0:
nnzX = d
batch_size = np.maximum( int( total_mem_full // nnzX ), 1 )
num_batches = math.ceil( b / batch_size )
full_grad_diff = np.zeros( d )
for j in range( num_batches ):
# calculate start/end indices for each batch
startIdx = batch_size * j
endIdx = np.minimum( batch_size * ( j + 1 ), n - 1 )
batch_X = X[startIdx:endIdx]
batch_Y = Y[startIdx:endIdx]
batch_bias = bias[startIdx:endIdx]
expt1 = np.exp( 2.0 * omega * batch_Y * ( batch_X.dot( w1 ) + batch_bias ))
expt2 = np.exp( 2.0 * omega * batch_Y * ( batch_X.dot( w2 ) + batch_bias ))
diff_expt = (expt2/( expt2 + 1.0 ))/( expt2 + 1.0 ) - (expt1/( expt1 + 1.0 ))/( expt1 + 1.0 )
full_grad_diff -= 4.0 * omega * batch_X.transpose().dot( batch_Y * diff_expt )
return full_grad_diff / float( n )
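# Hypothetical end-to-end sketch for loss function 1 on synthetic data
# (the sample size, dimension and random data below are assumptions made for
# illustration, not values from the original experiments).
def _demo_bin_class_loss_1():
    rng = np.random.RandomState(0)
    n, d = 64, 5
    X = rng.randn(n, d)
    Y = np.sign(rng.randn(n))
    bias = np.zeros(n)
    w = rng.randn(d)
    # full objective value and full gradient (b = n)
    loss = func_val_bin_class_loss_1(n, d, n, X, Y, bias, w)
    grad, XYw_bias = grad_eval_bin_class_loss_1(n, d, n, X, Y, bias, w)
    return loss, grad.shape, XYw_bias.shape   # scalar, (d,), (n,)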
######################################################################
def func_val_bin_class_loss_2( n, d, b, X, Y, bias, w, lamb = None, XYw_bias = None, nnzX = None ):
"""! Compute the objective value of loss function 2
\f$\ell_2( Y( Xw + b )) := \left( 1 - \frac{1}{1 + \exp[ -Y( Xw + b )]}\right )^2 \f$
for a given \f$ \omega > 0\f$.
Parameters
----------
@param n : sample size
@param d : number of features
@param b : mini - batch size
b = 1: single stochastic gradient
1 < b < n: mini - batch stochastic gradient
b = n: full gradient
@param X : input data
@param Y : input label
@param bias : input bias
@param w : input vector
@param lamb: penalty parameters
@param XYw_bias : precomputed Y(Xw + b) if available
@param nnzX : average number of non - zero elements for each sample
@param index : index set for mini-batch calculation
Returns
----------
@retval : \f$\ell_2( Y( Xw + b ))\f$
"""
if b == 1:
# get a random sample
i = np.random.randint( 0, n )
Xi = X[i,:]
expt = np.exp( Y[i] * ( Xi.dot( w ) + bias[i] ))
return np.sum ( (1.0 / ( expt + 1.0 )) / ( expt + 1.0 ) )
# batch
elif b < n:
# get a random batch of size b
index = random.sample( range( n ), b )
# calculate number of batches
if nnzX is None:
nnzX = d
batch_size = np.maximum( int( total_mem_full // nnzX ), 1 )
num_batches = math.ceil( b / batch_size )
batch_loss = 0.0
for j in range( num_batches ):
# calculate start/end indices for each batch
startIdx = batch_size * j
endIdx = np.minimum( batch_size * ( j + 1 ), b - 1 )
batch_X = X[index[startIdx:endIdx],:]
batch_Y = Y[index[startIdx:endIdx]]
batch_bias = bias[index[startIdx:endIdx]]
expt = np.exp( batch_Y * ( batch_X.dot( w ) + batch_bias ))
batch_loss += np.sum ( (1.0 / ( expt + 1.0 )) / ( expt + 1.0 ) )
return batch_loss / float( b )
# batch_X = X[index,:]
# batch_Y = Y[index]
# batch_bias = bias[index]
# expt = np.exp( batch_Y * ( batch_X.dot( w ) + batch_bias ))
# return ( 1.0/float( b )) * np.sum ( 1.0 / ( expt + 1.0 ) / ( expt + 1.0 ) )
else:
if XYw_bias is not None:
expt = np.exp( XYw_bias )
return ( 1.0/float( n )) * np.sum ( (1.0 / ( expt + 1.0 )) / ( expt + 1.0 ) )
else:
# calculate number of batches
if nnzX is None:
nnzX = d
batch_size = np.maximum( int( total_mem_full // nnzX ), 1 )
num_batches = math.ceil( n / batch_size )
full_loss = 0.0
for j in range( num_batches ):
# calculate start/end indices for each batch
startIdx = batch_size * j
endIdx = np.minimum( batch_size * ( j + 1 ), n - 1 )
batch_X = X[startIdx:endIdx,:]
batch_Y = Y[startIdx:endIdx]
batch_bias = bias[startIdx:endIdx]
expt = np.exp( batch_Y * ( batch_X.dot( w ) + batch_bias ))
full_loss += np.sum ( (1.0 / ( expt + 1.0 )) / ( expt + 1.0 ) )
return full_loss / float( n )
def func_diff_eval_bin_class_loss_2( n, d, b, X, Y, bias, w1, w2, lamb = None, XYw_bias = None, nnzX = None ):
"""! Compute the objective value of loss function 2
\f$\ell_2( Y( Xw + b )) := \left( 1 - \frac{1}{1 + \exp[ -Y( Xw + b )]}\right )^2 \f$
for a given \f$ \omega > 0\f$.
Parameters
----------
@param n : sample size
@param d : number of features
@param b : mini - batch size
b = 1: single stochastic gradient
1 < b < n: mini - batch stochastic gradient
b = n: full gradient
@param X : input data
@param Y : input label
@param bias : input bias
@param w1 : 1st input vector
@param w2 : 2nd input vector
@param lamb: penalty parameters
@param XYw_bias : precomputed Y(Xw + b) if available
@param nnzX : average number of non - zero elements for each sample
@param index : index set for mini-batch calculation
Returns
----------
@retval : \f$\ell_2( Y( Xw + b ))\f$
"""
if b == 1:
# get a random sample
i = np.random.randint( 0, n )
Xi = X[i,:]
expt1 = np.exp( Y[i] * ( Xi.dot( w1 ) + bias[i] ))
expt2 = np.exp( Y[i] * ( Xi.dot( w2 ) + bias[i] ))
return np.sum ( (1.0 / ( expt2 + 1.0 )) / ( expt2 + 1.0 ) - (1.0 / ( expt1 + 1.0 )) / ( expt1 + 1.0 ) )
# batch
elif b < n:
# get a random batch of size b
index = random.sample( range( n ), b )
# calculate number of batches
if nnzX is None:
nnzX = d
batch_size = np.maximum( int( total_mem_full // nnzX ), 1 )
num_batches = math.ceil( b / batch_size )
batch_loss = 0.0
for j in range( num_batches ):
# calculate start/end indices for each batch
startIdx = batch_size * j
endIdx = np.minimum( batch_size * ( j + 1 ), b - 1 )
batch_X = X[index[startIdx:endIdx],:]
batch_Y = Y[index[startIdx:endIdx]]
batch_bias = bias[index[startIdx:endIdx]]
expt1 = np.exp( batch_Y * ( batch_X.dot( w1 ) + batch_bias ))
expt2 = np.exp( batch_Y * ( batch_X.dot( w2 ) + batch_bias ))
batch_loss += np.sum ( (1.0 / ( expt2 + 1.0 )) / ( expt2 + 1.0 ) - (1.0 / ( expt1 + 1.0 )) / ( expt1 + 1.0 ) )
return batch_loss / float( b )
# batch_X = X[index,:]
# batch_Y = Y[index]
# batch_bias = bias[index]
# expt = np.exp( batch_Y * ( batch_X.dot( w ) + batch_bias ))
# return ( 1.0/float( b )) * np.sum ( 1.0 / ( expt + 1.0 ) / ( expt + 1.0 ) )
else:
if XYw_bias is not None:
expt = np.exp( XYw_bias )
return ( 1.0/float( n )) * np.sum ( (1.0 / ( expt + 1.0 )) / ( expt + 1.0 ) )
else:
# calculate number of batches
if nnzX is None:
nnzX = d
batch_size = np.maximum( int( total_mem_full // nnzX ), 1 )
num_batches = math.ceil( n / batch_size )
full_loss = 0.0
for j in range( num_batches ):
# calculate start/end indices for each batch
startIdx = batch_size * j
endIdx = np.minimum( batch_size * ( j + 1 ), n - 1 )
batch_X = X[startIdx:endIdx,:]
batch_Y = Y[startIdx:endIdx]
batch_bias = bias[startIdx:endIdx]
expt1 = np.exp( batch_Y * ( batch_X.dot( w1 ) + batch_bias ))
expt2 = np.exp( batch_Y * ( batch_X.dot( w2 ) + batch_bias ))
full_loss += np.sum ( (1.0 / ( expt2 + 1.0 )) / ( expt2 + 1.0 ) - (1.0 / ( expt1 + 1.0 )) / ( expt1 + 1.0 ) )
return full_loss / float( n )
def grad_eval_bin_class_loss_2( n, d, b, X, Y, bias, w, lamb = None, nnzX = None ):
"""! Compute the ( full/stochastic ) gradient of loss function 2.
\f$\ell_2( Y( Xw + b )) := \left( 1 - \frac{1}{1 + \exp[ -Y( Xw + b )]}\right )^2 \f$
Parameters
----------
@param n : sample size
@param d : number of features
@param b : mini - batch size
b = 1: single stochastic gradient
1 < b < n: mini - batch stochastic gradient
b = n: full gradient
@param X : input data
@param Y : input label
@param bias : input bias
@param w : input vector
@param lamb: penalty parameters
@param nnzX : average number of non - zero elements for each sample
@param index : index set for mini-batch calculation
Returns
----------
@retval : computed full/stochastic gradient
@retval XYw_bias: The precomputed \f$ Y( Xw + bias )\f$
"""
# single sample
if nnzX is None:
nnzX = d
if b == 1:
# get a random sample
i = np.random.randint( 0, n )
Xi = X[i, :]
expt = np.exp( Y[i] * ( Xi.dot( w ) + bias[i] ))
return ( - 2.0 * ( ((expt/( 1.0 + expt ))/( 1.0 + expt ))/( 1.0 + expt )) * Y[i] ) * Xi
# batch
elif b < n:
# get a random batch of size b
index = random.sample( range( n ), b )
# calculate number of batches
if nnzX == 0:
nnzX = d
batch_size = np.maximum( int( total_mem_full // nnzX ), 1 )
num_batches = math.ceil( b / batch_size )
batch_grad = np.zeros( d )
for j in range( num_batches ):
# calculate start/end indices for each batch
startIdx = batch_size * j
endIdx = np.minimum( batch_size * ( j + 1 ), b - 1 )
batch_X = X[index[startIdx:endIdx],:]
batch_Y = Y[index[startIdx:endIdx]]
batch_bias = bias[index[startIdx:endIdx]]
expt = np.exp( batch_Y * ( batch_X.dot( w ) + batch_bias ))
batch_grad -= 2.0 * batch_X.transpose().dot( batch_Y \
* ( ((expt/( 1.0 + expt ))/( 1.0 + expt ))/( 1.0 + expt )) )
return batch_grad / float( b )
# full
else:
# calculate number of batches
if nnzX == 0:
nnzX = d
batch_size = np.maximum( int( total_mem_full // nnzX ), 1 )
num_batches = math.ceil( b / batch_size )
full_grad = np.zeros( d )
XYw_bias = np.zeros( n )
for j in range( num_batches ):
# calculate start/end indices for each batch
startIdx = batch_size * j
endIdx = np.minimum( batch_size * ( j + 1 ), n - 1 )
batch_X = X[startIdx:endIdx]
batch_Y = Y[startIdx:endIdx]
batch_bias = bias[startIdx:endIdx]
batch_XYw_bias = batch_Y * ( batch_X.dot( w ) + batch_bias )
XYw_bias[startIdx:endIdx] = batch_XYw_bias
expt = np.exp( batch_XYw_bias )
full_grad -= 2.0 * batch_X.transpose().dot( batch_Y * ( ((expt/( expt + 1 ))/( expt + 1 ))/( expt + 1 )) )
return full_grad / float( n ), XYw_bias
def grad_diff_eval_bin_class_loss_2( n, d, b, X, Y, bias, w1, w2, lamb = None, nnzX = None ):
"""! Compute the ( full/stochastic ) gradient difference of loss function 2
\f$\displaystyle\frac{1}{b}\left( \sum_{i \in \mathcal{B}_t}( \nabla f_i( w_2 ) - \nabla f_i( w_1 )) \right ) \f$
Parameters
----------
@param n : sample size
@param d : number of features
@param b : mini - batch size
b = 1: single stochastic gradient
1 < b < n: mini - batch stochastic gradient
b = n: full gradient
@param X : input data
@param Y : input label
@param bias : input bias
@param w1 : 1st input vector
@param w2 : 2nd input vector
@param lamb: penalty parameters
@param nnzX : average number of non - zero elements for each sample
@param index : index set for mini-batch calculation
Returns
----------
@retval : computed full/stochastic gradient
"""
# single sample
if nnzX is None:
nnzX = d
if b == 1:
# get a random sample
i = np.random.randint( 0, n )
Xi = X[i,:]
expt1 = np.exp( Y[i] * ( Xi.dot( w1 ) + bias[i] ))
expt2 = np.exp( Y[i] * ( Xi.dot( w2 ) + bias[i] ))
diff_expt = ( ((expt2/( 1.0 + expt2 ))/( 1.0 + expt2 ))/( 1.0 + expt2 )) - ( ((expt1/( 1.0 + expt1 ))/( 1.0 + expt1 ))/( 1.0 + expt1 ))
return - 2.0 * diff_expt * Y[i] * Xi
# batch
elif b < n:
# get a random batch of size b
index = random.sample( range( n ), b )
# calculate number of batches
if nnzX == 0:
nnzX = d
batch_size = np.maximum( int( total_mem_full // nnzX ), 1 )
num_batches = math.ceil( b / batch_size )
batch_grad_diff = np.zeros( d )
for j in range( num_batches ):
# calculate start/end indices for each batch
startIdx = batch_size * j
endIdx = np.minimum( batch_size * ( j + 1 ), b - 1 )
batch_X = X[index[startIdx:endIdx],:]
batch_Y = Y[index[startIdx:endIdx]]
batch_bias = bias[index[startIdx:endIdx]]
expt1 = np.exp( batch_Y * ( batch_X.dot( w1 ) + batch_bias ))
expt2 = np.exp( batch_Y * ( batch_X.dot( w2 ) + batch_bias ))
diff_expt = ((expt2/( 1.0 + expt2 ))/( 1.0 + expt2 ))/( 1.0 + expt2 ) - ((expt1/( 1.0 + expt1 ))/( 1.0 + expt1 ))/( 1.0 + expt1 )
batch_grad_diff -= 2.0 * batch_X.transpose().dot( batch_Y * diff_expt )
return batch_grad_diff / float( b )
# full
else:
# calculate number of batches
if nnzX == 0:
nnzX = d
batch_size = np.maximum( int( total_mem_full // nnzX ), 1 )
num_batches = math.ceil( b / batch_size )
full_grad_diff = np.zeros( d )
for j in range( num_batches ):
# calculate start/end indices for each batch
startIdx = batch_size * j
endIdx = np.minimum( batch_size * ( j + 1 ), n - 1 )
batch_X = X[startIdx:endIdx]
batch_Y = Y[startIdx:endIdx]
batch_bias = bias[startIdx:endIdx]
expt1 = np.exp( batch_Y * ( batch_X.dot( w1 ) + batch_bias ))
expt2 = np.exp( batch_Y * ( batch_X.dot( w2 ) + batch_bias ))
diff_expt = ((expt2/( 1.0 + expt2 ))/( 1.0 + expt2 ))/( 1.0 + expt2 ) - ((expt1/( 1.0 + expt1 ))/( 1.0 + expt1 ))/( 1.0 + expt1 )
full_grad_diff -= 2.0 * batch_X.transpose().dot( batch_Y * diff_expt )
return full_grad_diff / float( n )
##################################################################
def func_val_bin_class_loss_3( n, d, b, X, Y, bias, w, lamb = None, XYw_bias = None, nnzX = None ):
"""! Compute the objective value of loss function 3
\f$ \ell_3( Y( Xw + b )) := \ln( 1 + \exp( -Y( Xw + b )) ) - \ln( 1 + \exp( -Y( Xw + b ) - \alpha ))\f$
for a given \f$ \alpha > 0\f$.
Parameters
----------
@param n : sample size
@param d : number of features
@param b : mini - batch size
b = 1: single stochastic gradient
1 < b < n: mini - batch stochastic gradient
b = n: full gradient
@param X : input data
@param Y : input label
@param bias : input bias
@param w : input vector
@param lamb: penalty parameters
@param XYw_bias : precomputed Y(Xw + b) if available
@param nnzX : average number of non - zero elements for each sample
@param index : index set for mini-batch calculation
Returns
----------
@retval : \f$\ell_3( Y( Xw + b ))\f$
"""
alpha = 1.0
exp_a = np.exp( - alpha )
if b == 1:
# get a random sample
i = np.random.randint( 0, n )
Xi = X[i,:]
expt = np.exp( -Y[i] * ( Xi.dot( w ) + bias[i] ))
return np.sum( np.log( 1.0 + expt ) - np.log( 1.0 + exp_a * expt ) )
# batch
elif b < n:
# get a random batch of size b
index = random.sample( range( n ), b )
# calculate number of batches
if nnzX is None:
nnzX = d
batch_size = np.maximum( int( total_mem_full // nnzX ), 1 )
num_batches = math.ceil( b / batch_size )
batch_loss = 0.0
for j in range( num_batches ):
# calculate start/end indices for each batch
startIdx = batch_size * j
endIdx = np.minimum( batch_size * ( j + 1 ), b - 1 )
batch_X = X[index[startIdx:endIdx],:]
batch_Y = Y[index[startIdx:endIdx]]
batch_bias = bias[index[startIdx:endIdx]]
expt = np.exp( -batch_Y * ( batch_X.dot( w ) + batch_bias ))
batch_loss += np.sum( np.log( 1.0 + expt ) - np.log( 1.0 + exp_a * expt ) )
return batch_loss / float( b )
else:
if XYw_bias is not None:
expt = np.exp( - XYw_bias )
return ( 1.0 / float( n )) * np.sum(( np.log( 1.0 + expt ) - np.log( 1.0 + exp_a * expt )) )
else:
# calculate number of batches
if nnzX is None:
nnzX = d
batch_size = np.maximum( int( total_mem_full // nnzX ), 1 )
num_batches = math.ceil( n / batch_size )
full_loss = 0.0
for j in range( num_batches ):
# calculate start/end indices for each batch
startIdx = batch_size * j
endIdx = np.minimum( batch_size * ( j + 1 ), n - 1 )
batch_X = X[startIdx:endIdx,:]
batch_Y = Y[startIdx:endIdx]
batch_bias = bias[startIdx:endIdx]
expt = np.exp( -batch_Y * ( batch_X.dot( w ) + batch_bias ))
full_loss += np.sum( np.log( 1.0 + expt ) - np.log( 1.0 + exp_a * expt ) )
return full_loss / float( n )
def func_diff_eval_bin_class_loss_3( n, d, b, X, Y, bias, w1, w2, lamb = None, XYw_bias = None, nnzX = None ):
"""! Compute the objective value of loss function 3
\f$ \ell_3( Y( Xw2 + b )) - \ell_3( Y( Xw1 + b )) \f$
Parameters
----------
@param n : sample size
@param d : number of features
@param b : mini - batch size
b = 1: single stochastic gradient
1 < b < n: mini - batch stochastic gradient
b = n: full gradient
@param X : input data
@param Y : input label
@param bias : input bias
@param w1 : 1st input vector
@param w2 : 2nd input vector
@param lamb: penalty parameters
@param XYw_bias : precomputed Y(Xw + b) if available
@param nnzX : average number of non - zero elements for each sample
@param index : index set for mini-batch calculation
Returns
----------
"""
alpha = 1.0
exp_a = np.exp( - alpha )
if b == 1:
# get a random sample
i = np.random.randint( 0, n )
Xi = X[i,:]
expt1 = np.exp( -Y[i] * ( Xi.dot( w1 ) + bias[i] ))
expt2 = np.exp( -Y[i] * ( Xi.dot( w2 ) + bias[i] ))
return np.sum( (np.log( 1.0 + expt2 ) - np.log( 1.0 + exp_a * expt2 )) - (np.log( 1.0 + expt1 ) - np.log( 1.0 + exp_a * expt1 ) ) )
# batch
elif b < n:
# get a random batch of size b
index = random.sample( range( n ), b )
# calculate number of batches
if nnzX is None:
nnzX = d
batch_size = np.maximum( int( total_mem_full // nnzX ), 1 )
num_batches = math.ceil( b / batch_size )
batch_loss_diff = 0.0
for j in range( num_batches ):
# calculate start/end indices for each batch
startIdx = batch_size * j
endIdx = np.minimum( batch_size * ( j + 1 ), b - 1 )
batch_X = X[index[startIdx:endIdx],:]
batch_Y = Y[index[startIdx:endIdx]]
batch_bias = bias[index[startIdx:endIdx]]
expt1 = np.exp( -batch_Y * ( batch_X.dot( w1 ) + batch_bias ))
expt2 = np.exp( -batch_Y * ( batch_X.dot( w2 ) + batch_bias ))
batch_loss_diff += np.sum( (np.log( 1.0 + expt2 ) - np.log( 1.0 + exp_a * expt2 )) - (np.log( 1.0 + expt1 ) - np.log( 1.0 + exp_a * expt1 )) )
return batch_loss_diff / float( b )
else:
if XYw_bias is not None:
expt = np.exp( - XYw_bias )
return ( 1.0 / float( n )) * np.sum(( np.log( 1.0 + expt ) - np.log( 1.0 + exp_a * expt )) )
else:
# calculate number of batches
if nnzX is None:
nnzX = d
batch_size = np.maximum( int( total_mem_full // nnzX ), 1 )
num_batches = math.ceil( n / batch_size )
full_loss_diff = 0.0
for j in range( num_batches ):
# calculate start/end indices for each batch
startIdx = batch_size * j
endIdx = np.minimum( batch_size * ( j + 1 ), n - 1 )
batch_X = X[startIdx:endIdx,:]
batch_Y = Y[startIdx:endIdx]
batch_bias = bias[startIdx:endIdx]
expt1 = np.exp( -batch_Y * ( batch_X.dot( w1 ) + batch_bias ))
expt2 = np.exp( -batch_Y * ( batch_X.dot( w2 ) + batch_bias ))
full_loss_diff += np.sum( (np.log( 1.0 + expt2 ) - np.log( 1.0 + exp_a * expt2 )) - (np.log( 1.0 + expt1 ) - np.log( 1.0 + exp_a * expt1 )) )
return full_loss_diff / float( n )
def grad_eval_bin_class_loss_3( n, d, b, X, Y, bias, w, lamb = None, nnzX = None ):
"""! Compute the ( full/stochastic ) gradient of loss function 3.
    where \f$ \ell_3( Y( Xw + b )) := \ln( 1 + \exp( -Y( Xw + b )) ) - \ln( 1 + \exp( -Y( Xw + b ) - \alpha ))\f$
    for a given \f$ \alpha > 0\f$.
Parameters
----------
@param n : sample size
@param d : number of features
@param b : mini - batch size
b = 1: single stochastic gradient
1 < b < n: mini - batch stochastic gradient
b = n: full gradient
@param X : input data
@param Y : input label
@param bias : input bias
@param w : input vector
@param lamb: penalty parameters
@param nnzX : average number of non - zero elements for each sample
@param index : index set for mini-batch calculation
Returns
----------
@retval : computed full/stochastic gradient
@retval XYw_bias: The precomputed \f$ Y( Xw + bias )\f$
"""
if nnzX is None:
nnzX = d
alpha = 1
exp_a = np.exp( alpha )
# single sample
if b == 1:
# get a random sample
i = np.random.randint( 0, n )
Xi = X[i, :]
expt = np.exp( Y[i] * ( Xi.dot( w ) + bias[i] ))
return (( 1 / ( expt * exp_a + 1.0 ) - 1 / ( expt + 1.0 )) * Y[i] ) * Xi
# batch
elif b < n:
# get a random batch of size b
index = random.sample( range( n ), b )
# calculate number of batches
if nnzX == 0:
nnzX = d
batch_size = np.maximum( int( total_mem_full // nnzX ), 1 )
num_batches = math.ceil( b / batch_size )
batch_grad = np.zeros( d )
for j in range( num_batches ):
# calculate start/end indices for each batch
startIdx = batch_size * j
endIdx = np.minimum( batch_size * ( j + 1 ), b - 1 )
batch_X = X[index[startIdx:endIdx],:]
batch_Y = Y[index[startIdx:endIdx]]
batch_bias = bias[index[startIdx:endIdx]]
expt = np.exp( batch_Y * ( batch_X.dot( w ) + batch_bias ))
batch_grad += batch_X.transpose().dot( batch_Y * ( 1 / ( expt * exp_a + 1.0 ) - 1 / ( expt + 1.0 )) )
return batch_grad / float( b )
# full
else:
# calculate number of batches
if nnzX == 0:
nnzX = d
batch_size = np.maximum( int( total_mem_full // nnzX ), 1 )
num_batches = math.ceil( b / batch_size )
full_grad = np.zeros( d )
XYw_bias = np.zeros( n )
for j in range( num_batches ):
# calculate start/end indices for each batch
startIdx = batch_size * j
endIdx = np.minimum( batch_size * ( j + 1 ), n - 1 )
batch_X = X[startIdx:endIdx,:]
batch_Y = Y[startIdx:endIdx]
batch_bias = bias[startIdx:endIdx]
batch_XYw_bias = batch_Y * ( batch_X.dot( w ) + batch_bias )
XYw_bias[startIdx:endIdx] = batch_XYw_bias
expt = np.exp( batch_XYw_bias )
full_grad += batch_X.transpose().dot( batch_Y * ( 1.0/ ( expt * exp_a + 1.0 ) - 1.0/ ( expt + 1.0 )) )
return full_grad / float( n ), XYw_bias
def grad_diff_eval_bin_class_loss_3( n, d, b, X, Y, bias, w1, w2, lamb = None, nnzX = None ):
"""! Compute the ( full/stochastic ) gradient difference of loss function 3
\f$\displaystyle\frac{1}{b}\left( \sum_{i \in \mathcal{B}_t}( \nabla f_i( w_2 ) - \nabla f_i( w_1 )) \right ) \f$
Parameters
----------
@param n : sample size
@param d : number of features
@param b : mini - batch size
b = 1: single stochastic gradient
1 < b < n: mini - batch stochastic gradient
b = n: full gradient
@param X : input data
@param Y : input label
@param bias : input bias
@param w1 : 1st input vector
@param w2 : 2nd input vector
@param lamb: penalty parameters
@param nnzX : average number of non - zero elements for each sample
@param index : index set for mini-batch calculation
Returns
----------
@retval : computed full/stochastic gradient
"""
if nnzX is None:
nnzX = d
alpha = 1
exp_a = np.exp( alpha )
# single sample
if b == 1:
# get a random sample
i = np.random.randint( 0, n )
Xi = X[i,:]
expt1 = np.exp( Y[i] * ( Xi.dot( w1 ) + bias[i] ))
expt2 = np.exp( Y[i] * ( Xi.dot( w2 ) + bias[i] ))
diff_expt = ( 1.0/( expt2 * exp_a + 1.0 ) - 1.0/( expt2 + 1.0 )) - ( 1.0/( expt1 * exp_a + 1.0 ) - 1.0/( expt1 + 1.0 ))
return diff_expt * Y[i] * Xi
# batch
elif b < n:
# get a random batch of size b
index = random.sample( range( n ), b )
# calculate number of batches
if nnzX == 0:
nnzX = d
        batch_size = np.maximum( int( total_mem_batch // nnzX ), 1 )
num_batches = math.ceil( b / batch_size )
batch_grad_diff = np.zeros( d )
for j in range( num_batches ):
# calculate start/end indices for each batch
startIdx = batch_size * j
endIdx = np.minimum( batch_size * ( j + 1 ), b - 1 )
batch_X = X[index[startIdx:endIdx],:]
batch_Y = Y[index[startIdx:endIdx]]
batch_bias = bias[index[startIdx:endIdx]]
expt1 = np.exp( batch_Y * ( batch_X.dot( w1 ) + batch_bias ))
expt2 = np.exp( batch_Y * ( batch_X.dot( w2 ) + batch_bias ))
diff_expt = ( 1/( expt2 * exp_a + 1.0 ) - 1/( expt2 + 1.0 )) - ( 1/( expt1 * exp_a + 1.0 ) - 1/( expt1 + 1.0 ))
batch_grad_diff += batch_X.transpose().dot( batch_Y * diff_expt )
return batch_grad_diff / float( b )
# full
else:
# calculate number of batches
if nnzX == 0:
nnzX = d
batch_size = int( total_mem_full // nnzX )
num_batches = math.ceil( b / batch_size )
full_grad_diff = np.zeros( d )
for j in range( num_batches ):
# calculate start/end indices for each batch
startIdx = batch_size * j
endIdx = np.minimum( batch_size * ( j + 1 ), n - 1 )
batch_X = X[startIdx:endIdx]
batch_Y = Y[startIdx:endIdx]
batch_bias = bias[startIdx:endIdx]
expt1 = np.exp( batch_Y * ( batch_X.dot( w1 ) + batch_bias ))
expt2 = np.exp( batch_Y * ( batch_X.dot( w2 ) + batch_bias ))
diff_expt = ( 1/( expt2 * exp_a + 1.0 ) - 1/( expt2 + 1.0 )) - ( 1/( expt1 * exp_a + 1.0 ) - 1/( expt1 + 1.0 ))
full_grad_diff += batch_X.transpose().dot( batch_Y * diff_expt )
return full_grad_diff / float( n )
##################################################################
def func_val_bin_class_loss_4( n, d, b, X, Y, bias, w, lamb = None, XYw_bias = None, nnzX = None ):
"""! Compute the objective value of loss function 4
\f$ \ell_4^{(i)}( Y_i( X_i^Tw + b )) := \begin{cases}
0,~&\text{if }Y_i( X_i^Tw + b ) > 1\\
\ln(1 + (Y_i( X_i^Tw + b ) - 1)^2),&\text{otherwise}
\end{cases}\f$
Parameters
----------
@param n : sample size
@param d : number of features
@param b : mini - batch size
b = 1: single stochastic gradient
1 < b < n: mini - batch stochastic gradient
b = n: full gradient
@param X : input data
@param Y : input label
@param bias : input bias
@param w : input vector
@param lamb: penalty parameters
@param XYw_bias : precomputed Y(Xw + b) if available
@param nnzX : average number of non - zero elements for each sample
@param index : index set for mini-batch calculation
Returns
----------
@retval : \f$\ell_4( Y( Xw + b )) = [\ell_4^{(0)},\ell_4^{(1)},\dots,\ell_4^{(n-1)}]^T\f$
"""
if b == 1:
# get a random sample
i = np.random.randint( 0, n )
Xi = X[i,:]
XYw_bias = Y[i] * ( Xi.dot( w ) + bias[i] )
return (XYw_bias <= 1) * ( np.log( 1 + np.square( XYw_bias - 1 ) ))
# batch
elif b < n:
# get a random batch of size b
index = random.sample( range( n ), b )
# calculate number of batches
if nnzX is None:
nnzX = d
batch_size = np.maximum( int( total_mem_full // nnzX ), 1 )
num_batches = math.ceil( b / batch_size )
batch_loss = 0.0
for j in range( num_batches ):
# calculate start/end indices for each batch
startIdx = batch_size * j
endIdx = np.minimum( batch_size * ( j + 1 ), b - 1 )
batch_X = X[index[startIdx:endIdx],:]
batch_Y = Y[index[startIdx:endIdx]]
batch_bias = bias[index[startIdx:endIdx]]
batch_XYw_bias = batch_Y * ( batch_X.dot( w ) + batch_bias )
batch_loss += np.sum((batch_XYw_bias <= 1) * ( np.log( 1 + np.square( batch_XYw_bias - 1 ) )))
return batch_loss / float( b )
else:
if XYw_bias is not None:
return ( 1.0 / float( n )) * ( np.sum((XYw_bias <= 1) * ( np.log( 1 + np.square( XYw_bias - 1 ) ))))
else:
# calculate number of batches
if nnzX is None:
nnzX = d
batch_size = np.maximum( int( total_mem_full // nnzX ), 1 )
num_batches = math.ceil( n / batch_size )
full_loss = 0.0
for j in range( num_batches ):
# calculate start/end indices for each batch
startIdx = batch_size * j
endIdx = np.minimum( batch_size * ( j + 1 ), n - 1 )
batch_X = X[startIdx:endIdx,:]
batch_Y = Y[startIdx:endIdx]
batch_bias = bias[startIdx:endIdx]
batch_XYw_bias = batch_Y * ( batch_X.dot( w ) + batch_bias )
full_loss += np.sum((batch_XYw_bias <= 1) * ( np.log( 1 + np.square( batch_XYw_bias - 1 ) )))
return full_loss / float( n )
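# --- Illustration only (assumes numpy imported as np, as above) ---------------
# The piecewise loss from the docstring, l4(t) = 0 if t > 1 and
# ln(1 + (t - 1)^2) otherwise, with t = Y*(Xw + bias), evaluated in the same
# masked/vectorized form used by func_val_bin_class_loss_4:
_t = np.array([-0.5, 0.8, 1.0, 2.3])   # hypothetical margins Y*(Xw + bias)
_l4 = (_t <= 1) * np.log(1.0 + np.square(_t - 1.0))
# _l4[-1] == 0: a margin larger than 1 incurs no loss.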
def func_diff_eval_bin_class_loss_4( n, d, b, X, Y, bias, w1, w2, lamb = None, XYw_bias = None, nnzX = None ):
"""! Compute the objective value of loss function 3
\f$ \ell_4( Y( Xw2 + b )) - \ell_4( Y( Xw1 + b )) \f$
Parameters
----------
@param n : sample size
@param d : number of features
@param b : mini - batch size
b = 1: single stochastic gradient
1 < b < n: mini - batch stochastic gradient
b = n: full gradient
@param X : input data
@param Y : input label
@param bias : input bias
@param w1 : 1st input vector
@param w2 : 2nd input vector
@param lamb: penalty parameters
@param XYw_bias : precomputed Y(Xw + b) if available
@param nnzX : average number of non - zero elements for each sample
@param index : index set for mini-batch calculation
Returns
----------
"""
if b == 1:
# get a random sample
i = np.random.randint( 0, n )
Xi = X[i,:]
XYw_bias1 = Y[i] * ( Xi.dot( w1 ) + bias[i] )
XYw_bias2 = Y[i] * ( Xi.dot( w2 ) + bias[i] )
return (XYw_bias2 <= 1) * ( np.log( 1 + np.square( XYw_bias2 - 1 ) )) \
- (XYw_bias1 <= 1) * ( np.log( 1 + np.square( XYw_bias1 - 1 ) ))
# batch
elif b < n:
# get a random batch of size b
index = random.sample( range( n ), b )
# calculate number of batches
if nnzX is None:
nnzX = d
batch_size = np.maximum( int( total_mem_full // nnzX ), 1 )
num_batches = math.ceil( b / batch_size )
batch_loss_diff = 0.0
for j in range( num_batches ):
# calculate start/end indices for each batch
startIdx = batch_size * j
endIdx = np.minimum( batch_size * ( j + 1 ), b - 1 )
batch_X = X[index[startIdx:endIdx],:]
batch_Y = Y[index[startIdx:endIdx]]
batch_bias = bias[index[startIdx:endIdx]]
batch_XYw_bias1 = batch_Y * ( batch_X.dot( w1 ) + batch_bias )
batch_XYw_bias2 = batch_Y * ( batch_X.dot( w2 ) + batch_bias )
batch_loss_diff += np.sum((batch_XYw_bias2 <= 1) * ( np.log( 1 + np.square( batch_XYw_bias2 - 1 ) )))\
- np.sum((batch_XYw_bias1 <= 1) * ( np.log( 1 + np.square( batch_XYw_bias1 - 1 ) )))
return batch_loss_diff / float( b )
else:
# calculate number of batches
if nnzX is None:
nnzX = d
batch_size = np.maximum( int( total_mem_full // nnzX ), 1 )
num_batches = math.ceil( n / batch_size )
full_loss_diff = 0.0
for j in range( num_batches ):
# calculate start/end indices for each batch
startIdx = batch_size * j
endIdx = np.minimum( batch_size * ( j + 1 ), n - 1 )
batch_X = X[startIdx:endIdx,:]
batch_Y = Y[startIdx:endIdx]
batch_bias = bias[startIdx:endIdx]
batch_XYw_bias1 = batch_Y * ( batch_X.dot( w1 ) + batch_bias )
batch_XYw_bias2 = batch_Y * ( batch_X.dot( w2 ) + batch_bias )
full_loss_diff += np.sum((batch_XYw_bias2 <= 1) * ( np.log( 1 + np.square( batch_XYw_bias2 - 1 ) )))\
- np.sum((batch_XYw_bias1 <= 1) * ( np.log( 1 + np.square( batch_XYw_bias1 - 1 ) )))
return full_loss_diff / float( n )
def grad_eval_bin_class_loss_4( n, d, b, X, Y, bias, w, lamb = None, nnzX = None ):
"""! Compute the ( full/stochastic ) gradient of loss function 4.
where \f$ \ell_4^{(i)}( Y_i( X_i^Tw + b )) := \begin{cases}
0,~&\text{if }Y_i( X_i^Tw + b ) > 1\\
\ln(1 + (Y_i( X_i^Tw + b ) - 1)^2),&\text{otherwise}
\end{cases}\f$
Parameters
----------
@param n : sample size
@param d : number of features
@param b : mini - batch size
b = 1: single stochastic gradient
1 < b < n: mini - batch stochastic gradient
b = n: full gradient
@param X : input data
@param Y : input label
@param bias : input bias
@param w : input vector
@param lamb: penalty parameters
@param nnzX : average number of non - zero elements for each sample
@param index : index set for mini-batch calculation
Returns
----------
@retval : computed full/stochastic gradient
@retval XYw_bias: The precomputed \f$ Y( Xw + bias )\f$
"""
if nnzX is None:
nnzX = d
# single sample
if b == 1:
# get a random sample
i = np.random.randint( 0, n )
Xi = X[i, :]
tempV = Y[i] * ( Xi.dot( w ) + bias[i] ) - 1
return ( 2 * tempV * Y[i] / ( 1 + np.square( tempV ) )) * Xi
# batch
elif b < n:
# get a random batch of size b
index = random.sample( range( n ), b )
# calculate number of batches
if nnzX == 0:
nnzX = d
batch_size = np.maximum( int( total_mem_full // nnzX ), 1 )
num_batches = math.ceil( b / batch_size )
batch_grad = np.zeros( d )
for j in range( num_batches ):
# calculate start/end indices for each batch
startIdx = batch_size * j
endIdx = np.minimum( batch_size * ( j + 1 ), b - 1 )
batch_X = X[index[startIdx:endIdx],:]
batch_Y = Y[index[startIdx:endIdx]]
batch_bias = bias[index[startIdx:endIdx]]
tempVect = batch_Y * ( batch_X.dot( w ) + batch_bias ) - 1
batch_grad += batch_X.transpose().dot( batch_Y * 2 * tempVect / ( 1.0 + np.square( tempVect ) ))
return batch_grad / float( b )
# full
else:
# calculate number of batches
if nnzX == 0:
nnzX = d
batch_size = np.maximum( int( total_mem_full // nnzX ), 1 )
num_batches = math.ceil( b / batch_size )
full_grad = np.zeros( d )
XYw_bias = np.zeros( n )
for j in range( num_batches ):
# calculate start/end indices for each batch
startIdx = batch_size * j
endIdx = np.minimum( batch_size * ( j + 1 ), n - 1 )
batch_X = X[startIdx:endIdx,:]
batch_Y = Y[startIdx:endIdx]
batch_bias = bias[startIdx:endIdx]
batch_XYw_bias = batch_Y * ( batch_X.dot( w ) + batch_bias )
tempVect = batch_XYw_bias - 1
XYw_bias[startIdx:endIdx] = batch_XYw_bias
full_grad += batch_X.transpose().dot( batch_Y * 2 * tempVect / ( 1.0 + np.square( tempVect ) ))
return full_grad / float( n ), XYw_bias
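# --- Sketch (not part of the original code) -----------------------------------
# The factor 2*(t - 1)/(1 + (t - 1)^2) used above is the derivative of
# ln(1 + (t - 1)^2) w.r.t. t = Y_i*(X_i.dot(w) + bias_i); quick check against a
# central finite difference (numpy assumed imported as np above):
_t0, _h = 0.4, 1e-6
_fd = (np.log(1 + (_t0 + _h - 1)**2) - np.log(1 + (_t0 - _h - 1)**2)) / (2 * _h)
assert abs(_fd - 2 * (_t0 - 1) / (1 + (_t0 - 1)**2)) < 1e-6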
def grad_diff_eval_bin_class_loss_4( n, d, b, X, Y, bias, w1, w2, lamb = None, nnzX = None ):
"""! Compute the ( full/stochastic ) gradient difference of loss function 3
\f$\displaystyle\frac{1}{b}\left( \sum_{i \in \mathcal{B}_t}( \nabla f_i( w_2 ) - \nabla f_i( w_1 )) \right ) \f$
Parameters
----------
@param n : sample size
@param d : number of features
@param b : mini - batch size
b = 1: single stochastic gradient
1 < b < n: mini - batch stochastic gradient
b = n: full gradient
@param X : input data
@param Y : input label
@param bias : input bias
@param w1 : 1st input vector
@param w2 : 2nd input vector
@param lamb: penalty parameters
@param nnzX : average number of non - zero elements for each sample
@param index : index set for mini-batch calculation
Returns
----------
@retval : computed full/stochastic gradient
"""
if nnzX is None:
nnzX = d
alpha = 1
exp_a = np.exp( alpha )
# single sample
if b == 1:
# get a random sample
i = np.random.randint( 0, n )
Xi = X[i,:]
tempV1 = Y[i] * ( Xi.dot( w1 ) + bias[i] ) - 1
tempV2 = Y[i] * ( Xi.dot( w2 ) + bias[i] ) - 1
diff = 2 * ( tempV2 / ( 1.0 + np.square(tempV2) ) - tempV1 / ( 1.0 +
|
np.square(tempV1)
|
numpy.square
|
import numpy as np
import bayesiancoresets as bc
import os, sys
from scipy.stats import multivariate_normal
#make it so we can import models/etc from parent folder
sys.path.insert(1, os.path.join(sys.path[0], '../common'))
import gaussian
M = 300 # log: 200
N = 600 # log: 1000
d = 200 # log: 200
opt_itrs = 500 # for SparseVI
proj_dim = 100
pihat_noise =0.75
mu0 = np.zeros(d)
Sig0 = np.eye(d)
Sig = np.eye(d)
SigL = np.linalg.cholesky(Sig)
th = np.ones(d)
Sig0inv = np.linalg.inv(Sig0)
Siginv =
|
np.linalg.inv(Sig)
|
numpy.linalg.inv
|
import os
import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors as c
import matplotlib.ticker as ticker
from decimal import Decimal
from risk import get_heatmap
'''
heatmap.py plots the output of risk.py as a heatmap graph with the given parameters.
The risk is a function of 3 variables: C, theta and r_loc. We follow this order in this script.
- i) fix one variable: param = (YOUR SELECTED VARIABLE)
ex. param = 0, that is "C" (as Python indexes from 0)
or param = 1, that is "theta"
or param = 2, that is "r_loc"
- ii) say you selected param = 2 (r_loc) to be fixed; the program then plots one heatmap for each r_loc value you provide in r_loc = [(YOUR LIST OF FIXED PARAMETERS)]
ex. r_loc = np.array([1.3,1.8,2.2,2.6])
- iii) specify the range of the other two variables you want to examine
ex. c = np.arange(100000, 1100000, 10000)
and theta = np.arange(0, 0.000255, 0.000001)
- iv) The program saves the heatmaps in a folder named after your selected variable in the "Heatmaps" folder
'''
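# Sketch of the workflow described above (illustrative values only, not the
# configuration used below): fix one of the three variables and sweep the other
# two on a 2-D grid, producing one heatmap per fixed value. The actual risk
# evaluation comes from risk.get_heatmap, whose signature is not shown here.
for _r_fixed in [1.3, 1.8]:
    _C_grid, _theta_grid = np.meshgrid(np.arange(100000, 1100000, 10000),
                                       np.arange(0, 0.000255, 0.000001))
    # ... evaluate the risk on (_C_grid, _theta_grid) for this fixed r_loc ...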
#----------------------------------------------------
# Give the values here:
param = 2 #param = 0 ( = C); param = 1 ( = theta); param = 2 ( = r_loc)
c = np.arange(100000, 1100000, 10000)
theta = np.arange(0, 0.000255, 0.000001)
r_loc =
|
np.array([1.05, 1.3, 1.8, 2.2, 2.6])
|
numpy.array
|
#!/usr/bin/env python3
import sys
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D
import numpy as np
###
# Enable latex for labels
plt.rcParams['text.usetex'] = True
###
# Parse the input file
f = open( sys.argv[1], 'r' )
x_lst = []
y_lst = []
z_lst = []
for line in f:
line = line.strip()
a = line.split()
x_lst.append( float( a[ 0 ] ) )
y_lst.append( float( a[ 1 ] ) )
z_lst.append( float( a[ 2 ] ) )
###
# Convert the data to NumPy array
x =
|
np.array(x_lst)
|
numpy.array
|
'''
Classes
-------
LearnAlg
Defines some generic routines for
* saving global parameters
* assessing convergence
* printing progress updates to stdout
* recording run-time
'''
import numpy as np
import time
import logging
import os
import sys
import scipy.io
import learnalg.ElapsedTimeLogger as ElapsedTimeLogger
from sklearn.externals import joblib
from bnpy.ioutil import ModelWriter
from bnpy.util import (
isEvenlyDivisibleFloat,
getMemUsageOfCurProcess_MiB,
)
Log = logging.getLogger('bnpy')
Log.setLevel(logging.DEBUG)
class LearnAlg(object):
""" Abstract base class for learning algorithms that train HModels.
Attributes
------
task_output_path : str
file system path to directory where files are saved
seed : int
seed used for random initialization
PRNG : np.random.RandomState
random number generator
algParams : dict
keyword parameters controlling algorithm behavior
default values for each algorithm live in config/
Can be overridden by keyword arguments specified by the user
outputParams : dict
keyword parameters controlling saving to file / logs
"""
def __init__(self, task_output_path=None, seed=0,
algParams=dict(), outputParams=dict(),
BNPYRunKwArgs=dict()):
''' Constructs and returns a LearnAlg object
'''
if isinstance(task_output_path, str):
self.task_output_path = os.path.splitext(task_output_path)[0]
else:
self.task_output_path = None
self.seed = int(seed)
self.PRNG = np.random.RandomState(self.seed)
self.algParams = algParams
self.outputParams = outputParams
self.BNPYRunKwArgs = BNPYRunKwArgs
self.lap_list = list()
self.loss_list = list()
self.K_list = list()
self.elapsed_time_sec_list = list()
self.SavedIters = set()
self.PrintIters = set()
self.totalDataUnitsProcessed = 0
self.status = 'active. not converged.'
self.algParamsLP = dict()
for k, v in algParams.items():
if k.count('LP') > 0:
self.algParamsLP[k] = v
def fit(self, hmodel, Data):
''' Execute learning algorithm to train hmodel on Data.
This method is extended by any subclass of LearnAlg
Returns
-------
Info : dict of diagnostics about this run
'''
pass
def set_random_seed_at_lap(self, lap):
''' Set internal random generator based on current lap.
Resets the seed deterministically for each lap,
using a combination of the seed attribute (unique to this run)
and the provided lap argument. This allows reproducing
exact values from this run later without starting over.
Post Condition
------
self.PRNG reset to a new random seed.
'''
if isEvenlyDivisibleFloat(lap, 1.0):
self.PRNG = np.random.RandomState(self.seed + int(lap))
def set_start_time_now(self):
''' Record start time (in seconds since 1970)
'''
self.start_time = time.time()
def updateNumDataProcessed(self, N):
''' Update internal count of number of data observations processed.
Each lap thru dataset of size N, this should be updated by N
'''
self.totalDataUnitsProcessed += N
def get_elapsed_time(self):
''' Returns float of elapsed time (in seconds) since this object's
set_start_time_now() method was called
'''
return time.time() - self.start_time
def buildRunInfo(self, Data, **kwargs):
''' Create dict of information about the current run.
Returns
------
Info : dict
contains information about completed run.
'''
# Convert TraceLaps from set to 1D array, sorted in ascending order
lap_history = np.asarray(self.lap_list)
loss_history = np.asarray(self.loss_list)
K_history = np.asarray(self.K_list)
elapsed_time_sec_history = np.asarray(self.elapsed_time_sec_list)
return dict(status=self.status,
task_output_path=self.task_output_path,
loss_history=loss_history,
lap_history=lap_history,
K_history=K_history,
elapsed_time_sec_history=elapsed_time_sec_history,
Data=Data,
elapsedTimeInSec=time.time() - self.start_time,
**kwargs)
def hasMove(self, moveName):
if moveName in self.algParams:
return True
return False
def verify_monotonic_decrease(
self, cur_loss=0.00001, prev_loss=0, lapFrac=None):
''' Verify current loss does not increase from previous loss
Returns
-------
boolean : True if monotonic decrease, False otherwise
'''
if np.isnan(cur_loss):
raise ValueError("Evidence should never be NaN")
if np.isinf(prev_loss):
return False
isDecreasing = cur_loss <= prev_loss
thr = self.algParams['convergeThrELBO']
isWithinTHR = np.abs(cur_loss - prev_loss) < thr
mLPkey = 'doMemoizeLocalParams'
if not isDecreasing and not isWithinTHR:
serious = True
if mLPkey in self.algParams and not self.algParams[mLPkey]:
warnMsg = 'loss increased when doMemoizeLocalParams=0'
warnMsg += '(monotonic decrease not guaranteed)\n'
serious = False
else:
warnMsg = 'loss increased!\n'
warnMsg += ' prev = % .15e\n' % (prev_loss)
warnMsg += ' cur = % .15e\n' % (cur_loss)
if lapFrac is None:
prefix = "WARNING: "
else:
prefix = "WARNING @ %.3f: " % (lapFrac)
if serious or not self.algParams['doShowSeriousWarningsOnly']:
Log.error(prefix + warnMsg)
def isSaveDiagnosticsCheckpoint(self, lap, nMstepUpdates):
''' Answer True/False whether to save trace stats now
'''
traceEvery = self.outputParams['traceEvery']
if traceEvery <= 0:
return False
return isEvenlyDivisibleFloat(lap, traceEvery) \
or nMstepUpdates < 3 \
or lap in self.lap_list \
or isEvenlyDivisibleFloat(lap, 1.0)
def saveDiagnostics(self, lap, SS, loss, ActiveIDVec=None):
''' Save trace stats to disk
'''
if lap in self.lap_list:
return
elapsed_time_sec = self.get_elapsed_time()
self.lap_list.append(lap)
self.loss_list.append(loss)
self.K_list.append(SS.K)
self.elapsed_time_sec_list.append(elapsed_time_sec)
# Exit here if we're not saving to disk
if self.task_output_path is None:
return
# Record current state to plain-text files
with open(self.mkfile('trace_lap.txt'), 'a') as f:
f.write('%.4f\n' % (lap))
with open(self.mkfile('trace_loss.txt'), 'a') as f:
f.write('%.9e\n' % (loss))
with open(self.mkfile('trace_elapsed_time_sec.txt'), 'a') as f:
f.write('%.3f\n' % (elapsed_time_sec))
with open(self.mkfile('trace_K.txt'), 'a') as f:
f.write('%d\n' % (SS.K))
with open(self.mkfile('trace_n_examples_total.txt'), 'a') as f:
f.write('%d\n' % (self.totalDataUnitsProcessed))
# Record active counts in plain-text files
counts = SS.getCountVec()
assert counts.ndim == 1
counts = np.asarray(counts, dtype=np.float32)
np.maximum(counts, 0, out=counts)
with open(self.mkfile('active_counts.txt'), 'a') as f:
flatstr = ' '.join(['%.3f' % x for x in counts])
f.write(flatstr + '\n')
with open(self.mkfile('active_uids.txt'), 'a') as f:
if ActiveIDVec is None:
if SS is None:
ActiveIDVec = np.arange(SS.K)
else:
ActiveIDVec = SS.uids
flatstr = ' '.join(['%d' % x for x in ActiveIDVec])
f.write(flatstr + '\n')
if SS.hasSelectionTerm('DocUsageCount'):
ucount = SS.getSelectionTerm('DocUsageCount')
flatstr = ' '.join(['%d' % x for x in ucount])
with open(self.mkfile('active_doc_counts.txt'), 'a') as f:
f.write(flatstr + '\n')
def isCountVecConverged(self, Nvec, prevNvec, batchID=None):
if Nvec.size != prevNvec.size:
# Warning: the old value of maxDiff is still used for printing
return False
maxDiff = np.max(np.abs(Nvec - prevNvec))
isConverged = maxDiff < self.algParams['convergeThr']
if batchID is not None:
if not hasattr(self, 'ConvergeInfoByBatch'):
self.ConvergeInfoByBatch = dict()
self.ConvergeInfoByBatch[batchID] = dict(
isConverged=isConverged,
maxDiff=maxDiff)
isConverged = np.min([
self.ConvergeInfoByBatch[b]['isConverged']
for b in self.ConvergeInfoByBatch])
maxDiff = np.max([
self.ConvergeInfoByBatch[b]['maxDiff']
for b in self.ConvergeInfoByBatch])
self.ConvergeInfo = dict(isConverged=isConverged,
maxDiff=maxDiff)
return isConverged
def isSaveParamsCheckpoint(self, lap, nMstepUpdates):
''' Answer True/False whether to save full model now
'''
s = self.outputParams['saveEveryLogScaleFactor']
sE = self.outputParams['saveEvery']
if s > 0:
new_sE = np.maximum(np.maximum(sE, sE ** s), sE * s)
if (lap >= new_sE):
self.outputParams['saveEvery'] = new_sE
if lap > 1.0:
self.outputParams['saveEvery'] = \
np.ceil(self.outputParams['saveEvery'])
saveEvery = self.outputParams['saveEvery']
if saveEvery <= 0 or self.task_output_path is None:
return False
return isEvenlyDivisibleFloat(lap, saveEvery) \
or (isEvenlyDivisibleFloat(lap, 1.0) and
lap <= self.outputParams['saveEarly']) \
or nMstepUpdates < 3 \
or np.allclose(lap, 1.0) \
or np.allclose(lap, 2.0) \
or
|
np.allclose(lap, 4.0)
|
numpy.allclose
|
import torch
import torch.nn.functional as F
import argparse
import cv2
import numpy as np
from glob import glob
num_classes = 2
img_height, img_width = 224, 224
channel = 3
GPU = False
torch.manual_seed(0)
class InceptionModule(torch.nn.Module):
def __init__(self, in_f, f_1, f_2_1, f_2_2, f_3_1, f_3_2, f_4_2):
super(InceptionModule, self).__init__()
self.conv1 = torch.nn.Conv2d(in_f, f_1, kernel_size=1, padding=0, stride=1)
self.conv2_1 = torch.nn.Conv2d(in_f, f_2_1, kernel_size=1, padding=0, stride=1)
self.conv2_2 = torch.nn.Conv2d(f_2_1, f_2_2, kernel_size=3, padding=1, stride=1)
self.conv3_1 = torch.nn.Conv2d(in_f, f_3_1, kernel_size=1, padding=0, stride=1)
self.conv3_2 = torch.nn.Conv2d(f_3_1, f_3_2, kernel_size=5, padding=2, stride=1)
self.conv4_2 = torch.nn.Conv2d(in_f, f_4_2, kernel_size=1, padding=0, stride=1)
def forward(self, x):
x1 = torch.nn.ReLU()(self.conv1(x))
x2 = torch.nn.ReLU()(self.conv2_1(x))
x2 = torch.nn.ReLU()(self.conv2_2(x2))
x3 = torch.nn.ReLU()(self.conv3_1(x))
x3 = torch.nn.ReLU()(self.conv3_2(x3))
x4 = F.max_pool2d(x, 3, padding=1, stride=1)
x4 = torch.nn.ReLU()(self.conv4_2(x4))
x = torch.cat([x1, x2, x3, x4], dim=1)
return x
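# Quick shape sanity check (illustrative, not part of the training script): the
# concatenated output of an InceptionModule has f_1 + f_2_2 + f_3_2 + f_4_2
# channels, so the "3a" configuration below maps 192 -> 64 + 128 + 32 + 32 = 256
# channels, matching the 256 input channels of the "3b" block.
_x_demo = torch.randn(1, 192, 28, 28)
assert InceptionModule(192, 64, 96, 128, 16, 32, 32)(_x_demo).shape[1] == 256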
class GoogLeNetv1(torch.nn.Module):
def __init__(self):
super(GoogLeNetv1, self).__init__()
self.conv1 = torch.nn.Conv2d(3, 64, kernel_size=7, padding=0, stride=2)
self.conv2_1 = torch.nn.Conv2d(64, 64, kernel_size=1, padding=0, stride=1)
self.conv2_2 = torch.nn.Conv2d(64, 192, kernel_size=3, padding=1, stride=1)
self.inception3a = InceptionModule(192, 64, 96, 128, 16, 32, 32)
self.inception3b = InceptionModule(256, 128, 128, 192, 32, 96, 64)
self.inception4a = InceptionModule(480, 192, 96, 208, 16, 48, 64)
self.inception4b = InceptionModule(512, 160, 112, 224, 24, 64, 64)
self.inception4c = InceptionModule(512, 128, 128, 256, 24, 64, 64)
self.inception4d = InceptionModule(512, 112, 144, 288, 32, 64, 64)
self.inception4e = InceptionModule(528, 256, 160, 320, 32, 128, 128)
self.inception5a = InceptionModule(832, 256, 160, 320, 32, 128, 128)
self.inception5b = InceptionModule(832, 384, 192, 384, 48, 128, 128)
self.linear = torch.nn.Linear(1024, num_classes)
self.aux1_conv1 = torch.nn.Conv2d(512, 128, kernel_size=1, padding=0, stride=1)
self.aux1_linear1 = torch.nn.Linear(25088, 1024)
self.aux1_linear2 = torch.nn.Linear(1024, num_classes)
self.aux2_conv1 = torch.nn.Conv2d(528, 128, kernel_size=1, padding=0, stride=1)
self.aux2_linear1 = torch.nn.Linear(25088, 1024)
self.aux2_linear2 = torch.nn.Linear(1024, num_classes)
def forward(self, x):
x = torch.nn.ReLU()(self.conv1(x))
x = F.max_pool2d(x, 3, padding=1, stride=2)
x = torch.nn.modules.normalization.LocalResponseNorm(size=1)(x)
x = torch.nn.ReLU()(self.conv2_1(x))
x = torch.nn.ReLU()(self.conv2_2(x))
x = torch.nn.modules.normalization.LocalResponseNorm(size=1)(x)
x = F.max_pool2d(x, 3, padding=1, stride=2)
x = self.inception3a(x)
x = self.inception3b(x)
x = F.max_pool2d(x, 3, padding=1, stride=2)
x = self.inception4a(x)
x_aux1 = F.avg_pool2d(x, 5, padding=2, stride=1)
x_aux1 = torch.nn.ReLU()(self.aux1_conv1(x_aux1))
x_aux1 = x_aux1.view(list(x_aux1.size())[0], -1)
x_aux1 = torch.nn.ReLU()(self.aux1_linear1(x_aux1))
x_aux1 = torch.nn.Dropout(p=0.7)(x_aux1)
x_aux1 = self.aux1_linear2(x_aux1)
x_aux1 = F.softmax(x_aux1, dim=1)
x = self.inception4b(x)
x = self.inception4c(x)
x = self.inception4d(x)
x_aux2 = F.avg_pool2d(x, 5, padding=2, stride=1)
x_aux2 = torch.nn.ReLU()(self.aux2_conv1(x_aux2))
x_aux2 = x_aux2.view(list(x_aux2.size())[0], -1)
x_aux2 = torch.nn.ReLU()(self.aux2_linear1(x_aux2))
x_aux2 = torch.nn.Dropout(p=0.7)(x_aux2)
x_aux2 = self.aux2_linear2(x_aux2)
x_aux2 = F.softmax(x_aux2, dim=1)
x = self.inception4e(x)
x = F.max_pool2d(x, 3, padding=1, stride=2)
x = self.inception5a(x)
x = self.inception5b(x)
x = F.avg_pool2d(x, 7, padding=0, stride=1)
x = x.view(list(x.size())[0], -1)
x = self.linear(x)
x = F.softmax(x, dim=1)
return x, x_aux1, x_aux2
CLS = ['akahara', 'madara']
# get train data
def data_load(path, hf=False, vf=False, rot=False):
xs = []
ts = []
paths = []
for dir_path in glob(path + '/*'):
for path in glob(dir_path + '/*'):
x = cv2.imread(path)
x = cv2.resize(x, (img_width, img_height)).astype(np.float32)
x /= 255.
x = x[..., ::-1]
xs.append(x)
for i, cls in enumerate(CLS):
if cls in path:
t = i
ts.append(t)
paths.append(path)
if hf:
xs.append(x[:, ::-1])
ts.append(t)
paths.append(path)
if vf:
xs.append(x[::-1])
ts.append(t)
paths.append(path)
if hf and vf:
xs.append(x[::-1, ::-1])
ts.append(t)
paths.append(path)
if rot != False:
angle = rot
scale = 1
# show
a_num = 360 // rot
w_num = np.ceil(np.sqrt(a_num))
h_num = np.ceil(a_num / w_num)
count = 1
#plt.subplot(h_num, w_num, count)
#plt.axis('off')
#plt.imshow(x)
#plt.title("angle=0")
while angle < 360:
_h, _w, _c = x.shape
max_side = max(_h, _w)
tmp = np.zeros((max_side, max_side, _c))
tx = int((max_side - _w) / 2)
ty = int((max_side - _h) / 2)
tmp[ty: ty+_h, tx: tx+_w] = x.copy()
M = cv2.getRotationMatrix2D((max_side/2, max_side/2), angle, scale)
_x = cv2.warpAffine(tmp, M, (max_side, max_side))
_x = _x[ty:ty+_h, tx:tx+_w]  # crop back to the original (row, col) region
xs.append(_x)
ts.append(t)
paths.append(path)
# show
#count += 1
#plt.subplot(h_num, w_num, count)
#plt.imshow(_x)
#plt.axis('off')
#plt.title("angle={}".format(angle))
angle += rot
#plt.show()
xs = np.array(xs, dtype=np.float32)
ts = np.array(ts, dtype=int)
xs = xs.transpose(0,3,1,2)
return xs, ts, paths
# train
def train():
# GPU
device = torch.device("cuda" if GPU else "cpu")
# model
model = GoogLeNetv1().to(device)
opt = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
model.train()
xs, ts, paths = data_load('../Dataset/train/images/', hf=True, vf=True, rot=10)
# training
mb = 32
mbi = 0
train_ind = np.arange(len(xs))
np.random.seed(0)
|
np.random.shuffle(train_ind)
|
numpy.random.shuffle
|
from scipy import interpolate as interp
import numpy as np
import sys, os
from saveNet import saveTensorToMat, saveArrayToMat
import matplotlib.pyplot as plt
from pynufft import NUFFT_cpu  # required below for NUFFTobj = NUFFT_cpu()
from utils.data_vis import tensorshow, ntensorshow
import multiprocessing
import ctypes
from functools import partial
NUFFTobj = NUFFT_cpu()
sys.path.append('/home/helrewaidy/bart/python/')
from bart import bart, bart_nested
os.environ["TOOLBOX_PATH"] = '/home/helrewaidy/bart/'
import time
'''
[IMPORTANT] --> Bart docs (https://github.com/mikgroup/bart-workshop)
if error "Exception: Environment variable TOOLBOX_PATH is not set." appeared
in terminal:
export TOOLBOX_PATH=/home/helrewaidy/bart
export PATH=${TOOLBOX_PATH}:${PATH}
'''
from parameters import Parameters
params = Parameters()
def shared_array(shape):
"""
Form a shared memory numpy array.
http://stackoverflow.com/questions/5549190/is-shared-readonly-data-copied-to-different-processes-for-python-multiprocessing
"""
l = 1
for i in shape:
l *= i
# print(np.prod(shape))
shared_array_base = multiprocessing.Array(ctypes.c_float, l)
shared_array = np.ctypeslib.as_array(shared_array_base.get_obj())
shared_array = shared_array.reshape(*shape)
return np.asarray(shared_array)
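# Minimal usage sketch (illustrative): the returned array is an ordinary numpy
# view over a multiprocessing.Array buffer (float32), so data written here is
# visible to worker processes forked afterwards.
_demo_buf = shared_array((4, 3))
_demo_buf[:] = 0.0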
def sort_files(flist):
return sorted(flist, key=lambda x: 1000 * parse_dat_filename(x)['slc'] + parse_dat_filename(x)['phs'])
def create_radial_trajectory(n_cols, n_views, s_angle=0):
angles = np.linspace(0, np.pi-np.pi/n_views, n_views) + s_angle
line = np.zeros([2, n_cols]); line[0, :] = np.linspace(-n_cols//2, n_cols//2, n_cols)
traj = [np.matmul([[np.cos(ang), np.sin(ang)], [-np.sin(ang), np.cos(ang)]], line) for ang in angles]
return np.flip(np.moveaxis(traj, [2, 0], [0, 1]), 0) # [n_cols, n_views, 2]
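# Geometry sketch (illustrative): each spoke is the centered readout line
# rotated by a 2x2 rotation matrix. For a single 90-degree spoke the energy
# moves entirely onto the second coordinate:
_ang = np.pi / 2
_line = np.zeros([2, 4]); _line[0, :] = np.linspace(-2, 2, 4)
_spoke = np.matmul([[np.cos(_ang), np.sin(_ang)], [-np.sin(_ang), np.cos(_ang)]], _line)
# _spoke[0] ~ 0 and _spoke[1] ~ [2, 0.667, -0.667, -2] up to floating-point error.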
def create_radial_trajectory2(n_cols, n_views, s_angle=0):
angles = np.linspace(0, np.pi-np.pi/n_views, n_views) + s_angle
line = np.zeros([2, n_cols]); line[0, :] = np.linspace(-n_cols//2, n_cols//2, n_cols)
traj = [np.matmul([[np.cos(ang), np.sin(ang)], [-np.sin(ang), np.cos(ang)]], line) for ang in angles]
return np.flip(np.moveaxis(traj, [2, 0], [0, 1]), 0), angles # [n_cols, n_views, 2]
def get_radial_trajectory(kspace_lines, gradient_delays_method=None):
n_cols = kspace_lines.shape[0]
n_views = kspace_lines.shape[1]
# if 'NONE' == gradient_delays_method: # use python not Not BART
# trajectory = create_radial_trajectory(n_cols, n_views)
# else:
trajectory = bart(1, 'traj -r -x{0} -y{1} -c'.format(n_cols, n_views))
ckl = np.expand_dims(kspace_lines[:,:,:,0], 0) + 1j* np.expand_dims(kspace_lines[:,:,:,1],0);
ckl = ckl.astype(np.complex64)
if 'RING' == gradient_delays_method:
trajectory = bart_nested(1, 2, 'estdelay -R -r1.5',
'traj -x{0} -y{1} -r -c -q'.format(n_cols, n_views), trajectory ,ckl)
elif 'AC-addaptive' == gradient_delays_method:
trajectory = bart_nested(1, 2, 'estdelay',
'traj -x{0} -y{1} -r -c -q'.format(n_cols, n_views), trajectory ,ckl)
debug = False
if debug:
img_nufft = bart(1, 'nufft -d{0}:{1}:1 -i'.format(n_cols, n_cols), trajectory, ckl)
gd_img_nufft = bart(1, 'nufft -d{0}:{1}:1 -i'.format(n_cols, n_cols), trajectory, ckl)
rss_img = np.sqrt(np.sum(img_nufft.real**2 + img_nufft.imag**2, 3))
gd_rss_img = np.sqrt(np.sum(gd_img_nufft.real**2 + gd_img_nufft.imag**2, 3))
show_complex_image(rss_img, rng=(0, 0.005))
show_complex_image(gd_rss_img, rng=(0, 0.005))
trajectory = np.flip(np.moveaxis(trajectory[0:2,:,:].real, [0], [2]), 2)# [n_cols, n_views, 2]
return trajectory
def show_complex_image(img, ch = 0,rng=(0, 0.1)):
plt.figure()
plt.imshow(np.abs(img[:,:,ch]), cmap='gray', vmin=rng[0], vmax=rng[1])
plt.show()
def get_grid_neighbors(kspace_lines, grid_size=(416, 416), k_neighbors=50, trajectory=None, gradient_delays_method='RING', neighbor_mat=True):
n_cols = kspace_lines.shape[0]
n_views = kspace_lines.shape[1]
n_cha = kspace_lines.shape[2]
if trajectory is None:
trajectory = get_radial_trajectory(kspace_lines, gradient_delays_method=gradient_delays_method)
if neighbor_mat:
ngrid = np.zeros((grid_size[0], grid_size[0], k_neighbors, n_cha, 2))
traj = np.reshape(trajectory, [n_cols * n_views, 2], order='F')
traj = traj.real
ksl = np.reshape(kspace_lines, [n_cols * n_views, n_cha, 2], order='F')
Ksp_ri = np.zeros((grid_size[0], grid_size[1], k_neighbors, n_cha, 2))
Loc_xy = 1e10 * np.ones((grid_size[0], grid_size[1], k_neighbors, 2))
k = 0
# st = time.time()
for ic in range(-grid_size[0]//2, grid_size[0]-grid_size[0]//2):
for jc in range(-grid_size[1]//2, grid_size[1]-grid_size[1]//2):
i = ic + grid_size[0] // 2
j = jc + grid_size[1] // 2
gi = ic * n_cols / grid_size[0]
gj = jc * n_cols / grid_size[1]
# rd = 5
# rd = 0.05 * (abs(ic) + abs(jc)) + 1
# rd = (10/208) * (ic**2 + jc**2)**0.5 + 2
rd = 10
dist = ((traj[:, 0] - gi) ** 2 + (traj[:, 1] - gj) ** 2) ** 0.5
idxs = np.argwhere(dist <= rd)[:, 0].tolist()
idxs = np.array([x for _,x in sorted(zip(dist[idxs], idxs))])
# tmp_n[i, j] = len(idxs)
# idxs = sorted(dist)
# tmp_n[i, j] = idxs[9]
if neighbor_mat:
if len(idxs) == 0:
continue
rdist = np.round(dist[idxs] * (k_neighbors / rd), 0)
#
for n in range(0, k_neighbors - 1):
if any(rdist == n):
kidx = idxs[np.argwhere(rdist == n)[:, 0].tolist()]
ngrid[i, j, n, :, :] = np.mean(ksl[kidx, :, :], axis=0)
else:
s = len(idxs) if len(idxs)<=k_neighbors else k_neighbors
Ksp_ri[i, j, 0:s, ] = ksl[idxs[:s].tolist()]
Loc_xy[i, j, 0:s, 0], Loc_xy[i, j, 0:s, 1] = traj[idxs[:s].tolist(), 0] - gi, traj[idxs[:s].tolist(), 1] - gj
k += 1
# Idx_list.append(idxs.tolist())
# Dist.append(dist[idxs.tolist()].tolist())
return ngrid if neighbor_mat else Ksp_ri, Loc_xy
def fill_row(rn, ksl, traj, grid_size, k_neighbors, n_batch, n_cols, n_cha):
# print('start filling row')
l_Ksp_ri = np.zeros((n_batch, rn[1], grid_size[1], k_neighbors, n_cha, 2))
l_Loc_xy = 1e3 * np.ones((rn[1], grid_size[1], k_neighbors, 2))
i = 0
for ic in range(rn[0], rn[0]+rn[1]):
# i = ic + grid_size[0] // 2
gi = ic * n_cols / grid_size[0]
for jc in range(-grid_size[1]//2, grid_size[1]-grid_size[1]//2):
j = jc + grid_size[1] // 2
gj = jc * n_cols / grid_size[1]
rd = 10
dist = ((traj[:, 0] - gi) ** 2 + (traj[:, 1] - gj) ** 2) ** 0.5
idxs = np.argwhere(dist <= rd)[:, 0].tolist()
idxs = np.array([x for _,x in sorted(zip(dist[idxs], idxs))])
s = len(idxs) if len(idxs)<=k_neighbors else k_neighbors
l_Ksp_ri[:, i, j, 0:s, ] = ksl[:, idxs[:s].tolist()]
l_Loc_xy[i, j, 0:s, 0], l_Loc_xy[i, j, 0:s, 1] = traj[idxs[:s].tolist(), 0] - gi, traj[idxs[:s].tolist(), 1] - gj
i += 1
# lock.acquire()
s = rn[0] + grid_size[0] // 2
t = rn[0] + rn[1] + grid_size[0] // 2
# print(s, ' ', t)
# saveArrayToMat(l_Ksp_ri, 'ksp_{0}_{1}'.format(s, t))
# saveArrayToMat(l_Loc_xy, 'loc_{0}_{1}'.format(s, t))
Ksp_ri[:, :, :, s:t, :, :] = np.float32(np.flip(np.moveaxis(l_Ksp_ri, [3, 4], [2, 1]), axis=4))
# print(l_Ksp_ri.shape)
Loc_xy[:, s:t, :, :] = np.float32(np.flip(np.moveaxis(l_Loc_xy, [2], [0]), axis=2))
# print(l_Loc_xy.shape)
# lock.release()
# i = ic + grid_size[0] // 2
# time.sleep(10)
# gi = ic * n_cols / grid_size[0]
# for jc in range(-grid_size[1] // 2, grid_size[1] - grid_size[1] // 2):
# j = jc + grid_size[1] // 2
# gj = jc * n_cols / grid_size[1]
#
# rd = 10
# dist = ((traj[:, 0] - gi) ** 2 + (traj[:, 1] - gj) ** 2) ** 0.5
# idxs = np.argwhere(dist <= rd)[:, 0].tolist()
# idxs = np.array([x for _, x in sorted(zip(dist[idxs], idxs))])
#
# s = len(idxs) if len(idxs) <= k_neighbors else k_neighbors
# Ksp_ri[:, j, 0:s, ] = ksl[:, idxs[:s].tolist()]
# Loc_xy[j, 0:s, 0], Loc_xy[j, 0:s, 1] = traj[idxs[:s].tolist(), 0] - gi, traj[idxs[:s].tolist(), 1] - gj
# print('Finish filling row')
# return Ksp_ri, Loc_xy
n_process = 16 ## must be one of {2, 4, 8, 13, 16, 26, ...etc} ## for an input of size 416, using 16 processes keeps things simple since 416 is divisible by 16
Ksp_ri = shared_array([params.batch_size, params.n_channels, params.k_neighbors, params.img_size[0], params.img_size[1], 2])
Loc_xy = shared_array([params.k_neighbors, params.img_size[0], params.img_size[0], 2])
pool = multiprocessing.Pool(processes=n_process)
# lock = multiprocessing.Lock()
def get_grid_neighbors_mp(kspace_lines, grid_size=(416, 416), k_neighbors=50, trajectory=None, gradient_delays_method='RING', neighbor_mat=True):
'''
Multi-processing version with a batch dimension
'''
n_batch = kspace_lines.shape[0]
n_cols = kspace_lines.shape[1]
n_views = kspace_lines.shape[2]
n_cha = kspace_lines.shape[3]
if trajectory is None:
trajectory = get_radial_trajectory(kspace_lines, gradient_delays_method=gradient_delays_method)
# if neighbor_mat:
# ngrid = np.zeros((grid_size[0], grid_size[0], k_neighbors, n_cha, 2))
traj = np.reshape(trajectory[0,], [n_cols * n_views, 2], order='F')
# traj = traj.real
ksl = np.reshape(kspace_lines, [n_batch, n_cols * n_views, n_cha, 2], order='F')
# # Ksp_ri = np.zeros((grid_size[0], grid_size[1], k_neighbors, n_cha, 2))
# # Loc_xy = 1e3 * np.ones((grid_size[0], grid_size[1], k_neighbors, 2))
# print('In Grid Function')
# st = time.time()
rn = [(r, grid_size[0]//n_process) for r in range(-grid_size[0]//2, grid_size[0]-grid_size[0]//2, grid_size[0]//n_process)]
fill_row_p = partial(fill_row, ksl=ksl, traj=traj, grid_size=grid_size, k_neighbors=k_neighbors, n_batch=n_batch, n_cols=n_cols, n_cha=n_cha)
pool.map(fill_row_p, rn)
# # st = time.time()
# # ss = pool.map(fill_row_p, rn)
# # print(time.time()-st)
# # st = time.time()
# Ksp_ri, Loc_xy = zip(*pool.map(fill_row_p, rn))
# # print(time.time() - st)
# Ksp_ri = np.moveaxis(np.asarray(Ksp_ri), [1, 5, 4, 0], [0, 1, 2, 4])
# Ksp_ri = np.reshape(Ksp_ri, [n_batch, n_cha, k_neighbors, grid_size[0], grid_size[1], 2])
# Ksp_ri = np.flip(Ksp_ri, axis=4)
#
# Loc_xy = np.flip(np.moveaxis(
# np.asarray(Loc_xy), [3], [0]).reshape([k_neighbors, grid_size[0], grid_size[1], 2]),
# axis=3)
# # for ic in range(-grid_size[0]//2, grid_size[0]-grid_size[0]//2):
# print('Finished Grid Function')
return Ksp_ri, Loc_xy  # ngrid (neighbor_mat mode) is not built in this multi-processing version
def get_radial_undersampled_image(img, n_views, rot_angle=0, grid_method='BART'):
traj, angles = create_radial_trajectory2(2*img.shape[0], n_views, rot_angle)
im = np.zeros((2*img.shape[0], 2*img.shape[1]))
im[img.shape[0]//2:-img.shape[0]//2, img.shape[0]//2:-img.shape[0]//2] = img
img = im.astype(np.float32).astype(np.complex64)
DC = True
if grid_method == 'BART':
trajectory = np.moveaxis(traj, 2, 0)
trajectory = np.concatenate((trajectory, np.zeros((1, img.shape[0], n_views))), 0)
ksl = bart(1, 'nufft -d{0}:{1}:1 -t'.format(img.shape[0], img.shape[1]), trajectory, img)
if DC:
d = np.abs(np.linspace(-1, 1, img.shape[0]) + 1/img.shape[0])
d = np.repeat(np.expand_dims(np.expand_dims(d, 0), 2), ksl.shape[2], 2)
# for ii in range(0, ksl.shape[2]):
# d[:, :, ii] = d[:, :, ii] * np.exp(1j*angles[ii])
# w = abs(d.real + 1j * d.imag) / max(abs(d.real + 1j * d.imag))
ksl = ksl * d # * w
output = bart(1, 'nufft -d{0}:{1}:1 -i -t'.format(img.shape[0], img.shape[1]), trajectory, ksl)
output = output[img.shape[0]//4:-img.shape[0]//4, img.shape[0]//4:-img.shape[0]//4]
elif grid_method == 'pyNUFFT':
traj = np.reshape(traj, [img.shape[0] * n_views, 2], order='F')
NUFFTobj.plan(om=traj, Nd=(img.shape[0], img.shape[1]), Kd=(2*img.shape[0], 2*img.shape[1]), Jd=(6, 6))
ksll = NUFFTobj.forward(img)
output = NUFFTobj.solve(ksll, 'dc', maxiter=30)
return output
def grid_kspace(kspace_lines, trajectory=None, gridding_method='grid_kernels', gradient_delays_method='RING', k_neighbors=10):
n_cols = kspace_lines.shape[0]
n_views = kspace_lines.shape[1]
n_cha = kspace_lines.shape[2]
if trajectory is None:
trajectory = get_radial_trajectory(kspace_lines, gradient_delays_method=gradient_delays_method)
if gridding_method == 'grid_kernels':
# return get_grid_neighbors(kspace_lines, grid_size=(416, 416), k_neighbors=k_neighbors, trajectory=trajectory, neighbor_mat=False)
return kspace_lines, trajectory
if gridding_method == 'neighbours_matrix':
ngrid = get_grid_neighbors(kspace_lines, grid_size=(416, 416), k_neighbors=k_neighbors, trajectory=trajectory, neighbor_mat=True)
output = np.flip(ngrid, 1)
return output, [], []
if 'pyNUFFT' == gridding_method:
trajectory = np.reshape(trajectory, [n_cols * n_views, 2], order='F')
kspace_lines = np.reshape(kspace_lines, [n_cols * n_views, n_cha, 2], order='F')
trajectory = trajectory.real * np.pi / (n_cols // 2)
NUFFTobj.plan(om=trajectory, Nd=(n_cols//2, n_cols//2), Kd=(n_cols, n_cols), Jd=(6, 6), batch=n_cha)
kspace_lines = kspace_lines[:, :, 0] + 1j*kspace_lines[:, :, 1]
# st = time.time()
output = NUFFTobj.solve(kspace_lines, solver='cg', maxiter=10)
# print(time.time()-st)
output = np.fft.fftn(output, axes=(0, 1))
## output = NUFFTobj.y2k(kspace_lines)
elif 'BART' == gridding_method:
ckl = np.expand_dims(kspace_lines[:, :, :, 0], 0) + 1j * np.expand_dims(kspace_lines[:, :, :, 1], 0)
ckl = ckl.astype(np.complex64)
trajectory = np.moveaxis(trajectory, [2], [0])
trajectory = np.concatenate((trajectory, np.zeros((1, n_cols, n_views))), 0)
# st = time.time()
output = bart(1, 'nufft -d{0}:{1}:1 -i'.format(n_cols, n_cols), trajectory, ckl)
# print(time.time()-st)
output = np.fft.fftn(np.squeeze(output,2)[n_cols//4:-n_cols//4, n_cols//4:-n_cols//4, :], axes=(0, 1))
else:
trajectory = np.reshape(trajectory, [n_cols * n_views, 2], order='F')
kspace_lines =
|
np.reshape(kspace_lines, [n_cols * n_views, n_cha, 2], order='F')
|
numpy.reshape
|
# pylint: disable=R0902,R0904,R0914
from math import sin, cos, radians, atan2, sqrt, degrees
from itertools import count
from typing import Tuple # , TYPE_CHECKING
import numpy as np
from numpy import array, zeros
from scipy.sparse import coo_matrix # type: ignore
from pyNastran.utils.numpy_utils import integer_types
from pyNastran.bdf.cards.base_card import BaseCard
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.field_writer_16 import print_card_16
from pyNastran.bdf.field_writer_double import print_card_double
from pyNastran.bdf.bdf_interface.assign_type import (
integer, integer_or_blank, double, string, string_or_blank,
parse_components, interpret_value, integer_double_string_or_blank)
class DTI(BaseCard):
"""
+-----+-------+-----+------+-------+--------+------+-------------+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+=====+=======+=====+======+=======+========+======+=============+
| DTI | UNITS | "1" | MASS | FORCE | LENGTH | TIME | STRESS |
+-----+-------+-----+------+-------+--------+------+-------------+
MSC
+-----+-------+-----+------+-------+--------+------+-------------+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+=====+=======+=====+======+=======+========+======+=============+
| DTI | UNITS | "1" | MASS | FORCE | LENGTH | TIME | TEMPERATURE |
+-----+-------+-----+------+-------+--------+------+-------------+
NX
"""
type = 'DTI'
#_properties = ['shape', 'ifo', 'is_real', 'is_complex', 'is_polar', 'matrix_type', 'tin_dtype', 'tout_dtype']
@classmethod
def _init_from_empty(cls):
name = 'name'
fields = []
return DTI(name, fields, comment='')
def _finalize_hdf5(self, encoding):
"""hdf5 helper function"""
keys, values = self.fields
# nan != nan
values = [value if value == value else None for value in values]
values_str = [value.decode(encoding) if isinstance(value, bytes) else value
for value in values]
#values = [valuei.decode(encoding) if isinstance(valuei, bytes) else (
# None if np.isnan(valuei) else valuei)
# for valuei in values]
self.fields = {key : value for key, value in zip(keys, values_str)}
@classmethod
def export_to_hdf5(cls, h5_file, model, encoding):
"""exports the elements in a vectorized way"""
from pyNastran.bdf.bdf_interface.hdf5_exporter import _export_list
for name, dti in sorted(model.dti.items()):
if name == 'UNITS':
i = 0
for key, value in sorted(dti.fields.items()):
#print(key, value)
h5_group = h5_file.create_group(str(key))
if value is None:
h5_group.create_dataset(str(i), data=np.nan)
else:
h5_group.create_dataset(str(i), data=value)
i += 1
#fields = {
#'mass' : mass,
#'force' : force,
#'length' : length,
#'time' : time,
#'temp_stress' : temp_stress
#}
else:
for irecord, fields in sorted(dti.fields.items()):
#h5_group = h5_file.create_group(str(irecord))
attr = 'irecord=%s' % irecord
namei = str(irecord)
values = fields
_export_list(h5_file, attr, namei, values, encoding)
#print(h5_group)
#print(irecord, fields)
def __init__(self, name, fields, comment=''):
"""
Creates a DTI card
Parameters
----------
name : str
UNITS
fields : List[varies]
the fields
comment : str; default=''
a comment for the card
"""
if comment:
self.comment = comment
self.name = name
self.fields = fields
assert len(fields) > 0, fields
@classmethod
def add_card(cls, card, comment):
"""
Adds a DTI card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
name = string(card, 1, 'name')
if name == 'UNITS':
integer(card, 2, '1')
mass = string(card, 3, 'mass')
force = string(card, 4, 'force')
length = string(card, 5, 'length')
time = string(card, 6, 'time')
temp_stress = string_or_blank(card, 7, 'stress/temperature')
fields = {
'mass' : mass,
'force' : force,
'length' : length,
'time' : time,
'temp_stress' : temp_stress
}
else:
fields = []
#field2 = card[2]
list_fields = []
irecord = integer(card, 2, 'record')
if irecord == 0:
for i in range(3, len(card)):
val = integer_double_string_or_blank(
card, i, 'T%i' % (i-1), default=32767)
list_fields.append(val)
else:
for i in range(3, len(card)):
val = integer_double_string_or_blank(
card, i, 'T%i' % (i-1), default=None)
list_fields.append(val)
fields = {irecord: list_fields,}
return DTI(name, fields, comment=comment)
def raw_fields(self):
if self.name == 'UNITS':
mass = self.fields['mass']
force = self.fields['force']
length = self.fields['length']
time = self.fields['time']
temp_stress = self.fields['temp_stress']
list_fields = ['DTI', self.name, '1', mass, force, length, time, temp_stress]
else:
list_fields = []
for irecord, fields in sorted(self.fields.items()):
nfields = len(fields)
list_fields += ['DTI', self.name] + fields
nleftover = nfields % 8
if nleftover:
list_fields += [None] * nleftover
return list_fields
def write_card(self, size: int=8, is_double: bool=False) -> str:
if self.name == 'UNITS':
card = self.repr_fields()
return self.comment + print_card_8(card)
msg = self.comment
for irecord, fields in sorted(self.fields.items()):
list_fields = ['DTI', self.name, irecord, ] + fields
msg += print_card_8(list_fields)
return msg
class NastranMatrix(BaseCard):
"""
Base class for the DMIG, DMIJ, DMIJI, DMIK matrices
"""
def _finalize_hdf5(self, encoding):
"""hdf5 helper function"""
self.finalize()
def __init__(self, name, matrix_form, tin, tout, polar, ncols,
GCj, GCi, Real, Complex=None, comment='', finalize=True):
"""
Creates a NastranMatrix
Parameters
----------
name : str
the name of the matrix
matrix_form : int
matrix shape
4=Lower Triangular
5=Upper Triangular
6=Symmetric
8=Identity (m=nRows, n=m)
tin : int
matrix input precision
1=Real, Single Precision
2=Real, Double Precision
3=Complex, Single Precision
4=Complex, Double Precision
tout : int
matrix output precision
0=same as tin
1=Real, Single Precision
2=Real, Double Precision
3=Complex, Single Precision
4=Complex, Double Precision
polar : int; default=0
Input format of Ai, Bi
Integer=blank or 0 indicates real, imaginary format
Integer > 0 indicates amplitude, phase format
ncols : int
???
GCj : List[(node, dof)]
the jnode, jDOFs
GCi : List[(node, dof)]
the inode, iDOFs
Real : List[float]
The real values
Complex : List[float]; default=None
The complex values (if the matrix is complex)
comment : str; default=''
a comment for the card
"""
if comment:
self.comment = comment
if Complex is None:
Complex = []
if tout is None:
tout = 0
polar = _set_polar(polar)
if matrix_form not in [1, 2, 4, 5, 6, 8, 9]:
msg = (
'matrix_form=%r must be [1, 2, 4, 5, 6, 8, 9]\n'
' 1: Square\n'
' 2: Rectangular\n'
#' 4: Lower Triangular\n'
#' 5: Upper Triangular\n'
' 6: Symmetric\n'
#' 8: Identity (m=nRows, n=m)\n'
' 9: Rectangular\n' % matrix_form)
raise ValueError(msg)
self.name = name
#: 4-Lower Triangular; 5=Upper Triangular; 6=Symmetric; 8=Identity (m=nRows, n=m)
self.matrix_form = matrix_form
#: 1-Real, Single Precision; 2=Real,Double Precision;
# 3=Complex, Single; 4=Complex, Double
self.tin = tin
#: 0-Set by cell precision
self.tout = tout
#: Input format of Ai, Bi. (Integer=blank or 0 indicates real, imaginary format;
#: Integer > 0 indicates amplitude, phase format.)
self.polar = polar
self.ncols = ncols
self.GCj = GCj
self.GCi = GCi
self.Real = Real
if len(Complex) or self.is_complex:
self.Complex = Complex
assert self.tin in [3, 4], 'tin=%r and must 3 or 4 to be complex' % self.tin
assert self.tout in [0, 3, 4], 'tin=%r and must 0, 3 or 4 to be complex' % self.tout
assert isinstance(matrix_form, integer_types), 'matrix_form=%r type=%s' % (matrix_form, type(matrix_form))
assert not isinstance(matrix_form, bool), 'matrix_form=%r type=%s' % (matrix_form, type(matrix_form))
if finalize:
self.finalize()
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a NastranMatrix (DMIG, DMIJ, DMIK, DMIJI) card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
name = string(card, 1, 'name')
#zero
matrix_form = integer(card, 3, 'ifo')
tin = integer(card, 4, 'tin')
tout = integer_or_blank(card, 5, 'tout', 0)
polar = integer_or_blank(card, 6, 'polar', 0)
if matrix_form == 1: # square
ncols = integer_or_blank(card, 8, 'matrix_form=%s; ncol' % matrix_form)
elif matrix_form == 6: # symmetric
ncols = integer_or_blank(card, 8, 'matrix_form=%s; ncol' % matrix_form)
elif matrix_form in [2, 9]: # rectangular
ncols = integer(card, 8, 'matrix_form=%s; ncol' % (matrix_form))
else:
# technically right, but nulling this will fix bad decks
#self.ncols = blank(card, 8, 'matrix_form=%s; ncol' % self.matrix_form)
msg = (
'%s name=%r matrix_form=%r is not supported. Valid forms:\n'
' 4=Lower Triangular\n'
' 5=Upper Triangular\n'
' 6=Symmetric\n'
' 8=Identity (m=nRows, n=m)\n' % (cls.type, name, matrix_form)
)
raise NotImplementedError(msg)
GCj = []
GCi = []
Real = []
Complex = []
return cls(name, matrix_form, tin, tout, polar, ncols,
GCj, GCi, Real, Complex, comment=comment, finalize=False)
@property
def matrix_type(self):
"""gets the matrix type"""
if not isinstance(self.matrix_form, integer_types):
msg = 'ifo must be an integer; matrix_form=%r type=%s name=%s' % (
self.matrix_form, type(self.matrix_form), self.name)
raise TypeError(msg)
if isinstance(self.matrix_form, bool):
msg = 'matrix_form must not be a boolean; matrix_form=%r type=%s name=%s' % (
self.matrix_form, type(self.matrix_form), self.name)
raise TypeError(msg)
if self.matrix_form == 1:
matrix_type = 'square'
elif self.matrix_form == 6:
matrix_type = 'symmetric'
elif self.matrix_form in [2, 9]:
matrix_type = 'rectangular'
else:
# technically right, but nulling this will fix bad decks
#self.ncols = blank(card, 8, 'matrix_form=%s; ncol' % self.matrix_form)
raise NotImplementedError('%s matrix_form=%r is not supported' % (
self.type, self.matrix_form))
return matrix_type
def finalize(self):
"""converts the lists into numpy arrays"""
self.GCi =
|
np.asarray(self.GCi)
|
numpy.asarray
|
__all__ = ['temporaldev_itemvalues_freq','read_config_temporaldev','write_config_temporaldev','temporaldev_result_toxlsx']
from .BiblioSpecificGlobals import DIC_OUTDIR_DESCRIPTION
def temporaldev_itemvalues_freq(keyword_filters ,items, years, corpuses_folder):
'''
Make a list of named tuple [('item','keyword','data_frame','freq','year','mode'),...]
with:
'item' in the list "items". Ex: items=['IK','AK','TK']
'keyword' in the list keyword_filters.values() where:
keyword_filters is a dict {mode:[keyword1,keyword2,...],...} with mode='is_in' or 'is_equal'
'data_frame' the dataframe of the frequency occurrences of the keywords
containing (or equal to) 'keyword'
'''
# Standard library imports
import re
from collections import namedtuple
from pathlib import Path
# 3rd party imports
import pandas as pd
# Local imports
import BiblioAnalysis_Utils as bau
DIC_OUTDIR_DESCRIPTION = bau.DIC_OUTDIR_DESCRIPTION
def _filter(df,keyword_filter,mode):
'''Builds the dataframe dg out of the dataframe df by selecting
the df rows such that the "item" column :
- contains the keyword "keyword_filter" if mode="is_in"
- is equal to "keyword_filter" otherwise
'''
if mode == 'is_in':
dg = df[df['item'].str.contains(keyword_filter)]
else:
dg = df.loc[df['item']==keyword_filter]
return dg
keyword_filter_tuple = namedtuple('keyword_filter_list',['item','keyword','data_frame','freq','year','mode'] )
keyword_filter_list = []
for file_year in years:
year = int(re.findall('\d{4}', file_year )[0])
for item in items:
file_kw = DIC_OUTDIR_DESCRIPTION[item]
file = corpuses_folder / Path(file_year) / Path('freq') / Path(file_kw)
try:
df = pd.read_csv(file)
except FileNotFoundError:
print(f"Warning: no such file {file}")
continue
for mode,keyword_filter_all in keyword_filters.items():
for keyword_filter in keyword_filter_all:
df_filter = _filter(df,keyword_filter,mode)
keyword_filter_list.append(keyword_filter_tuple(item=item,
keyword=keyword_filter,
data_frame=df_filter,
freq=sum(df_filter['f']),
year=year,
mode=mode))
return keyword_filter_list
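# Toy illustration (not part of the library) of the two filter modes handled by
# the _filter helper above: 'is_in' keeps rows whose 'item' contains the
# keyword, while any other mode keeps exact matches only.
import pandas as pd
_df_demo = pd.DataFrame({'item': ['solar cell', 'fuel cell', 'battery'], 'f': [3, 2, 5]})
assert len(_df_demo[_df_demo['item'].str.contains('cell')]) == 2   # mode == 'is_in'
assert len(_df_demo.loc[_df_demo['item'] == 'battery']) == 1       # mode == 'is_equal'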
def read_config_temporaldev(file_config):
"""
Parse json file to build the keywords configuration for temporal analysis of corpuses
Args:
file_config (Path): absolute path of the configuration file
Returns:
items (list of strings): selected items for the analysis among ['IK', 'AK', 'TK', 'S', 'S2']
keywords (dict): {"is_in": list of selected strings,
"is_equal": list of keywords}
"""
# Standard library imports
import json
from collections import defaultdict
keywords = defaultdict(list)
with open(file_config, "r") as read_file:
config_temporal = json.load(read_file)
items = config_temporal ["items"]
for key, value in config_temporal.items():
keywords[key] = value
return items,keywords
def write_config_temporaldev(file_config,items,keywords):
"""
Store the keywords configuration for temporal analysis of corpuses in a JSON file
Args:
file_config (path): absolute path of the configuration file
keywords (dict): {"is_in": list of selected strings,
"is_equal": list of keywords}
items (list of strings): selected items for the analysis among ['IK', 'AK', 'TK', 'S', 'S2']
"""
# Standard library imports
import json
config_temporal = {}
config_temporal ["items"] = items
for key, value in keywords.items():
config_temporal [key] = value
with open(file_config,'w') as write_file:
config_temporal = json.dump(config_temporal,write_file,indent = 4)
def temporaldev_result_toxlsx(keyword_filter_list,store_file):
# Standard library imports
import numpy as np
# 3rd party imports
import pandas as pd
# Build a dict from each tuple of keyword_filter_list
# and append it to the result dataframe
res={}
flag = True
for i in range(0,len(keyword_filter_list)):
res['Corpus year']=keyword_filter_list[i].year
res['Item'] = keyword_filter_list[i].item
res['Search word'] = keyword_filter_list[i].keyword
res['Weight'] = (round(keyword_filter_list[i].freq,3))
res['Search mode'] = keyword_filter_list[i].mode
df = keyword_filter_list[i].data_frame
res['Item-values list'] = [
|
np.array(df['item'])
|
numpy.array
|
# -*- coding: utf-8 -*-
#
import numpy
class Midpoint(object):
def __init__(self):
self.weights = numpy.array([2.0])
self.points =
|
numpy.array([0.0])
|
numpy.array
|
import pandas as pd
import numpy as np
from gym import spaces, Env
from typing import List, Optional, Tuple, Dict
from ReservoirClass import *
# TODO: make the bid-asks more modular
# TODO: make the scale coherent and modular
# TODO: implement cost function
# TODO: calculate some statistics along
# TODO: scale the observations
pd.options.mode.chained_assignment = None # default='warn'
### GLOBAL VARIABLES ###
MAX_ACCOUNT_BALANCE = 100000
MAX_VOLUME = 500
MAX_PRICE = 80000
VOL_SCALE = 50
BID_ASK_SPREAD_RANGE = (0.0065, 0.012)
INITIAL_FORTUNE = 10000
MAX_STEPS = 1000
MAX_NET_WORTH = 30000
MAX_HOLDING = 50
# fix the seed.
np.random.seed(0)
class MultiAssetTradingEnv(Env):
"""Custom Environment that follows gym interface"""
metadata = {'render.modes': ['human']}
_scaling_functions = {
'tanh': np.tanh,
'sigmoid': lambda x: 1/(1+np.exp(-x)),
'softsign': lambda x: x/(1 + np.abs(x)),
'arctan': np.arctan,
'identity': lambda x: x,
}
_reservoir_types = ['rnn','lstm','gru','esn_infos','esn_states']
def __init__(self,
assets: List[pd.DataFrame],
delta = np.float64,
window_size: int = 25,
max_steps: int = 1000,
initial_fortune: float = 10000,
bid_ask_spread_range: Tuple[float, float] = BID_ASK_SPREAD_RANGE,
transaction_cost = 0.001,
):
super(MultiAssetTradingEnv, self).__init__()
assert (delta >= 0 and delta <= 1), 'Impossible to construct utility'
self.number_of_assets = len(assets)
self.delta = delta
self.initial_fortune = initial_fortune
self.window_size = window_size
self.max_steps = max_steps
self.data = assets
self._active_data = list()
self.bid_ask_spread_min = bid_ask_spread_range[0]
self.bid_ask_spread_max = bid_ask_spread_range[1]
self.transaction_cost = transaction_cost
self.action_space = spaces.Box(low= -np.inf*np.ones(self.number_of_assets), high=np.inf*np.ones(self.number_of_assets))
self.obs_size = 4*self.number_of_assets + 3 + (self.window_size-1)*6*self.number_of_assets
self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(self.obs_size,), dtype=np.float64)
self.scale_price,self.scale_volume = self.get_scale_values()
def get_scale_values(self):
scale_price = 0
scale_volume = 0
for asset in self.data:
max_price = np.max(asset['High'])
max_volume = np.max(asset['Volume'])
scale_price = max(scale_price,max_price)
scale_volume = max(scale_volume,max_volume)
return scale_price,scale_volume
def scale_reward(self,x):
return np.tanh(x)
def utility(self,x):
if (self.delta ==0):
if x < 0:
return -self.initial_fortune
else:
return np.log(x)
else:
if x < 0:
return -self.initial_fortune
else:
return (np.power(x,self.delta) - 1)/self.delta
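# Shape of the utility above: for positive wealth x, delta == 0 gives the
# logarithmic utility log(x), while delta in (0, 1] gives the CRRA-style
# power utility (x**delta - 1) / delta; any negative wealth is clamped to
# the large penalty -initial_fortune.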
def _make_trading_period(self) -> None:
"""
Sets the currently active trading episode of the environment
"""
start_episode = np.random.randint(1 + self.window_size, len(self.data[0]) - self._steps_left)
self._active_data = list()
for asset in self.data:
#data = asset[start_episode - self.window_size:start_episode + self._steps_left].reset_index(drop=True)
#data = data.reset_index(drop=True)
dr = asset.loc[start_episode - self.window_size:start_episode + self._steps_left -1].reset_index(drop=True)
self._active_data.append(dr)
def reset(self):
"""
Reset the environment, and return the next observation.
:return: next_observation
"""
self._current_step = self.window_size
self._steps_left = self.max_steps
self._make_trading_period()
self.balance = self.initial_fortune
self.portfolio = np.zeros(self.number_of_assets)
self.net_worth = self.initial_fortune
self.max_net_worth = self.initial_fortune
self.fees = 0
self.std_portfolio = []
return self.next_observation()
def next_observation(self):
"""
Use the active dataset (self._active_data) to compile the state space fed to the agent.
Scale the data to fit into the range of observation_space.
Differentiation of data has to be implemented.
:return: obs: observation given to the agent. Modify observation_space accordingly.
"""
#Calculate open prices.
open_prices_t = []
for asset in self._active_data:
open_prices_t.append(asset['Open'][self._current_step])
open_prices_t = np.array(open_prices_t)/self.scale_price
# TODO: create window_data with a reservoir_data function
return_scaled = []
mean_price = []
mean_volume = []
window_data = list()
for asset_data in self._active_data:
window_asset_data = asset_data[self._current_step - self.window_size:self._current_step-1]
window_asset_data.Open /= self.scale_price
window_asset_data.Close /= self.scale_price
window_asset_data.High /= self.scale_price
window_asset_data.Low /= self.scale_price
window_asset_data.Volume /= self.scale_volume
return_window = asset_data['Close'][self._current_step - self.window_size:self._current_step - 1]\
- asset_data['Open'][self._current_step - self.window_size:self._current_step - 1]
difference_window = asset_data['High'][self._current_step - self.window_size:self._current_step - 1]\
- asset_data['Low'][self._current_step - self.window_size:self._current_step - 1]
dt = return_window / (difference_window + 1)
window_asset_data['Return'] = dt
mean_price.append(np.array(asset_data['Close'][self._current_step - self.window_size:self._current_step-1].values).mean())
mean_volume.append(
|
np.array(asset_data['Volume'][self._current_step - self.window_size:self._current_step-1].values).mean()
|
numpy.array
|
import logging
import numpy as np
#from ..utilities.function_tools import func_call_exception_trap
from . import factor
from .material import characteristic_material_strength, characteristic_wall_thickness
logger = logging.getLogger(__name__)
def incidental_reference_pressure(p_d, γ_inc):
"""Calculate DNVGL-ST-F101 «incidental reference pressure».
:param p_d: design pressure :math:`(p_d)`
:type p_d: float
:param γ_inc: incidental to design pressure ratio :math:`(\gamma_{inc})`
:type γ_inc: float
:returns: p_inc incidental reference pressure :math:`(p_{inc})`
:rtype: float
Reference:
DNVGL-ST-F101 (2017-12)
eq:4.3 sec:4.2.2.2 p:67 :math:`(p_{inc})`
.. doctest::
>>> incidental_reference_pressure(100e5, 1.1)
11000000.0
"""
p_inc = p_d * γ_inc
return p_inc
def system_test_pressure(p_d, γ_inc, α_spt):
"""Calculate DNVGL-ST-F101 «system test pressure».
(system_test_press)
:param p_d: design pressure :math:`(p_d)`
:type p_d: float
:param γ_inc: incidental to design pressure ratio :math:`(\gamma_{inc})`
:type γ_inc: float
:param α_spt: system pressure test factor :math:`(\alpha_{spt})`
:type α_spt: float
:returns: p_t system test pressure :math:`(p_t)`
:rtype: float
Reference:
DNVGL-ST-F101 (2017-12)
| eq:4.3 sec:4.2.2.2 p:67 :math:`p_{inc}`
| table:5.8 sec:5.4.2.1 p:94 :math:`\alpha_{spt}`
| sec:5.2.2.1 p:84
.. doctest::
>>> system_test_pressure(100e5, 1.1, 1.0)
11000000.0
"""
p_t = p_d * γ_inc * α_spt
return p_t
def local_incidental_pressure(p_d, ρ_cont, h_l, h_ref, γ_inc, g=9.80665):
'''Calculate local incidental pressure.
:param p_d: design pressure at ref elevation :math:`(p_d)`
:type p_d: float
:param ρ_cont: density of pipeline contents :math:`(\rho_{cont})`
:type ρ_cont: float
:param h_l: elevation of the local pressure point :math:`(h_l)`
:type h_l: float
:param h_ref: elevation of the reference point :math:`(h_{ref})`
:type h_ref: float
:param γ_inc: incidental to design pressure ratio :math:`(\gamma_{inc})`
:type γ_inc: float
:param g: gravitational acceleration :math:`(g)`
:type g: float
:returns: p_li local incidental pressure :math:`(p_{li})`
:rtype: float
Notes:
γ_inc=1.0 for local system test pressure :math:`(p_{lt})`.
.. math::
p_{li} = p_{inc} - \rho_{cont} \cdot g \cdot \left( h_l - h_{ref} \right)
Reference:
DNVGL-ST-F101 (2017-12)
| sec:4.2.2.2 eq:4.1 p:67 :math:`(p_{li})`
| sec:4.2.2.2 eq:4.3 p:67 :math:`(p_{inc})`
.. doctest::
>>> local_incidental_pressure(100.e-5, 1025, -125, 30, 1.0, 9.81)
1558563.751
'''
p_inc = p_d * γ_inc
p_li = p_inc - ρ_cont * g * (h_l - h_ref)
return p_li
def local_test_pressure(p_t, ρ_t, h_l, h_ref, g=9.80665):
"""Calculate local test pressure.
Reference:
DNVGL-ST-F101 (2017-12)
sec:4.2.2.2 eq:4.2 p:67 $p_{lt}$
"""
_γ_inc = 1.0
p_lt = local_incidental_pressure(p_t, ρ_t, h_l, h_ref, _γ_inc, g)
return p_lt
def local_test_pressure_unity(α_spt, p_lt=None, p_li=None, p_e=None, **kwargs):
"""Local test pressure unity check.
Reference:
DNVGL-ST-F101 (2017-12)
sec:5.4.2.1 eq:5.6 p:93
(local_test_press_unity)
"""
# fall back to values supplied via **kwargs when not given directly
if p_e is None:
p_e = external_pressure(kwargs["h_l"], kwargs["ρ_seawater"], kwargs.get("g", 9.80665))
if p_lt is None:
p_lt = local_test_pressure(kwargs["p_t"], kwargs["ρ_t"], kwargs["h_l"], kwargs["h_ref"], kwargs.get("g", 9.80665))
if p_li is None:
p_li = local_incidental_pressure(kwargs["p_d"], kwargs["ρ_cont"], kwargs["h_l"], kwargs["h_ref"], kwargs["γ_inc"], kwargs.get("g", 9.80665))
p_lt_uty = (p_li - p_e) * α_spt / p_lt
return p_lt_uty
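# Hedged note: by the usual unity-check convention a utilisation of 1.0 or
# less means the local test pressure requirement is met; values above 1.0
# mean the criterion is exceeded.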
def external_pressure(h_l, ρ_seawater, g=9.80665):
"""Water pressure, external to pipe.
"""
p_e = np.abs(h_l) * ρ_seawater * g
return p_e
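# Hedged sanity check (illustrative values, not from the standard): 100 m of
# seawater at 1025 kg/m3 gives external_pressure(-100.0, 1025.0) of roughly
# 1.005e6 Pa (1025 * 9.80665 * 100).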
def mill_test_pressure(D, SMYS, SMTS, α_U=None, α_mpt=None,
t_min=None, t=None, t_fab=None):
"""Mill test pressure
Reference:
DNVGL-ST-F101 (2017-12)
sec:7.5.1.2 eq:7.3 p:175 $p_{mpt}$
(mill_test_press)
see also p93.
"""
k=1.15 # assuming end-cap effect applies
if t_min is None:
t_min = characteristic_wall_thickness(t, t_fab, t_corr=0.0)
p_mpt = k * (2*t_min)/(D-t_min) *
|
np.minimum(SMYS*0.96, SMTS*0.84)
|
numpy.minimum
|
# -*- coding: utf-8 -*-
import numpy as np
from .base import Layer
from ztlearn.utils import get_pad
from ztlearn.utils import unroll_inputs
from ztlearn.utils import im2col_indices
from ztlearn.utils import col2im_indices
from ztlearn.utils import get_output_dims
from ztlearn.initializers import InitializeWeights as init
from ztlearn.optimizers import OptimizationFunction as optimizer
class Conv(Layer):
def __init__(self,
filters = 32,
kernel_size = (3, 3),
activation = None,
input_shape = (1, 8, 8),
strides = (1, 1),
padding = 'valid'):
self.filters = filters
self.strides = strides
self.padding = padding
self.activation = activation
self.kernel_size = kernel_size
self.input_shape = input_shape
self.init_method = None
self.optimizer_kwargs = None
self.is_trainable = True
@property
def trainable(self):
return self.is_trainable
@trainable.setter
def trainable(self, is_trainable):
self.is_trainable = is_trainable
@property
def weight_initializer(self):
return self.init_method
@weight_initializer.setter
def weight_initializer(self, init_method):
self.init_method = init_method
@property
def weight_optimizer(self):
return self.optimizer_kwargs
@weight_optimizer.setter
def weight_optimizer(self, optimizer_kwargs = {}):
self.optimizer_kwargs = optimizer_kwargs
@property
def layer_activation(self):
return self.activation
@layer_activation.setter
def layer_activation(self, activation):
self.activation = activation
@property
def layer_parameters(self):
return sum([np.prod(param.shape) for param in [self.weights, self.bias]])
@property
def output_shape(self):
pad_height, pad_width = get_pad(self.padding,
self.input_shape[1],
self.input_shape[2],
self.strides[0],
self.strides[1],
self.kernel_size[0],
self.kernel_size[1])
output_height, output_width = get_output_dims(self.input_shape[1],
self.input_shape[2],
self.kernel_size,
self.strides,
self.padding)
return self.filters, int(output_height), int(output_width)
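# Hedged example (assuming ztlearn's 'valid' padding follows the usual
# floor((H - kH) / stride) + 1 rule): with the default input_shape (1, 8, 8),
# kernel_size (3, 3) and strides (1, 1), output_shape evaluates to (32, 6, 6).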
def prep_layer(self):
self.kernel_shape = (self.filters, self.input_shape[0], self.kernel_size[0], self.kernel_size[1])
self.weights = init(self.weight_initializer).initialize_weights(self.kernel_shape)
self.bias = np.zeros((self.kernel_shape[0], 1))
class Conv2D(Conv):
def __init__(self,
filters = 32,
kernel_size = (3, 3),
activation = None,
input_shape = (1, 8, 8),
strides = (1, 1),
padding = 'valid'):
super(Conv2D, self).__init__(filters, kernel_size, activation, input_shape, strides, padding)
def pass_forward(self, inputs, train_mode = True, **kwargs):
self.filter_num, _, _, _ = self.weights.shape
self.input_shape = inputs.shape
self.inputs = inputs
input_num, input_depth, input_height, input_width = inputs.shape
pad_height, pad_width = get_pad(self.padding,
input_height,
input_width,
self.strides[0],
self.strides[1],
self.kernel_size[0],
self.kernel_size[1])
# confirm dimensions
assert (input_height +
|
np.sum(pad_height)
|
numpy.sum
|
#!/usr/bin/env python3
# Standard lib
import unittest
# 3rd party
import numpy as np
# Our own imports
from deep_hipsc_tracking.tracking import tracking_pipeline, Link
from deep_hipsc_tracking.utils import save_point_csvfile
from ..helpers import FileSystemTestCase
# Tests
class TestLinkNearbyTracks(unittest.TestCase):
def test_links_closer_of_two_tracks_in_time(self):
tt1 = np.array([1, 3, 5, 7, 9])
xx1 = np.array([0, 1, 2, 3, 4])
yy1 = np.array([1.1, 2.1, 3.1, 4.1, 5.1])
track1 = Link.from_arrays(tt1, xx1, yy1)
tt2 = np.array([1, 3, 5, 7, 9])
xx2 = np.array([0, 1, 2, 3, 4])
yy2 = np.array([1, 2, 3, 4, 5])
track2 = Link.from_arrays(tt2, xx2, yy2)
tt3 = np.array([11, 13, 15, 17, 19])
xx3 = np.array([5, 6, 7, 8, 9])
yy3 = np.array([6, 7, 8, 9, 10])
track3 = Link.from_arrays(tt3, xx3, yy3)
res = tracking_pipeline.link_nearby_tracks(
[track1, track2, track3],
max_track_lag=2.1,
max_link_dist=2,
min_track_len=5,
max_relink_attempts=10,
)
exp = [track2, Link.join(track1, track3)]
self.assertEqual(res, exp)
def test_links_closer_of_two_tracks_in_space(self):
tt1 = np.array([1, 3, 5, 7, 9])
xx1 = np.array([0, 1, 2, 3, 4])
yy1 = np.array([1, 2, 3, 4, 5])
track1 = Link.from_arrays(tt1, xx1, yy1)
tt2 = np.array([1.5, 3.5, 5.5, 7.5, 9.5])
xx2 = np.array([0.5, 1.5, 2.5, 3.5, 4.5])
yy2 = np.array([1, 2, 3, 4, 5])
track2 = Link.from_arrays(tt2, xx2, yy2)
tt3 = np.array([11, 13, 15, 17, 19])
xx3 = np.array([5, 6, 7, 8, 9])
yy3 = np.array([6, 7, 8, 9, 10])
track3 = Link.from_arrays(tt3, xx3, yy3)
res = tracking_pipeline.link_nearby_tracks(
[track1, track2, track3],
max_track_lag=2.1,
max_link_dist=2,
min_track_len=5,
)
exp = [track1, Link.join(track2, track3)]
self.assertEqual(res, exp)
def test_links_two_close_tracks(self):
tt1 = np.array([1, 3, 5, 7, 9])
xx1 = np.array([0, 1, 2, 3, 4])
yy1 = np.array([1, 2, 3, 4, 5])
track1 = Link.from_arrays(tt1, xx1, yy1)
tt2 = np.array([11, 13, 15, 17, 19])
xx2 = np.array([5, 6, 7, 8, 9])
yy2 = np.array([6, 7, 8, 9, 10])
track2 = Link.from_arrays(tt2, xx2, yy2)
res = tracking_pipeline.link_nearby_tracks(
[track1, track2],
max_track_lag=2.1,
max_link_dist=2,
min_track_len=5,
)
exp = [Link.join(track1, track2)]
self.assertEqual(res, exp)
def test_doesnt_link_far_tracks_in_space(self):
tt1 = np.array([1, 3, 5, 7, 9])
xx1 = np.array([0, 1, 2, 3, 4])
yy1 = np.array([1, 2, 3, 4, 5])
track1 = Link.from_arrays(tt1, xx1, yy1)
tt2 = np.array([11, 13, 15, 17, 19])
xx2 = np.array([5, 6, 7, 8, 9])
yy2 = np.array([6, 7, 8, 9, 10])
track2 = Link.from_arrays(tt2, xx2, yy2)
res = tracking_pipeline.link_nearby_tracks(
[track1, track2],
max_track_lag=2.1,
max_link_dist=1,
min_track_len=5,
)
exp = [track1, track2]
self.assertEqual(res, exp)
def test_doesnt_link_far_tracks_in_time(self):
tt1 = np.array([1, 3, 5, 7, 9])
xx1 = np.array([0, 1, 2, 3, 4])
yy1 = np.array([1, 2, 3, 4, 5])
track1 = Link.from_arrays(tt1, xx1, yy1)
tt2 = np.array([11, 13, 15, 17, 19])
xx2 = np.array([5, 6, 7, 8, 9])
yy2 = np.array([6, 7, 8, 9, 10])
track2 = Link.from_arrays(tt2, xx2, yy2)
res = tracking_pipeline.link_nearby_tracks(
[track1, track2],
max_track_lag=1,
max_link_dist=2,
min_track_len=5,
)
exp = [track1, track2]
self.assertEqual(res, exp)
def test_doesnt_link_temporally_overlaping_tracks(self):
tt1 = np.array([1, 3, 5, 7, 9])
xx1 = np.array([0, 1, 2, 3, 4])
yy1 = np.array([1, 2, 3, 4, 5])
track1 = Link.from_arrays(tt1, xx1, yy1)
tt2 = np.array([9, 11, 13, 15, 17])
xx2 = np.array([5, 6, 7, 8, 9])
yy2 = np.array([6, 7, 8, 9, 10])
track2 = Link.from_arrays(tt2, xx2, yy2)
res = tracking_pipeline.link_nearby_tracks(
[track1, track2],
max_track_lag=1,
max_link_dist=2,
min_track_len=5,
)
exp = [track1, track2]
self.assertEqual(res, exp)
class TestFilterByLinkDist(unittest.TestCase):
def test_filters_simple_tracks_no_motion(self):
tracks = [
(None, None, np.array([[1, 2], [2, 3], [3, 4]])),
(None, None, np.array([[1, 2], [2, 3], [3, 4]])),
(None, None, np.array([[1, 2], [2, 3], [3, 4]])),
]
res = tracking_pipeline.filter_by_link_dist(tracks, max_link_dist=1)
exp = [
(None, None, np.array([[1, 2], [2, 3], [3, 4]])),
(None, None, np.array([[1, 2], [2, 3], [3, 4]])),
(None, None, np.array([[1, 2], [2, 3], [3, 4]])),
]
self.assertEqual(len(res), len(exp))
for r, e in zip(res, exp):
np.testing.assert_almost_equal(r[2], e[2])
def test_filters_simple_tracks_some_motion(self):
tracks = [
(None, None, np.array([[1, 2], [2, 3], [3, 4]])),
(None, None, np.array([[1.1, 2], [2.1, 3], [3.1, 4]])),
(None, None, np.array([[1.2, 2], [2.2, 3], [3.2, 4]])),
]
res = tracking_pipeline.filter_by_link_dist(tracks, max_link_dist=1)
exp = [
(None, None, np.array([[1, 2], [2, 3], [3, 4]])),
(None, None, np.array([[1.1, 2], [2.1, 3], [3.1, 4]])),
(None, None, np.array([[1.2, 2], [2.2, 3], [3.2, 4]])),
]
self.assertEqual(len(res), len(exp))
for r, e in zip(res, exp):
np.testing.assert_almost_equal(r[2], e[2])
def test_filters_simple_tracks_too_much_motion(self):
tracks = [
(None, None, np.array([[1, 2], [2, 3], [3, 4]])),
(None, None, np.array([[2.1, 2], [3.1, 3], [3.1, 4]])),
(None, None, np.array([[3.2, 2], [4.2, 3], [3.2, 4]])),
]
res = tracking_pipeline.filter_by_link_dist(tracks, max_link_dist=1)
exp = [
(None, None, np.array([[3, 4]])),
(None, None, np.array([[3.1, 4]])),
(None, None, np.array([[3.2, 4]])),
]
self.assertEqual(len(res), len(exp))
for r, e in zip(res, exp):
np.testing.assert_almost_equal(r[2], e[2])
def test_filters_with_empty_detections_in_middle(self):
tracks = [
(None, None, np.array([[1, 2], [2, 3], [3, 4]])),
(None, None, np.zeros((0, 2))),
(None, None, np.array([[1, 2], [2, 3], [3, 4]])),
]
res = tracking_pipeline.filter_by_link_dist(tracks, max_link_dist=1)
exp = [
(None, None, np.array([[1, 2], [2, 3], [3, 4]])),
(None, None, np.zeros((0, 2))),
(None, None, np.array([[1, 2], [2, 3], [3, 4]])),
]
self.assertEqual(len(res), len(exp))
for r, e in zip(res, exp):
np.testing.assert_almost_equal(r[2], e[2])
def test_filters_with_empty_detections_at_start(self):
tracks = [
(None, None, np.zeros((0, 2))),
(None, None, np.array([[1, 2], [2, 3], [3, 4]])),
(None, None, np.array([[1, 2], [2, 3], [3, 4]])),
]
res = tracking_pipeline.filter_by_link_dist(tracks, max_link_dist=1)
exp = [
(None, None, np.zeros((0, 2))),
(None, None, np.array([[1, 2], [2, 3], [3, 4]])),
(None, None, np.array([[1, 2], [2, 3], [3, 4]])),
]
self.assertEqual(len(res), len(exp))
for r, e in zip(res, exp):
np.testing.assert_almost_equal(r[2], e[2])
def test_filters_with_empty_detections_at_end(self):
tracks = [
(None, None, np.array([[1, 2], [2, 3], [3, 4]])),
(None, None, np.array([[1, 2], [2, 3], [3, 4]])),
(None, None, np.zeros((0, 2))),
]
res = tracking_pipeline.filter_by_link_dist(tracks, max_link_dist=1)
exp = [
(None, None, np.array([[1, 2], [2, 3], [3, 4]])),
(None, None, np.array([[1, 2], [2, 3], [3, 4]])),
(None, None, np.zeros((0, 2))),
]
self.assertEqual(len(res), len(exp))
for r, e in zip(res, exp):
np.testing.assert_almost_equal(r[2], e[2])
class TestLoadTrack(FileSystemTestCase):
def test_loads_csv_file_no_image_no_mask(self):
csvfile = self.tempdir / 'test.csv'
cx = np.array([0.0, 0.0, 0.5, 1.0])
cy = np.array([0.0, 0.1, 0.4, 0.9])
cv = np.array([0.09, 0.1, 0.3, 0.8])
save_point_csvfile(csvfile, cx, cy, cv)
exp_img = np.zeros((1000, 1000))
exp_x =
|
np.array([0, 500, 1000])
|
numpy.array
|
from lorenz import *
import numpy as np
from pytest import approx
from scipy.integrate import odeint
def test_lorenz96_constant():
"""
For a uniform (flat) state the advective terms vanish,
so the tendency reduces to the forcing alone
(here every component equals F = 8).
"""
obs_i = np.array([8., 8., 8., 8., 8.])
exp_i = lorenz96(np.array([0, 0, 0, 0, 0]), np.arange(0, 4, 1))
assert (obs_i == exp_i).all()
return
def test_lorenz96_linear_distribution():
"""
For the case of a linear input.
"""
obs_i = np.array([0., 7., 9., 11., -2.])
exp_i = lorenz96(np.array([0, 1, 2, 3, 4]), np.arange(0, 4, 0.1))
assert (obs_i == exp_i).all()
return
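# The expected values above match the standard Lorenz-96 tendency (assuming
# lorenz96 defaults to forcing F = 8):
#   dx_i/dt = (x_{i+1} - x_{i-2}) * x_{i-1} - x_i + F   (indices cyclic)
# e.g. for x = [0, 1, 2, 3, 4]: dx_0/dt = (1 - 3) * 4 - 0 + 8 = 0.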
def test_generate_L96_zero():
"""
For a single time unit with an all-zero data set, zero
perturbation, four variables, and a forcing constant of
zero, the generated trajectory should remain all zeros.
"""
obs_i = np.array([
[0., 0., 0., 0.],
[0., 0., 0., 0.], [0., 0., 0., 0.],
[0., 0., 0., 0.]
])
exp_i = generate_L96(np.zeros(4), 0, 4, 0)
assert (obs_i == exp_i).all()
return
def test_generate_L96_constant():
"""
For multiple time units with a constant data set, zero
perturbation, five variables, and a forcing constant of 8,
the generated trajectory should remain at the constant value 8.
"""
obs_i =
|
np.array([
[8., 8., 8., 8., 8.],
[8., 8., 8., 8., 8.], [8., 8., 8., 8., 8.],
[8., 8., 8., 8., 8.], [8., 8., 8., 8., 8.]
])
|
numpy.array
|
import math
import numpy as np
import pytest
import torch
from deepocr.transforms import (ChannelShuffle, ColorInversion, GaussianNoise, RandomCrop, RandomHorizontalFlip,
RandomRotate, Resize)
from deepocr.transforms.functional import crop_detection, rotate_sample
def test_resize():
output_size = (32, 32)
transfo = Resize(output_size)
input_t = torch.ones((3, 64, 64), dtype=torch.float32)
out = transfo(input_t)
assert torch.all(out == 1)
assert out.shape[-2:] == output_size
assert repr(transfo) == f"Resize(output_size={output_size}, interpolation='bilinear')"
transfo = Resize(output_size, preserve_aspect_ratio=True)
input_t = torch.ones((3, 32, 64), dtype=torch.float32)
out = transfo(input_t)
assert out.shape[-2:] == output_size
assert not torch.all(out == 1)
# Asymmetric padding
assert torch.all(out[:, -1] == 0) and torch.all(out[:, 0] == 1)
# Symmetric padding
transfo = Resize(output_size, preserve_aspect_ratio=True, symmetric_pad=True)
assert repr(transfo) == (f"Resize(output_size={output_size}, interpolation='bilinear', "
f"preserve_aspect_ratio=True, symmetric_pad=True)")
out = transfo(input_t)
assert out.shape[-2:] == output_size
# symmetric padding
assert torch.all(out[:, -1] == 0) and torch.all(out[:, 0] == 0)
# Inverse aspect ratio
input_t = torch.ones((3, 64, 32), dtype=torch.float32)
out = transfo(input_t)
assert not torch.all(out == 1)
assert out.shape[-2:] == output_size
# Same aspect ratio
output_size = (32, 128)
transfo = Resize(output_size, preserve_aspect_ratio=True)
out = transfo(torch.ones((3, 16, 64), dtype=torch.float32))
assert out.shape[-2:] == output_size
# FP16
input_t = torch.ones((3, 64, 64), dtype=torch.float16)
out = transfo(input_t)
assert out.dtype == torch.float16
@pytest.mark.parametrize(
"rgb_min",
[
0.2,
0.4,
0.6,
],
)
def test_invert_colorize(rgb_min):
transfo = ColorInversion(min_val=rgb_min)
input_t = torch.ones((8, 3, 32, 32), dtype=torch.float32)
out = transfo(input_t)
assert torch.all(out <= 1 - rgb_min + 1e-4)
assert torch.all(out >= 0)
input_t = torch.full((8, 3, 32, 32), 255, dtype=torch.uint8)
out = transfo(input_t)
assert torch.all(out <= int(math.ceil(255 * (1 - rgb_min + 1e-4))))
assert torch.all(out >= 0)
# FP16
input_t = torch.ones((8, 3, 32, 32), dtype=torch.float16)
out = transfo(input_t)
assert out.dtype == torch.float16
def test_rotate_sample():
img = torch.ones((3, 200, 100), dtype=torch.float32)
boxes = np.array([0, 0, 100, 200])[None, ...]
polys = np.stack((boxes[..., [0, 1]], boxes[..., [2, 1]], boxes[..., [2, 3]], boxes[..., [0, 3]]), axis=1)
rel_boxes = np.array([0, 0, 1, 1], dtype=np.float32)[None, ...]
rel_polys = np.stack(
(rel_boxes[..., [0, 1]], rel_boxes[..., [2, 1]], rel_boxes[..., [2, 3]], rel_boxes[..., [0, 3]]),
axis=1
)
# No angle
rotated_img, rotated_geoms = rotate_sample(img, boxes, 0, False)
assert torch.all(rotated_img == img) and np.all(rotated_geoms == rel_polys)
rotated_img, rotated_geoms = rotate_sample(img, boxes, 0, True)
assert torch.all(rotated_img == img) and
|
np.all(rotated_geoms == rel_polys)
|
numpy.all
|
#
# This file is part of the chi repository
# (https://github.com/DavAug/chi/) which is released under the
# BSD 3-clause license. See accompanying LICENSE.md for copyright notice and
# full license details.
#
import unittest
import numpy as np
import chi
class TestCovariateModel(unittest.TestCase):
"""
Tests the chi.CovariateModel class.
"""
@classmethod
def setUpClass(cls):
cls.cov_model = chi.CovariateModel()
def test_check_compatibility(self):
pop_model = 'some model'
with self.assertRaisesRegex(NotImplementedError, ''):
self.cov_model.check_compatibility(pop_model)
def test_compute_individual_parameters(self):
parameters = 'some parameters'
eta = 'some fluctuations'
covariates = 'some covariates'
with self.assertRaisesRegex(NotImplementedError, ''):
self.cov_model.compute_individual_parameters(
parameters, eta, covariates
)
def test_compute_individual_sensitivities(self):
parameters = 'some parameters'
eta = 'some fluctuations'
covariates = 'some covariates'
with self.assertRaisesRegex(NotImplementedError, ''):
self.cov_model.compute_individual_sensitivities(
parameters, eta, covariates
)
def test_compute_population_parameters(self):
parameters = 'some parameters'
with self.assertRaisesRegex(NotImplementedError, ''):
self.cov_model.compute_population_parameters(
parameters)
def test_compute_population_sensitivities(self):
parameters = 'some parameters'
with self.assertRaisesRegex(NotImplementedError, ''):
self.cov_model.compute_population_sensitivities(
parameters)
def test_get_covariate_names(self):
names = self.cov_model.get_covariate_names()
self.assertIsNone(names)
def test_get_parameter_names(self):
names = self.cov_model.get_parameter_names()
self.assertIsNone(names)
def test_n_covariates(self):
n = self.cov_model.n_covariates()
self.assertIsNone(n)
def test_n_parameters(self):
n = self.cov_model.n_parameters()
self.assertIsNone(n)
def test_set_covariate_names(self):
names = 'some names'
with self.assertRaisesRegex(NotImplementedError, ''):
self.cov_model.set_covariate_names(names)
def test_set_parameter_names(self):
names = 'some names'
with self.assertRaisesRegex(NotImplementedError, ''):
self.cov_model.set_parameter_names(names)
class TestLogNormalLinearCovariateModel(unittest.TestCase):
"""
Tests the chi.LogNormalLinearCovariateModel class.
"""
@classmethod
def setUpClass(cls):
cls.cov_model = chi.LogNormalLinearCovariateModel(n_covariates=0)
cls.cov_model2 = chi.LogNormalLinearCovariateModel(n_covariates=2)
def test_check_compatibility_fail(self):
# Check that warning is raised with a population model
# that is not a GaussianModel
pop_model = chi.LogNormalModel()
with self.assertWarns(UserWarning):
self.cov_model.check_compatibility(pop_model)
@unittest.expectedFailure
def test_check_compatibility_pass(self):
# Check that warning is not raised with a GaussianModel
pop_model = chi.GaussianModel()
with self.assertWarns(UserWarning):
self.cov_model.check_compatibility(pop_model)
def test_compute_individual_parameters(self):
n_ids = 5
# Test case I: sigma almost 0
# Then psi = np.exp(mu)
# Test case I.1
parameters = [1, 1E-10]
eta = np.linspace(0.5, 1.5, n_ids)
covariates = 'some covariates'
# Compute psis
psis = self.cov_model.compute_individual_parameters(
parameters, eta, covariates)
ref_psis = np.exp([parameters[0]] * n_ids)
self.assertEqual(len(psis), n_ids)
self.assertAlmostEqual(psis[0], ref_psis[0])
self.assertAlmostEqual(psis[1], ref_psis[1])
self.assertAlmostEqual(psis[2], ref_psis[2])
self.assertAlmostEqual(psis[3], ref_psis[3])
self.assertAlmostEqual(psis[4], ref_psis[4])
# Test case I.2
shifts = [-1]
parameters = [1, 1E-10] + shifts
eta = np.linspace(0.5, 1.5, n_ids)
covariates = np.ones(shape=(n_ids, 1))
# Compute psis
psis = self.cov_model2.compute_individual_parameters(
parameters, eta, covariates)
ref_psis = np.exp(
np.array([parameters[0]] * n_ids) + covariates @ np.array(shifts))
self.assertEqual(len(psis), n_ids)
self.assertAlmostEqual(psis[0], ref_psis[0])
self.assertAlmostEqual(psis[1], ref_psis[1])
self.assertAlmostEqual(psis[2], ref_psis[2])
self.assertAlmostEqual(psis[3], ref_psis[3])
self.assertAlmostEqual(psis[4], ref_psis[4])
# Test case I.3
parameters = [1, 1E-10]
eta = np.linspace(0.5, 10, n_ids)
covariates = 'some covariates'
# Compute psis
psis = self.cov_model.compute_individual_parameters(
parameters, eta, covariates)
ref_psis = np.exp([parameters[0]] * n_ids)
self.assertEqual(len(psis), n_ids)
self.assertAlmostEqual(psis[0], ref_psis[0])
self.assertAlmostEqual(psis[1], ref_psis[1])
self.assertAlmostEqual(psis[2], ref_psis[2])
self.assertAlmostEqual(psis[3], ref_psis[3])
self.assertAlmostEqual(psis[4], ref_psis[4])
# Test case I.4
shifts = [5]
parameters = [1, 1E-15] + shifts
eta = np.linspace(0.5, 10, n_ids)
covariates = np.ones(shape=(n_ids, 1)) * 2
# Compute psis
psis = self.cov_model2.compute_individual_parameters(
parameters, eta, covariates)
ref_psis = np.exp(
np.array([parameters[0]] * n_ids) + covariates @ np.array(shifts))
self.assertEqual(len(psis), n_ids)
self.assertAlmostEqual(psis[0], ref_psis[0])
self.assertAlmostEqual(psis[1], ref_psis[1])
self.assertAlmostEqual(psis[2], ref_psis[2])
self.assertAlmostEqual(psis[3], ref_psis[3])
self.assertAlmostEqual(psis[4], ref_psis[4])
# Test case II: mu = 0, sigma != 0
# Then psi = np.exp(sigma * eta)
# Test case II.1
parameters = [0, 1]
eta = np.linspace(0.5, 1.5, n_ids)
covariates = 'some covariates'
# Compute psis
psis = self.cov_model.compute_individual_parameters(
parameters, eta, covariates)
ref_psis = np.exp(parameters[1] * eta)
self.assertEqual(len(psis), n_ids)
self.assertEqual(psis[0], ref_psis[0])
self.assertEqual(psis[1], ref_psis[1])
self.assertEqual(psis[2], ref_psis[2])
self.assertEqual(psis[3], ref_psis[3])
self.assertEqual(psis[4], ref_psis[4])
# Test case II.2
shifts = [-1]
parameters = [0, 1] + shifts
eta = np.linspace(0.5, 1.5, n_ids)
covariates = np.ones(shape=(n_ids, 1))
# Compute psis
psis = self.cov_model2.compute_individual_parameters(
parameters, eta, covariates)
ref_psis = np.exp(
parameters[1] * eta + covariates @ np.array(shifts))
self.assertEqual(len(psis), n_ids)
self.assertAlmostEqual(psis[0], ref_psis[0])
self.assertAlmostEqual(psis[1], ref_psis[1])
self.assertAlmostEqual(psis[2], ref_psis[2])
self.assertAlmostEqual(psis[3], ref_psis[3])
self.assertAlmostEqual(psis[4], ref_psis[4])
# Test case II.3
parameters = [0, 0.1]
eta = np.linspace(0.5, 1.5, n_ids)
covariates = 'some covariates'
# Compute psis
psis = self.cov_model.compute_individual_parameters(
parameters, eta, covariates)
ref_psis = np.exp(parameters[1] * eta)
self.assertEqual(len(psis), n_ids)
self.assertEqual(psis[0], ref_psis[0])
self.assertEqual(psis[1], ref_psis[1])
self.assertEqual(psis[2], ref_psis[2])
self.assertEqual(psis[3], ref_psis[3])
self.assertEqual(psis[4], ref_psis[4])
# Test case II.4
shifts = [5]
parameters = [0, 0.1] + shifts
eta = np.linspace(0.5, 10, n_ids)
covariates = np.ones(shape=(n_ids, 1)) * 2
# Compute psis
psis = self.cov_model2.compute_individual_parameters(
parameters, eta, covariates)
ref_psis = np.exp(
parameters[1] * eta + covariates @ np.array(shifts))
self.assertEqual(len(psis), n_ids)
self.assertAlmostEqual(psis[0], ref_psis[0])
self.assertAlmostEqual(psis[1], ref_psis[1])
self.assertAlmostEqual(psis[2], ref_psis[2])
self.assertAlmostEqual(psis[3], ref_psis[3])
self.assertAlmostEqual(psis[4], ref_psis[4])
# Test case III: mu != 0, sigma != 0
# Then psi = np.exp(mu + sigma * eta)
# Test case III.1
parameters = [-1, 1]
eta = np.linspace(0.5, 1.5, n_ids)
covariates = 'some covariates'
# Compute psis
psis = self.cov_model.compute_individual_parameters(
parameters, eta, covariates)
ref_psis = np.exp(parameters[0] + parameters[1] * eta)
self.assertEqual(len(psis), n_ids)
self.assertEqual(psis[0], ref_psis[0])
self.assertEqual(psis[1], ref_psis[1])
self.assertEqual(psis[2], ref_psis[2])
self.assertEqual(psis[3], ref_psis[3])
self.assertEqual(psis[4], ref_psis[4])
# Test case III.2
shifts = [-1]
parameters = [-1, 1] + shifts
eta = np.linspace(0.5, 1.5, n_ids)
covariates = np.ones(shape=(n_ids, 1))
# Compute psis
psis = self.cov_model2.compute_individual_parameters(
parameters, eta, covariates)
ref_psis = np.exp(
parameters[0] + parameters[1] * eta +
covariates @ np.array(shifts)
)
self.assertEqual(len(psis), n_ids)
self.assertAlmostEqual(psis[0], ref_psis[0])
self.assertAlmostEqual(psis[1], ref_psis[1])
self.assertAlmostEqual(psis[2], ref_psis[2])
self.assertAlmostEqual(psis[3], ref_psis[3])
self.assertAlmostEqual(psis[4], ref_psis[4])
# Test case III.3
parameters = [2, 0.1]
eta = np.linspace(0.5, 1.5, n_ids)
covariates = 'some covariates'
# Compute psis
psis = self.cov_model.compute_individual_parameters(
parameters, eta, covariates)
ref_psis = np.exp(parameters[0] + parameters[1] * eta)
self.assertEqual(len(psis), n_ids)
self.assertEqual(psis[0], ref_psis[0])
self.assertEqual(psis[1], ref_psis[1])
self.assertEqual(psis[2], ref_psis[2])
self.assertEqual(psis[3], ref_psis[3])
self.assertEqual(psis[4], ref_psis[4])
# Test case III.4
shifts = [5]
parameters = [2, 0.1] + shifts
eta = np.linspace(0.5, 10, n_ids)
covariates = np.ones(shape=(n_ids, 1)) * 2
# Compute psis
psis = self.cov_model2.compute_individual_parameters(
parameters, eta, covariates)
ref_psis = np.exp(
parameters[0] + parameters[1] * eta +
covariates @ np.array(shifts)
)
self.assertEqual(len(psis), n_ids)
self.assertAlmostEqual(psis[0], ref_psis[0])
self.assertAlmostEqual(psis[1], ref_psis[1])
self.assertAlmostEqual(psis[2], ref_psis[2])
self.assertAlmostEqual(psis[3], ref_psis[3])
self.assertAlmostEqual(psis[4], ref_psis[4])
# Test case IV: sigma = 0
parameters = [2, 0]
eta =
|
np.linspace(0.5, 1.5, n_ids)
|
numpy.linspace
|
import errno
import multiprocessing
import os
import timeit
from subprocess import CalledProcessError
import numpy as np
from numpy.testing import assert_equal
import pytest
import nengo
from nengo.cache import (CacheIndex, DecoderCache, Fingerprint,
get_fragment_size, WriteableCacheIndex)
from nengo.exceptions import CacheIOWarning, FingerprintError
from nengo.solvers import LstsqL2
from nengo.utils.compat import int_types
class SolverMock(object):
n_calls = {}
def __init__(self):
self.n_calls[self] = 0
def __call__(self, conn, gain, bias, x, targets, rng=np.random, E=None):
self.n_calls[self] += 1
if E is None:
return np.random.rand(x.shape[1], targets.shape[1]), {'info': 'v'}
else:
return np.random.rand(x.shape[1], E.shape[1]), {'info': 'v'}
def get_solver_test_args(**kwargs):
M = 100
N = 10
D = 2
conn = nengo.Connection(
nengo.Ensemble(N, D, add_to_container=False),
nengo.Node(size_in=D, add_to_container=False),
add_to_container=False)
conn.solver = kwargs.pop('solver', nengo.solvers.LstsqL2nz())
defaults = {
'conn': conn,
'gain': np.ones(N),
'bias': np.ones(N),
'x': np.ones((M, D)),
'targets': np.ones((M, N)),
'rng': np.random.RandomState(42),
}
defaults.update(kwargs)
return defaults
def get_weight_solver_test_args():
M = 100
N = 10
N2 = 5
D = 2
conn = nengo.Connection(
nengo.Ensemble(N, D, add_to_container=False),
nengo.Node(size_in=D, add_to_container=False),
solver=nengo.solvers.LstsqL2nz(), add_to_container=False)
return {
'conn': conn,
'gain': np.ones(N),
'bias': np.ones(N),
'x': np.ones((M, D)),
'targets': np.ones((M, N)),
'rng': np.random.RandomState(42),
'E': np.ones((D, N2)),
}
def test_decoder_cache(tmpdir):
cache_dir = str(tmpdir)
# Basic test, that results are cached.
with DecoderCache(cache_dir=cache_dir) as cache:
solver_mock = SolverMock()
decoders1, solver_info1 = cache.wrap_solver(solver_mock)(
**get_solver_test_args())
assert SolverMock.n_calls[solver_mock] == 1
decoders2, solver_info2 = cache.wrap_solver(solver_mock)(
**get_solver_test_args())
assert SolverMock.n_calls[solver_mock] == 1 # result read from cache?
assert_equal(decoders1, decoders2)
assert solver_info1 == solver_info2
solver_args = get_solver_test_args()
solver_args['gain'] *= 2
decoders3, solver_info3 = cache.wrap_solver(solver_mock)(**solver_args)
assert SolverMock.n_calls[solver_mock] == 2
assert np.any(decoders1 != decoders3)
# Test that the cache does not load results of another solver.
another_solver = SolverMock()
cache.wrap_solver(another_solver)(**get_solver_test_args(
solver=nengo.solvers.LstsqNoise()))
assert SolverMock.n_calls[another_solver] == 1
def test_corrupted_decoder_cache(tmpdir):
cache_dir = str(tmpdir)
with DecoderCache(cache_dir=cache_dir) as cache:
solver_mock = SolverMock()
cache.wrap_solver(solver_mock)(**get_solver_test_args())
assert SolverMock.n_calls[solver_mock] == 1
# corrupt the cache
for path in cache.get_files():
with open(path, 'w') as f:
f.write('corrupted')
cache.wrap_solver(solver_mock)(**get_solver_test_args())
assert SolverMock.n_calls[solver_mock] == 2
def test_corrupted_decoder_cache_index(tmpdir):
cache_dir = str(tmpdir)
with DecoderCache(cache_dir=cache_dir):
pass # Initialize cache with required files
assert len(os.listdir(cache_dir)) == 2 # index, index.lock
# Write corrupted index
with open(os.path.join(cache_dir, CacheIndex._INDEX), 'w') as f:
f.write('(d') # empty dict, but missing '.' at the end
# Try to load index
with DecoderCache(cache_dir=cache_dir):
pass
assert len(os.listdir(cache_dir)) == 2 # index, index.lock
def test_decoder_cache_invalidation(tmpdir):
cache_dir = str(tmpdir)
solver_mock = SolverMock()
# Basic test, that results are cached.
with DecoderCache(cache_dir=cache_dir) as cache:
cache.wrap_solver(solver_mock)(**get_solver_test_args())
assert SolverMock.n_calls[solver_mock] == 1
cache.invalidate()
cache.wrap_solver(solver_mock)(**get_solver_test_args())
assert SolverMock.n_calls[solver_mock] == 2
def test_decoder_cache_size_includes_overhead(tmpdir):
cache_dir = str(tmpdir)
solver_mock = SolverMock()
with DecoderCache(cache_dir=cache_dir) as cache:
cache.wrap_solver(solver_mock)(**get_solver_test_args())
fragment_size = get_fragment_size(cache_dir)
actual_size = sum(os.stat(p).st_size for p in cache.get_files())
assert actual_size % fragment_size != 0, (
'Test succeeded by chance. Adjust get_solver_test_args() to '
'produce data not aligned with the file system fragment size.')
assert cache.get_size_in_bytes() % fragment_size == 0
def test_decoder_cache_shrinking(tmpdir):
cache_dir = str(tmpdir)
solver_mock = SolverMock()
another_solver = SolverMock()
with DecoderCache(cache_dir=cache_dir) as cache:
cache.wrap_solver(solver_mock)(**get_solver_test_args())
# Ensure differing time stamps (depending on the file system the
# timestamp resolution might be as bad as 1 day).
for path in cache.get_files():
timestamp = os.stat(path).st_atime
timestamp -= 60 * 60 * 24 * 2 # 2 days
os.utime(path, (timestamp, timestamp))
with DecoderCache(cache_dir=cache_dir) as cache:
cache.wrap_solver(another_solver)(**get_solver_test_args(
solver=nengo.solvers.LstsqNoise()))
cache_size = cache.get_size_in_bytes()
assert cache_size > 0
cache.shrink(cache_size - 1)
# check that older cached result was removed
assert SolverMock.n_calls[solver_mock] == 1
cache.wrap_solver(another_solver)(**get_solver_test_args(
solver=nengo.solvers.LstsqNoise()))
cache.wrap_solver(solver_mock)(**get_solver_test_args())
assert SolverMock.n_calls[solver_mock] == 2
assert SolverMock.n_calls[another_solver] == 1
def test_decoder_cache_shrink_threadsafe(monkeypatch, tmpdir):
"""Tests that shrink handles files deleted by other processes."""
cache_dir = str(tmpdir)
solver_mock = SolverMock()
with DecoderCache(cache_dir=cache_dir) as cache:
cache.wrap_solver(solver_mock)(**get_solver_test_args())
limit = cache.get_size()
# Ensure differing time stamps (depending on the file system the
# timestamp resolution might be as bad as 1 day).
for filename in os.listdir(cache.cache_dir):
path = os.path.join(cache.cache_dir, filename)
timestamp = os.stat(path).st_atime
timestamp -= 60 * 60 * 24 * 2 # 2 days
os.utime(path, (timestamp, timestamp))
cache.wrap_solver(solver_mock)(**get_solver_test_args(
solver=nengo.solvers.LstsqNoise()))
cache_size = cache.get_size_in_bytes()
assert cache_size > 0
def raise_file_not_found(orig_fn):
def fn(filename, *args, **kwargs):
if filename.endswith('.lock'):
return orig_fn(filename, *args, **kwargs)
raise OSError(errno.ENOENT, "File not found.")
return fn
monkeypatch.setattr(cache, 'get_size_in_bytes', lambda: cache_size)
monkeypatch.setattr('os.stat', raise_file_not_found(os.stat))
monkeypatch.setattr('os.remove', raise_file_not_found(os.remove))
monkeypatch.setattr('os.unlink', raise_file_not_found(os.unlink))
cache.shrink(limit)
def test_decoder_cache_with_E_argument_to_solver(tmpdir):
cache_dir = str(tmpdir)
solver_mock = SolverMock()
with DecoderCache(cache_dir=cache_dir) as cache:
decoders1, solver_info1 = cache.wrap_solver(solver_mock)(
**get_weight_solver_test_args())
assert SolverMock.n_calls[solver_mock] == 1
decoders2, solver_info2 = cache.wrap_solver(solver_mock)(
**get_weight_solver_test_args())
assert SolverMock.n_calls[solver_mock] == 1 # read from cache?
assert_equal(decoders1, decoders2)
assert solver_info1 == solver_info2
class DummyA(object):
def __init__(self, attr=0):
self.attr = attr
nengo.cache.Fingerprint.whitelist(DummyA)
class DummyB(object):
def __init__(self, attr=0):
self.attr = attr
nengo.cache.Fingerprint.whitelist(DummyB)
def dummy_fn(arg):
pass
@pytest.mark.parametrize('reference, equal, different', (
(True, True, False), # bool
(False, False, True), # bool
(1.0, 1.0, 2.0), # float
(1.0 + 2.0j, 1 + 2j, 2.0 + 1j), # complex
(b'a', b'a', b'b'), # bytes
((0, 1), (0, 1), (0, 2)), # tuple
([0, 1], [0, 1], [0, 2]), # list
(u'a', u'a', u'b'), # unicode string
(np.eye(2), np.eye(2), np.array([[0, 1], [1, 0]])), # array
(DummyA(), DummyA(), DummyB()), # object instance
(DummyA(1), DummyA(1), DummyA(2)), # object instance
(LstsqL2(reg=.1), LstsqL2(reg=.1), LstsqL2(reg=.2)), # solver
) + tuple((typ(1), typ(1), typ(2)) for typ in int_types))
def test_fingerprinting(reference, equal, different):
assert str(Fingerprint(reference)) == str(Fingerprint(equal))
assert str(Fingerprint(reference)) != str(Fingerprint(different))
@pytest.mark.parametrize('obj', (
np.array([object()]), # array
np.array([(1.,)], dtype=[('field1', 'f8')]), # array
{'a': 1, 'b': 2}, # dict
object(), # object instance
dummy_fn, # function
))
def test_unsupported_fingerprinting(obj):
with pytest.raises(FingerprintError):
Fingerprint(obj)
def test_fails_for_lambda_expression():
with pytest.raises(FingerprintError):
Fingerprint(lambda x: x)
def test_cache_works(tmpdir, RefSimulator, seed):
cache_dir = str(tmpdir)
model = nengo.Network(seed=seed)
with model:
nengo.Connection(nengo.Ensemble(10, 1), nengo.Ensemble(10, 1))
assert len(os.listdir(cache_dir)) == 0
with RefSimulator(model, model=nengo.builder.Model(
dt=0.001, decoder_cache=DecoderCache(cache_dir=cache_dir))):
assert len(os.listdir(cache_dir)) == 3 # index, index.lock, and *.nco
def test_cache_not_used_without_seed(tmpdir, RefSimulator):
cache_dir = str(tmpdir)
model = nengo.Network()
with model:
nengo.Connection(nengo.Ensemble(10, 1), nengo.Ensemble(10, 1))
assert len(os.listdir(cache_dir)) == 0
with RefSimulator(model, model=nengo.builder.Model(
dt=0.001, decoder_cache=DecoderCache(cache_dir=cache_dir))):
assert len(os.listdir(cache_dir)) == 2 # index, index.lock
def build_many_ensembles(cache_dir, RefSimulator):
with nengo.Network(seed=1) as model:
for _ in range(100):
nengo.Connection(nengo.Ensemble(10, 1), nengo.Ensemble(10, 1))
with RefSimulator(model, model=nengo.builder.Model(
dt=0.001, decoder_cache=DecoderCache(cache_dir=cache_dir))):
pass
@pytest.mark.slow
def test_cache_concurrency(tmpdir, RefSimulator):
cache_dir = str(tmpdir)
n_processes = 100
processes = [
multiprocessing.Process(
target=build_many_ensembles, args=(cache_dir, RefSimulator))
for _ in range(n_processes)]
for p in processes:
p.start()
for p in processes:
p.join(60)
for p in processes:
assert p.exitcode == 0
def reject_outliers(data):
med = np.median(data)
limits = 1.5 * (np.percentile(data, [25, 75]) - med) + med
return np.asarray(data)[np.logical_and(data > limits[0], data < limits[1])]
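# reject_outliers keeps only samples strictly inside an asymmetric band of
# 1.5 * (quartile - median) around the median (a variant of the usual IQR
# rule), so occasional anomalous timing runs are dropped before comparison.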
class TestCacheBenchmark(object):
n_trials = 25
setup = '''
import numpy as np
import nengo
import nengo.cache
from nengo.rc import rc
rc.set("decoder_cache", "path", {tmpdir!r})
model = nengo.Network(seed=1)
with model:
a = nengo.Ensemble({N}, dimensions={D}, n_eval_points={M})
b = nengo.Ensemble({N}, dimensions={D}, n_eval_points={M})
conn = nengo.Connection(a, b)
'''
without_cache = {
'rc': '''
rc.set("progress", "progress_bar", "none")
rc.set("decoder_cache", "enabled", "False")
''',
'stmt': '''
with nengo.Simulator(model):
pass
'''
}
with_cache_miss_ro = {
'rc': '''
rc.set("progress", "progress_bar", "none")
with nengo.cache.DecoderCache() as cache:
cache.invalidate()
rc.set("decoder_cache", "enabled", "True")
rc.set("decoder_cache", "readonly", "True")
''',
'stmt': '''
with nengo.Simulator(model):
pass
'''
}
with_cache_miss = {
'rc': '''
rc.set("progress", "progress_bar", "none")
with nengo.cache.DecoderCache() as cache:
cache.invalidate()
rc.set("decoder_cache", "enabled", "True")
rc.set("decoder_cache", "readonly", "False")
''',
'stmt': '''
with nengo.Simulator(model):
pass
'''
}
with_cache_hit = {
'rc': '''
rc.set("progress", "progress_bar", "none")
rc.set("decoder_cache", "enabled", "True")
rc.set("decoder_cache", "readonly", "False")
with nengo.Simulator(model):
pass
''',
'stmt': '''
with nengo.Simulator(model):
pass
'''
}
labels = ["no cache", "cache miss", "cache miss ro", "cache hit"]
keys = [l.replace(' ', '_') for l in labels]
param_to_axis_label = {
'D': "dimensions",
'N': "neurons",
'M': "evaluation points"
}
defaults = {'D': 1, 'N': 50, 'M': 1000}
def time_code(self, code, **kwargs):
return timeit.repeat(
stmt=code['stmt'], setup=self.setup.format(**kwargs) + code['rc'],
number=1, repeat=self.n_trials)
def time_all(self, **kwargs):
return (self.time_code(self.without_cache, **kwargs),
self.time_code(self.with_cache_miss, **kwargs),
self.time_code(self.with_cache_miss_ro, **kwargs),
self.time_code(self.with_cache_hit, **kwargs))
def get_args(self, varying_param, value):
args = dict(self.defaults) # make a copy
args[varying_param] = value
return args
@pytest.mark.slow
@pytest.mark.noassertions
@pytest.mark.parametrize('varying_param', ['D', 'N', 'M'])
def test_cache_benchmark(self, tmpdir, varying_param, analytics, plt):
varying = {
'D': np.asarray(np.linspace(1, 512, 10), dtype=int),
'N': np.asarray(np.linspace(10, 500, 8), dtype=int),
'M': np.asarray(np.linspace(750, 2500, 8), dtype=int)
}[varying_param]
axis_label = self.param_to_axis_label[varying_param]
times = [
self.time_all(
tmpdir=str(tmpdir), **self.get_args(varying_param, v))
for v in varying]
for i, data in enumerate(zip(*times)):
plt.plot(varying, np.median(data, axis=1), label=self.labels[i])
plt.xlim(right=varying[-1])
analytics.add_data(varying_param, varying, axis_label)
analytics.add_data(self.keys[i], data)
plt.xlabel("Number of %s" % axis_label)
plt.ylabel("Build time (s)")
plt.legend(loc='best')
@pytest.mark.compare
@pytest.mark.parametrize('varying_param', ['D', 'N', 'M'])
def test_compare_cache_benchmark(
self, varying_param, analytics_data, plt, logger):
stats = pytest.importorskip('scipy.stats')
d1, d2 = analytics_data
assert np.all(d1[varying_param] == d2[varying_param]), (
'Cannot compare different parametrizations')
axis_label = self.param_to_axis_label[varying_param]
logger.info("Cache, varying %s:", axis_label)
for label, key in zip(self.labels, self.keys):
clean_d1 = [reject_outliers(d) for d in d1[key]]
clean_d2 = [reject_outliers(d) for d in d2[key]]
diff = [np.median(b) - np.median(a)
for a, b in zip(clean_d1, clean_d2)]
p_values = np.array(
[2. * stats.mannwhitneyu(a, b, alternative='two-sided')[1]
for a, b in zip(clean_d1, clean_d2)])
overall_p = 1. - np.prod(1. - p_values)
if overall_p < .05:
logger.info(" %s: Significant change (p <= %.3f). See plots.",
label, np.ceil(overall_p * 1000.) / 1000.)
else:
logger.info(" %s: No significant change.", label)
plt.plot(d1[varying_param], diff, label=label)
plt.xlabel("Number of %s" % axis_label)
plt.ylabel("Difference in build time (s)")
plt.legend(loc='best')
class TestCacheShrinkBenchmark(object):
n_trials = 50
setup = '''
import numpy as np
import nengo
import nengo.cache
from nengo.rc import rc
rc.set("progress", "progress_bar", "none")
rc.set("decoder_cache", "path", {tmpdir!r})
for i in range(10):
model = nengo.Network(seed=i)
with model:
a = nengo.networks.EnsembleArray(10, 128, 1)
b = nengo.networks.EnsembleArray(10, 128, 1)
conn = nengo.Connection(a.output, b.input)
with nengo.Simulator(model):
pass
rc.set("decoder_cache", "size", "0KB")
cache = nengo.cache.DecoderCache()
'''
stmt = 'with cache: cache.shrink()'
@pytest.mark.slow
@pytest.mark.noassertions
def test_cache_shrink_benchmark(self, tmpdir, analytics, logger):
times = timeit.repeat(
stmt=self.stmt, setup=self.setup.format(tmpdir=str(tmpdir)),
number=1, repeat=self.n_trials)
logger.info("Shrink took a minimum of %f seconds.",
|
np.min(times)
|
numpy.min
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 23 16:45:59 2015
Script to create all the plots used in Varenna-Lausane paper.
@author: lshi
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hilbert
import sdp.scripts.nstx_reflectometry.load_nstx_exp_ref as nstx_exp
import sdp.scripts.FWR_Postprocess.FWR2D_NSTX_139047_Postprocess as fwr_pp
from sdp.diagnostic.fwr.analysis import phase, magnitude, Coherent_Signal, Cross_Correlation, Cross_Correlation_by_fft
from sdp.diagnostic.fwr.nstx.nstx import band_pass_filter
from sdp.math.funcs import band_pass_box, sweeping_correlation
from sdp.diagnostic.fwr.fwr2d.postprocess import fitting_cross_correlation,gaussian_fit,exponential_fit,remove_average_phase, remove_average_field
from sdp.math.spectra import spectrum
import pickle
with open('/p/gkp/lshi/XGC1_NSTX_Case/FullF_XGC_ti191_output/new_ref_pos.pck','r') as f:
ref_pos,freqs = pickle.load(f)
Z_mid = (ref_pos.shape[0]-1)/2
ref_pos_mid = ref_pos[Z_mid,:]
class Picture:
"""base class for paper pictures, contains abstract methods and components' names
"""
def __init__(self,title,description):
self.title = title
self.description = description
def prepare(self):
"""abstract method for preparing data before plotting
"""
pass
def show(self):
"""abstract method for plotting
"""
pass
#def close(self):
# """abstract method for closing the window
# """
# pass
class Plot1(Picture):
"""Plot1: 55GHz phase/dphase plot showing chosen time period (0.632-0.633s), overall time is chosen to be 0.4-0.8s
"""
def __init__(self,channel = 11):
Picture.__init__(self,'Fulltime Phase/dPhase Plot','Plot1: 55GHz phase/dphase plot showing chosen time period (0.632-0.633s), overall time is chosen to be 0.4-0.8s')
self.channel = channel
def prepare(self):
"""prepare the data for Plot1
"""
self.tstart = 0.4
self.tend = 0.8
self.t_on = 0.632
self.t_off = 0.633
loader = nstx_exp.loaders[self.channel]
sig,self.time = loader.signal(self.tstart,self.tend)
self.ph,self.dph = phase(sig)
def show(self):
"""Make the plot with specified formats.
"""
self.fig = plt.figure()
self.subfig1 = self.fig.add_subplot(211)
self.phase_plot = self.subfig1.plot(self.time,self.ph,'k-',linewidth=0.5)
ylim1 = self.subfig1.get_ylim()
self.vline1 = self.subfig1.plot([self.t_on,self.t_off],ylim1,'r-',linewidth=1)
self.subfig1.set_xbound(self.tstart,self.tend)
self.subfig1.set_title('(a)',y=-0.2)
self.subfig1.set_ylabel('$\phi(rad)$')
self.subfig2 = self.fig.add_subplot(212,sharex = self.subfig1)
self.dph_plot = self.subfig2.plot(self.time[:-1],self.dph,'k-',linewidth = 0.05)
ylim2 = self.subfig2.get_ylim()
self.vline2 = self.subfig2.plot([self.t_on,self.t_off],ylim2,'r-',linewidth=1)
self.subfig2.set_xbound(self.tstart,self.tend)
self.subfig2.set_title('(b)',y=-0.3)
self.subfig2.set_xlabel('time(s)')
self.subfig2.set_ylabel('$\Delta\phi(rad)$')
self.fig.canvas.draw()
class Plot2(Plot1):
"""Plot2: zoomed in plot for chosen channel
"""
def __init__(self,channel = 11):
Plot1.__init__(self,channel)
Picture.__init__(self,'Zoomed Phase/dPhase Plot','Plot2:chosen channel zoomed in for t1 to t2')
def show(self):
"""Make the plot with specified formats.
"""
self.fig = plt.figure()
self.subfig1 = self.fig.add_subplot(211)
self.phase_plot = self.subfig1.plot(self.time,self.ph,'k-',linewidth=0.5)
self.subfig1.set_xbound(self.tstart,self.tend)
self.subfig1.set_title('(c)',y=-0.2)
self.subfig1.set_ylabel('$\phi(rad)$')
self.subfig2 = self.fig.add_subplot(212,sharex = self.subfig1)
self.dph_plot = self.subfig2.plot(self.time[:-1],self.dph,'k-',linewidth = 0.2)
self.subfig2.set_xbound(self.tstart,self.tend)
self.subfig2.set_title('(d)',y=-0.3)
self.subfig2.set_xlabel('time(s)')
self.subfig2.set_ylabel('$\Delta\phi(rad)$')
self.fig.canvas.draw()
def zoom(self,t1,t2):
assert(t1<t2 and t1>self.time[0] and t2 < self.time[-1])
self.subfig1.set_xlim(t1,t2)
arg1,arg2 = np.searchsorted(self.time,[t1,t2])
self.subfig1.set_ylim(np.min(self.ph[arg1:arg2])-1,np.max(self.ph[arg1:arg2]))
self.fig.canvas.draw()
class Plot3(Picture):
"""Plot3: 62.5GHz phase signal frequency domain compared with corresponding FWR result, non-relevant frequencies shaden.
"""
def __init__(self,channel = 11,channel_freq = 62.5):
Picture.__init__(self,'Frequency domain comparison','Plot3: 62.5GHz phase signal frequency domain compared with corresponding FWR result, non-relevant frequencies shaded.')
self.channel = channel
self.channel_freq = channel_freq
def prepare(self):
self.tstart = 0.632
self.tend = 0.633
self.f_low_norm = 1e4 #lower frequency cutoff for normalization set to 10 kHz
self.f_high_norm = 5e5 #high frequency cutoff for normalization set to 500 kHz
self.f_low_show = 4e4 # lower frequency shown for shading
self.f_high_show = 5e5# higher frequency shown for shading
loader = nstx_exp.loaders[self.channel]
sig,time = loader.signal(self.tstart,self.tend)
ph,dph = phase(sig)
n = len(time)
dt = time[1]-time[0]
#get the fft frequency array
'''
self.freqs_nstx = np.fft.rfftfreq(n,dt)
idx_low,idx_high = np.searchsorted(self.freqs_nstx,[self.f_low_norm,self.f_high_norm]) #note that only first half of the frequency array is holding positive frequencies. The rest are negative ones.
#get the spectra of the phase signal, since it's real, only positive frequencies are needed
spectrum_nstx = np.fft.rfft(ph)
self.power_density_nstx = np.real(spectrum_nstx*np.conj(spectrum_nstx))
pd_in_range = self.power_density_nstx[idx_low:idx_high]
df = self.freqs_nstx[1]-self.freqs_nstx[0]
total_power_in_range = 0.5*df*np.sum(pd_in_range[:-1]+pd_in_range[1:]) #trapezoidal formula of integration is used here.
#normalize the spectrum to make the in-range total energy be 1
self.power_density_nstx /= total_power_in_range
'''
self.spectrum_nstx,self.power_density_nstx,self.freqs_nstx = spectrum(ph,dt,True)
print('Experimental data ready.')
ref2d = fwr_pp.load_2d([self.channel_freq],np.arange(100,220,1))
sig_fwr = ref2d.E_out[0,:,0] #note that in reflectometer_output object, E_out is saved in shape (NF,NT,NC). Here we only need the time dimension for the chosen frequency and cross-section.
ph_fwr,dph_fwr = phase(sig_fwr)
dt_fwr = 1e-6
time_fwr = np.arange(100,220,1)*dt_fwr
n_fwr = len(time_fwr)
'''
#similar normalization method for FWR results, temporary variables are reused for fwr quantities
self.freqs_fwr = np.fft.rfftfreq(n_fwr,dt_fwr)
idx_low,idx_high = np.searchsorted(self.freqs_fwr,[self.f_low_norm,self.f_high_norm]) #note that only first half of the frequency array is holding positive frequencies. The rest are negative ones.
spectrum_fwr = np.fft.rfft(ph_fwr)
self.power_density_fwr = np.real(spectrum_fwr * np.conj(spectrum_fwr))
pd_in_range = self.power_density_fwr[idx_low:idx_high]
df = self.freqs_fwr[1]-self.freqs_fwr[0]
total_power_in_range = 0.5*df*np.sum(pd_in_range[:-1]+pd_in_range[1:]) #trapezoidal formula of integration is used here.
self.power_density_fwr /= total_power_in_range
'''
self.spectrum_fwr,self.power_density_fwr,self.freqs_fwr = spectrum(ph_fwr,dt_fwr,True)
print('FWR data ready.')
def show(self,black_white = False):
if(black_white):
ls_nstx = 'k.'
ls_fwr = 'k-'
else:
ls_nstx = 'b-'
ls_fwr = 'r-'
self.fig = plt.figure()
self.subfig = self.fig.add_subplot(111)
self.line_nstx = self.subfig.loglog(self.freqs_nstx,self.power_density_nstx,ls_nstx,linewidth = 0.5, label = 'EXP')
self.line_fwr = self.subfig.loglog(self.freqs_fwr,self.power_density_fwr,ls_fwr,label = 'FWR')
self.subfig.legend(loc = 'lower left')
self.subfig.set_xlabel('frequency (Hz)')
self.subfig.set_ylabel('normalized power density (a.u.)')
#Shade over not used frequency bands
freq_lowerband = np.linspace(1e3,self.f_low_show,10)
freq_higherband = np.linspace(self.f_high_show,5e6,10)
power_min = np.min([np.min(self.power_density_fwr),np.min(self.power_density_nstx)])
power_max = np.max([np.max(self.power_density_fwr),np.max(self.power_density_nstx)])
self.shade_lower = self.subfig.fill_between(freq_lowerband,power_min,power_max,color = 'g',alpha = 0.3)
self.shade_higher = self.subfig.fill_between(freq_higherband,power_min,power_max,color = 'g',alpha = 0.3)
class Plot4(Picture):
"""Plot 4: Comparison between filtered and original signals. From experimental data, 55GHz channel.
"""
def __init__(self,channel=11,channel_freq = 62.5):
Picture.__init__(self,'Comparison of filtered signals','Plot 4: Comparison between filtered and original signals. From both experimental and simulation')
self.channel = channel
self.channel_freq = channel_freq
def prepare(self,t_exp = 0.001):
self.tstart = 0.632
self.tend = self.tstart+t_exp
self.f_low = 4e4 #lower frequency set to be 40 kHz
self.f_high = 5e5 #high end set to 500 kHz
loader = nstx_exp.loaders[self.channel]
self.sig_nstx,self.time = loader.signal(self.tstart,self.tend)
self.phase_nstx,self.dph_nstx = phase(self.sig_nstx)
self.magnitude_nstx = magnitude(self.sig_nstx)
self.mean_mag_nstx = np.mean(self.magnitude_nstx)
n = len(self.time)
dt = self.time[1]-self.time[0]
#get the fft frequency array
self.freqs_nstx = np.fft.fftfreq(n,dt)
idx_low,idx_high = np.searchsorted(self.freqs_nstx[:n/2+1],[self.f_low,self.f_high]) #note that only first half of the frequency array is holding positive frequencies. The rest are negative ones.
#get the fft result for experimental data
self.phase_spectrum_nstx = np.fft.fft(self.phase_nstx) #Full fft is used here for filtering and inverse fft
self.filtered_phase_spectrum_nstx = band_pass_box(self.phase_spectrum_nstx,idx_low,idx_high)
self.mag_spectrum_nstx = np.fft.fft(self.magnitude_nstx)
self.filtered_mag_spectrum_nstx= band_pass_box(self.mag_spectrum_nstx,idx_low,idx_high)
self.filtered_phase_nstx = np.fft.ifft(self.filtered_phase_spectrum_nstx)
self.filtered_mag_nstx = np.fft.ifft(self.filtered_mag_spectrum_nstx) + self.mean_mag_nstx # We want to stack the magnitude fluctuation on top of the averaged magnitude
self.filtered_sig_nstx = self.filtered_mag_nstx * np.exp(1j * self.filtered_phase_nstx)
def show(self,black_white = False):
if(black_white):
ls_orig = 'k.'
ls_filt = 'k-'
else:
ls_orig = 'b-'
ls_filt = 'r-'
self.fig,(self.subfig1,self.subfig2,self.subfig3,self.subfig4) = plt.subplots(4,1,sharex=True)
self.orig_pha_nstx_plot = self.subfig1.plot(self.time,self.phase_nstx,ls_orig,linewidth = 1, label = 'ORIGINAL_PHASE')
self.filt_pha_nstx_plot= self.subfig2.plot(self.time,self.filtered_phase_nstx,ls_filt,linewidth = 1,label = 'FILTERED_PHASE')
self.orig_mag_nstx_plot = self.subfig3.plot(self.time,self.magnitude_nstx,ls_orig,linewidth = 1,label = 'ORIGINAL_MAGNITUDE')
self.filt_mag_nstx_plot= self.subfig4.plot(self.time,self.filtered_mag_nstx,ls_filt,linewidth = 1,label = 'FILTERED_MAGNITUDE')
mag_low,mag_high = self.subfig3.get_ybound()
self.subfig4.set_ybound(mag_low,mag_high)
#self.line_fwr = self.subfig.loglog(self.freqs_fwr,self.power_density_fwr,ls_fwr,label = 'FWR')
self.subfig1.legend(loc = 'best',prop = {'size':10})
self.subfig2.legend(loc = 'best',prop = {'size':10})
self.subfig3.legend(loc = 'best',prop = {'size':10})
self.subfig4.legend(loc = 'best',prop = {'size':10})
self.subfig4.set_xlabel('time (s)')
self.subfig1.set_ylabel('$\phi$ (rad)')
self.subfig2.set_ylabel('$\phi$ (rad)')
self.subfig3.set_ylabel('magnitude (a.u.)')
self.subfig4.set_ylabel('magnitude (a.u.)')
self.subfig4.set_xbound(self.tstart,self.tend)
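# NOTE (illustrative sketch, assumption): band_pass_box is defined elsewhere in this module.
# From its use in Plot4.prepare above it appears to act as a boxcar band-pass in frequency
# space, keeping only the FFT bins between idx_low and idx_high and zeroing the rest; whether
# the negative-frequency mirror bins are also retained is not shown here. A minimal stand-in
# consistent with that usage could look like:
#
#   def band_pass_box(spectrum, idx_low, idx_high):
#       filtered = np.zeros_like(spectrum)
#       filtered[idx_low:idx_high] = spectrum[idx_low:idx_high]
#       return filtered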
class Plot5(Plot4):
"""Plot 5: Experimental g factor as a function of window width, show that filtered signal has indeed a stable g factor
"""
def __init__(self,channel=11,channel_freq=62.5):
Plot4.__init__(self,channel,channel_freq)
Picture.__init__(self,'Plot5:g factor VS window widths','Plot 5: Experimental g factor as a function of window width, show that filtered signal has indeed a stable g factor')
def prepare(self):
Plot4.prepare(self)
t_total = self.tend - self.tstart
self.avg_windows = np.logspace(-5,np.log10(t_total),50)
t_lower = self.time[0]+1e-5
idx_lower = np.searchsorted(self.time,t_lower)
t_upper = self.time[idx_lower]+self.avg_windows
idx_upper = np.searchsorted(self.time,t_upper)
self.g_orig = np.zeros_like(self.avg_windows)
self.g_filt = np.zeros_like(self.avg_windows)
for i in range(len(self.avg_windows)) :
idx = idx_lower + idx_upper[i]
sig = self.sig_nstx[idx_lower:idx]
sig_filt = self.filtered_sig_nstx[idx_lower:idx]
self.g_orig[i] = np.abs(Coherent_Signal(sig))
self.g_filt[i] = np.abs(Coherent_Signal(sig_filt))
def show(self,black_white = False):
if(black_white):
ls_orig = 'k--'
ls_filt = 'k-'
else:
ls_orig = 'b-'
ls_filt = 'r-'
self.fig = plt.figure()
self.subfig = self.fig.add_subplot(111)
self.g_orig_line = self.subfig.semilogx(self.avg_windows,self.g_orig,ls_orig,linewidth = 1,label = 'ORIGINAL')
self.g_filt_line = self.subfig.semilogx(self.avg_windows,self.g_filt,ls_filt,linewidth = 1,label = 'FILTERED')
self.subfig.legend(loc = 'best', prop = {'size':14})
self.subfig.set_xlabel('average time window(s)')
self.subfig.set_ylabel('$|g|$')
self.fig.canvas.draw()
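# NOTE (assumption, not confirmed by this file): Coherent_Signal is provided by the
# correlation-analysis utilities imported elsewhere. A common definition of the coherent-signal
# factor in reflectometry analysis is g = <E> / sqrt(<|E|^2>), i.e. the ensemble-averaged complex
# field normalized by its RMS amplitude, so |g| approaches 1 for an unperturbed (coherent)
# reflection and 0 for a fully scattered one; the exact normalization used here should be
# checked against the Coherent_Signal implementation.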
class Plot6(Picture):
""" Plot 6: Four channel g factor plot. 55GHz, 57.5GHz, 60GHz, 62.5 GHz and 67.5GHz.
"""
def __init__(self):
        Picture.__init__(self,'Plot6:g factors for 5 channels', 'Plot 6: Five channel g factor plot. 55GHz, 57.5GHz, 60GHz, 62.5GHz and 67.5GHz')
def prepare(self,t_exp = 0.001):
#prepare the cut-off locations on the mid-plane
self.x55 = ref_pos_mid[8]
self.x575 = ref_pos_mid[9]
self.x60 = ref_pos_mid[10]
self.x625 = ref_pos_mid[11]
self.x675 = ref_pos_mid[14]
self.x65 = ref_pos_mid[12]
self.x665 = ref_pos_mid[13]
self.x = [self.x55,self.x575,self.x60,self.x625,self.x675,self.x65,self.x665]
#First, we get experimental g factors ready
self.t_sections = [[0.632,0.633],[0.6334,0.6351],[0.636,0.6385]]
self.n_sections = len(self.t_sections)
self.g55 = []
self.g575 = []
self.g60 = []
self.g625 = []
self.g675 = []
self.f_low = 4e4
self.f_high = 5e5 #frequency filter range set to 40kHz-500kHz
l55 = nstx_exp.loaders[8]
l575 = nstx_exp.loaders[9]
l60 = nstx_exp.loaders[10]
l625 = nstx_exp.loaders[11]
l675 = nstx_exp.loaders[12]
for i in range(self.n_sections):
tstart = self.t_sections[i][0]
tend = self.t_sections[i][1]
sig55 ,time = l55.signal(tstart,tend)
sig575 ,time = l575.signal(tstart,tend)
sig60 ,time = l60.signal(tstart,tend)
sig625 ,time = l625.signal(tstart,tend)
sig675, time = l675.signal(tstart,tend)
dt = time[1]-time[0]
            # Use a band-pass filter so that only mid-range frequency perturbations are kept
sig55_filt = band_pass_filter(sig55,dt,self.f_low,self.f_high)
sig575_filt = band_pass_filter(sig575,dt,self.f_low,self.f_high)
sig60_filt = band_pass_filter(sig60,dt,self.f_low,self.f_high)
sig625_filt = band_pass_filter(sig625,dt,self.f_low,self.f_high)
sig675_filt = band_pass_filter(sig675,dt,self.f_low,self.f_high)
theads = np.arange(tstart,tend-t_exp,t_exp/10)
ttails = theads+t_exp/10
arg_heads = np.searchsorted(time,theads)
arg_tails = np.searchsorted(time,ttails)
#prepare the g-factors,keep them complex until we draw them
for j in range(len(arg_heads)):
self.g55.append(Coherent_Signal(sig55_filt[arg_heads[j]:arg_tails[j]]))
self.g575.append(Coherent_Signal(sig575_filt[arg_heads[j]:arg_tails[j]]))
self.g60.append(Coherent_Signal(sig60_filt[arg_heads[j]:arg_tails[j]]))
self.g625.append(Coherent_Signal(sig625_filt[arg_heads[j]:arg_tails[j]]))
self.g675.append(Coherent_Signal(sig675_filt[arg_heads[j]:arg_tails[j]]))
self.g_exp = [[self.g55,self.g575,self.g60,self.g625,self.g675],
[np.mean(np.abs(self.g55)),np.mean(np.abs(self.g575)),np.mean(np.abs(self.g60)),np.mean(np.abs(self.g625)),np.mean(np.abs(self.g675))],
[np.std(np.abs(self.g55)),np.std(np.abs(self.g575)),np.std(np.abs(self.g60)),np.std(np.abs(self.g625)),np.std(np.abs(self.g675))]]
# Now, we prepare the g factors from FWR2D
E_file = '/p/gkp/lshi/XGC1_NSTX_Case/Correlation_Runs/RUNS/RUN_NSTX_139047_All_Channel_All_Time_MULTIPROC/E_out_55.sav.npy'
E55_2d = np.load(E_file)
E55_2d = remove_average_phase(E55_2d)
E_file = '/p/gkp/lshi/XGC1_NSTX_Case/Correlation_Runs/RUNS/RUN_NSTX_139047_All_Channel_All_Time_MULTIPROC/E_out_57.5.sav.npy'
E575_2d = np.load(E_file)
E575_2d = remove_average_phase(E575_2d)
E_file = '/p/gkp/lshi/XGC1_NSTX_Case/Correlation_Runs/RUNS/RUN_NSTX_139047_All_Channel_All_Time_MULTIPROC/E_out_60.sav.npy'
E60_2d = np.load(E_file)
E60_2d = remove_average_phase(E60_2d)
E_file = '/p/gkp/lshi/XGC1_NSTX_Case/Correlation_Runs/RUNS/RUN_NSTX_139047_All_Channel_All_Time_MULTIPROC/E_out_62.5.sav.npy'
E625_2d = np.load(E_file)
E625_2d = remove_average_phase(E625_2d)
E_file = '/p/gkp/lshi/XGC1_NSTX_Case/Correlation_Runs/RUNS/RUN_NSTX_139047_All_Channel_All_Time_MULTIPROC/E_out_67.5.sav.npy'
E675_2d = np.load(E_file)
E675_2d = remove_average_phase(E675_2d)
E_file = '/p/gkp/lshi/XGC1_NSTX_Case/Correlation_Runs/RUNS/RUN_NSTX_139047_All_Channel_All_Time_MULTIPROC_add_two_channels/E_out_65.0.sav.npy'
E65_2d = np.load(E_file)
E65_2d = remove_average_phase(E65_2d)
E_file = '/p/gkp/lshi/XGC1_NSTX_Case/Correlation_Runs/RUNS/RUN_NSTX_139047_All_Channel_All_Time_MULTIPROC_add_two_channels/E_out_66.5.sav.npy'
E665_2d = np.load(E_file)
E665_2d = remove_average_phase(E665_2d)
self.g55_2d = Coherent_Signal(E55_2d.flatten())
self.g575_2d = Coherent_Signal(E575_2d.flatten())
self.g60_2d = Coherent_Signal(E60_2d.flatten())
self.g625_2d = Coherent_Signal(E625_2d.flatten())
self.g675_2d = Coherent_Signal(E675_2d.flatten())
self.g65_2d = Coherent_Signal(E65_2d.flatten())
self.g665_2d = Coherent_Signal(E665_2d.flatten())
self.g_2d = [self.g55_2d,self.g575_2d,self.g60_2d,self.g625_2d,self.g675_2d,self.g65_2d,self.g665_2d]
# And g-factors from FWR3D
E_file = '/p/gkp/lshi/XGC1_NSTX_Case/Correlation_Runs/3DRUNS/RUN_NEWAll_16_cross_16_time_55GHz/E_out.sav.npy'
E55_3d = np.load(E_file)
E55_3d = remove_average_phase(E55_3d)
E_file = '/p/gkp/lshi/XGC1_NSTX_Case/Correlation_Runs/3DRUNS/RUN_NEWAll_16_cross_16_time_57.5GHz/E_out.sav.npy'
E575_3d = np.load(E_file)
E575_3d = remove_average_phase(E575_3d)
E_file = '/p/gkp/lshi/XGC1_NSTX_Case/Correlation_Runs/3DRUNS/RUN_NEWAll_16_cross_16_time_60GHz/E_out.sav.npy'
E60_3d = np.load(E_file)
E60_3d = remove_average_phase(E60_3d)
E_file = '/p/gkp/lshi/XGC1_NSTX_Case/Correlation_Runs/3DRUNS/RUN_NEWAll_16_cross_16_time_62.5GHz/E_out.sav.npy'
E625_3d = np.load(E_file)
E625_3d = remove_average_phase(E625_3d)
E_file = '/p/gkp/lshi/XGC1_NSTX_Case/Correlation_Runs/3DRUNS/RUN_NEWAll_16_cross_16_time_67.5GHz/E_out.sav.npy'
E675_3d = np.load(E_file)
E675_3d = remove_average_phase(E675_3d)
self.g55_3d = Coherent_Signal(E55_3d.flatten())
self.g575_3d = Coherent_Signal(E575_3d.flatten())
self.g60_3d = Coherent_Signal(E60_3d.flatten())
self.g625_3d = Coherent_Signal(E625_3d.flatten())
self.g675_3d = Coherent_Signal(E675_3d.flatten())
self.g_3d = [self.g55_3d,self.g575_3d,self.g60_3d,self.g625_3d,self.g675_3d]
def show(self,black_white = False):
if(black_white):
color_exp = 'k'
marker_exp = 's'
ls_2d = 'k--'
marker_2d = 'o'
ls_3d = 'k-.'
marker_3d = '^'
else:
color_exp = 'b'
marker_exp = 's'
ls_2d = 'g-'
marker_2d = 'o'
ls_3d = 'r-'
marker_3d = '^'
self.fig = plt.figure()
self.subfig = self.fig.add_subplot(111)
self.g_exp_line = self.subfig.errorbar(self.x[:5] ,self.g_exp[1],yerr=self.g_exp[2],ecolor = color_exp,linewidth = 1,marker = marker_exp,label = 'EXP')
self.g_2d_line = self.subfig.errorbar(self.x[:7],np.abs(self.g_2d),yerr = 1./16, fmt = ls_2d,marker = marker_2d,linewidth = 1,label = 'FWR2D')
self.g_3d_line = self.subfig.errorbar(self.x[:5],np.abs(self.g_3d),yerr = 1./16, fmt = ls_3d,marker = marker_3d,linewidth = 1,label = 'FWR3D')
self.subfig.legend(loc = 'best', prop = {'size':14})
self.subfig.set_xlabel('R(m)')
self.subfig.set_ylabel('$|g|$')
self.subfig.set_ylim(0,1)
xticks = self.subfig.get_xticks()[::2]
xticklabels = [str(x) for x in xticks]
self.subfig.set_xticks(xticks)
self.subfig.set_xticklabels(xticklabels)
self.fig.canvas.draw()
class Plot7(Picture):
""" Plot 7: Cross-Correlation between 55GHz,57.5GHz, 60GHz, 62.5GHz and 67.5GHz channels. Center channel chosen to be 62.5GHz
"""
def __init__(self):
Picture.__init__(self,'Plot7:Multi channel cross-section plots','Plot 7: Cross-Correlation between 55GHz,57.5GHz, 60GHz, 62.5GHz and 67.5GHz channels.')
def prepare(self,center = 62.5,t_exp = 0.001):
#prepare the cut-off locations on the mid-plane
if center == 67.5:
channel_c = 14
elif center == 62.5:
channel_c = 11
elif center == 60:
channel_c = 10
elif center == 55:
channel_c = 8
else:
channel_c = 14
self.x55 = (ref_pos_mid[8]-ref_pos_mid[channel_c])
self.x575 = (ref_pos_mid[9]-ref_pos_mid[channel_c])
self.x60 = (ref_pos_mid[10]-ref_pos_mid[channel_c])
self.x625 = (ref_pos_mid[11]-ref_pos_mid[channel_c])
self.x675 = (ref_pos_mid[14]-ref_pos_mid[channel_c])
self.x65 = (ref_pos_mid[12]-ref_pos_mid[channel_c])
self.x665 = (ref_pos_mid[13]-ref_pos_mid[channel_c])
self.x = [self.x55,self.x575,self.x60,self.x625,self.x675,self.x65,self.x665]
#First, we get experimental g factors ready
self.tstart = 0.632
self.tend = self.tstart + t_exp
self.f_low = 4e4
self.f_high = 5e5 #frequency filter range set to 40kHz-500kHz
l55 = nstx_exp.loaders[8]
l575 = nstx_exp.loaders[9]
l60 = nstx_exp.loaders[10]
l625 = nstx_exp.loaders[11]
l675 = nstx_exp.loaders[12]
sig55 ,time = l55.signal(self.tstart,self.tend)
sig575 ,time = l575.signal(self.tstart,self.tend)
sig60 ,time = l60.signal(self.tstart,self.tend)
sig625 ,time = l625.signal(self.tstart,self.tend)
sig675,time = l675.signal(self.tstart,self.tend)
dt = time[1]-time[0]
        # Use a band-pass filter so that only mid-range frequency perturbations are kept
sig55_filt = band_pass_filter(sig55,dt,self.f_low,self.f_high)
sig575_filt = band_pass_filter(sig575,dt,self.f_low,self.f_high)
sig60_filt = band_pass_filter(sig60,dt,self.f_low,self.f_high)
sig625_filt = band_pass_filter(sig625,dt,self.f_low,self.f_high)
sig675_filt = band_pass_filter(sig675,dt,self.f_low,self.f_high)
#prepare the gamma-factors,keep them complex until we draw them
if center == 67.5:
sig_c = sig675_filt
elif center == 62.5:
sig_c = sig625_filt
elif center == 60:
sig_c = sig60_filt
elif center == 55:
sig_c = sig55_filt
else:
sig_c = sig675_filt
self.c55 = Cross_Correlation(sig_c,sig55_filt,'NORM')
self.c575 = Cross_Correlation(sig_c,sig575_filt,'NORM')
self.c60 = Cross_Correlation(sig_c,sig60_filt,'NORM')
self.c625 = Cross_Correlation(sig_c,sig625_filt,'NORM')
self.c675 = Cross_Correlation(sig_c,sig675_filt,'NORM')
self.c_exp = [self.c55,self.c575,self.c60,self.c625,self.c675]
# Now, we prepare the gamma factors from FWR2D
E_file = '/p/gkp/lshi/XGC1_NSTX_Case/Correlation_Runs/RUNS/RUN_NSTX_139047_All_Channel_All_Time_MULTIPROC/E_out_55.sav.npy'
E55_2d = remove_average_field(remove_average_phase((np.load(E_file))))[:,:120,:].flatten()
E_file = '/p/gkp/lshi/XGC1_NSTX_Case/Correlation_Runs/RUNS/RUN_NSTX_139047_All_Channel_All_Time_MULTIPROC/E_out_57.5.sav.npy'
E575_2d = remove_average_field(remove_average_phase((np.load(E_file))))[:,:120,:].flatten()
E_file = '/p/gkp/lshi/XGC1_NSTX_Case/Correlation_Runs/RUNS/RUN_NSTX_139047_All_Channel_All_Time_MULTIPROC/E_out_60.sav.npy'
E60_2d = remove_average_field(remove_average_phase((np.load(E_file))))[:,:120,:].flatten()
E_file = '/p/gkp/lshi/XGC1_NSTX_Case/Correlation_Runs/RUNS/RUN_NSTX_139047_All_Channel_All_Time_MULTIPROC/E_out_62.5.sav.npy'
E625_2d = remove_average_field(remove_average_phase((np.load(E_file))))[:,:120,:].flatten()
E_file = '/p/gkp/lshi/XGC1_NSTX_Case/Correlation_Runs/RUNS/RUN_NSTX_139047_All_Channel_All_Time_MULTIPROC_add_two_channels/E_out_65.0.sav.npy'
E650_2d = remove_average_field(remove_average_phase((np.load(E_file))))[:,:120,:].flatten()
E_file = '/p/gkp/lshi/XGC1_NSTX_Case/Correlation_Runs/RUNS/RUN_NSTX_139047_All_Channel_All_Time_MULTIPROC_add_two_channels/E_out_66.5.sav.npy'
E665_2d = remove_average_field(remove_average_phase((np.load(E_file))))[:,:120,:].flatten()
E_file = '/p/gkp/lshi/XGC1_NSTX_Case/Correlation_Runs/RUNS/RUN_NSTX_139047_All_Channel_All_Time_MULTIPROC/E_out_67.5.sav.npy'
E675_2d = remove_average_field(remove_average_phase((np.load(E_file))))[:,:120,:].flatten()
E2d = [E55_2d,E575_2d,E60_2d,E625_2d,E675_2d,E650_2d,E665_2d]
if center == 67.5:
E2d_c = 4
elif center == 62.5:
E2d_c = 3
elif center == 60:
E2d_c = 2
elif center == 55:
E2d_c = 0
else:
E2d_c = 4
self.c_2d = []
self.c_2d.append(Cross_Correlation(E2d[E2d_c],E2d[0],'NORM'))
self.c_2d.append(Cross_Correlation(E2d[E2d_c],E2d[1],'NORM'))
self.c_2d.append(Cross_Correlation(E2d[E2d_c],E2d[2],'NORM'))
self.c_2d.append(Cross_Correlation(E2d[E2d_c],E2d[3],'NORM'))
self.c_2d.append(Cross_Correlation(E2d[E2d_c],E2d[4],'NORM'))
self.c_2d.append(Cross_Correlation(E2d[E2d_c],E2d[5],'NORM'))
self.c_2d.append(Cross_Correlation(E2d[E2d_c],E2d[6],'NORM'))
# And gamma-factors from FWR3D
E_file = '/p/gkp/lshi/XGC1_NSTX_Case/Correlation_Runs/3DRUNS/RUN_NEWAll_16_cross_16_time_55GHz/E_out.sav.npy'
E55_3d = remove_average_field(remove_average_phase((np.load(E_file)))).flatten()
E_file = '/p/gkp/lshi/XGC1_NSTX_Case/Correlation_Runs/3DRUNS/RUN_NEWAll_16_cross_16_time_57.5GHz/E_out.sav.npy'
E575_3d = remove_average_field(remove_average_phase((np.load(E_file)))).flatten()
E_file = '/p/gkp/lshi/XGC1_NSTX_Case/Correlation_Runs/3DRUNS/RUN_NEWAll_16_cross_16_time_60GHz/E_out.sav.npy'
E60_3d = remove_average_field(remove_average_phase((np.load(E_file)))).flatten()
E_file = '/p/gkp/lshi/XGC1_NSTX_Case/Correlation_Runs/3DRUNS/RUN_NEWAll_16_cross_16_time_62.5GHz/E_out.sav.npy'
E625_3d = remove_average_field(remove_average_phase((np.load(E_file)))).flatten()
E_file = '/p/gkp/lshi/XGC1_NSTX_Case/Correlation_Runs/3DRUNS/RUN_NEWAll_16_cross_16_time_67.5GHz/E_out.sav.npy'
E675_3d = remove_average_field(remove_average_phase((np.load(E_file)))).flatten()
E3d = [E55_3d,E575_3d,E60_3d,E625_3d,E675_3d]
if center == 67.5:
E3d_c = 4
elif center == 62.5:
E3d_c = 3
elif center == 60:
E3d_c = 2
elif center == 55:
E3d_c = 0
else:
E3d_c = 4
self.c_3d = []
self.c_3d.append(Cross_Correlation(E3d[E3d_c],E3d[0],'NORM'))
self.c_3d.append(Cross_Correlation(E3d[E3d_c],E3d[1],'NORM'))
self.c_3d.append(Cross_Correlation(E3d[E3d_c],E3d[2],'NORM'))
self.c_3d.append(Cross_Correlation(E3d[E3d_c],E3d[3],'NORM'))
self.c_3d.append(Cross_Correlation(E3d[E3d_c],E3d[4],'NORM'))
# Gaussian fit of the cross-correlations
self.a_exp,self.sa_exp = fitting_cross_correlation(np.abs(self.c_exp),self.x[:5],'gaussian')
self.a_2d,self.sa_2d = fitting_cross_correlation(np.abs(self.c_2d),self.x[:7],'gaussian')
self.a_3d,self.sa_3d = fitting_cross_correlation(np.abs(self.c_3d),self.x[:5],'gaussian')
self.xmax = 2*np.sqrt(np.max((np.abs(self.a_exp),np.abs(self.a_2d),np.abs(self.a_3d))))
self.x_fit = np.linspace(-self.xmax,self.xmax,500)
self.fit_exp = gaussian_fit(self.x_fit,self.a_exp)
self.fit_2d = gaussian_fit(self.x_fit,self.a_2d)
self.fit_3d = gaussian_fit(self.x_fit,self.a_3d)
#Exponential fit of the cross-correlations
self.e_exp,self.se_exp = fitting_cross_correlation(np.abs(self.c_exp),self.x[:5],'exponential')
self.e_2d,self.se_2d = fitting_cross_correlation(
|
np.abs(self.c_2d)
|
numpy.abs
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 16 17:58:52 2018
@author: Zhaoyi.Shen
"""
import sys
# sys.path.append('/home/z1s/py/lib/')
from signal_processing import lfca
import numpy as np
import scipy as sp
from scipy import io
from matplotlib import pyplot as plt
from netCDF4 import Dataset,num2date
from datetime import datetime, timedelta
# with Dataset('/export/data1/rccheng/ERSSTv5/sst.mnmean.nc') as f:
# lat_axis = f['lat'][:]
# lon_axis = f['lon'][:]
# er_time = f['time'][:]
# er_sst = f['sst'][:]
# er_sst[er_sst<-9e36] = np.nan
# er_refstart = [(datetime(1800,1,1) + timedelta(days= i)-datetime(1900,1,1)).days for i in er_time]
# er_refend = [(datetime(1800,1,1) + timedelta(days= i)-datetime(2017,1,1)).days for i in er_time]
# sst = er_sst[er_refstart.index(0):er_refend.index(0),:,:].transpose([2,1,0])
# time = np.arange(1900,2016.99,1/12.)
# nlon = sst.shape[0]
# nlat = sst.shape[1]
# ntime = sst.shape[2]
filename = '/export/data1/rccheng/ERSSTv5/ERSST_1900_2016.mat'
mat = io.loadmat(filename)
lat_axis = mat['LAT_AXIS']
lon_axis = mat['LON_AXIS']
sst = mat['SST']
nlon = sst.shape[0]
nlat = sst.shape[1]
ntime = sst.shape[2]
time = np.arange(1900,2016.99,1/12.)
cutoff = 120
truncation = 30
#%%
mean_seasonal_cycle = np.zeros((nlon,nlat,12))
sst_anomalies = np.zeros((nlon,nlat,ntime))
for i in range(12):
mean_seasonal_cycle[...,i] = np.nanmean(sst[...,i:ntime:12],-1)
sst_anomalies[...,i:ntime:12] = sst[...,i:ntime:12] - mean_seasonal_cycle[...,i][...,np.newaxis]
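# NOTE (illustrative sanity check, not in the original script): after subtracting the mean
# seasonal cycle, each calendar month of sst_anomalies should average to roughly zero (up to
# floating-point error), which can be verified with e.g.
#   for i in range(12):
#       print(np.nanmax(np.abs(np.nanmean(sst_anomalies[..., i:ntime:12], -1))))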
#%%
s = sst_anomalies.shape
y, x = np.meshgrid(lat_axis,lon_axis)
area = np.cos(y*np.pi/180.)
area[np.where(np.isnan(np.mean(sst_anomalies,-1)))] = 0
#%%
domain = np.ones(area.shape)
domain[np.where(x<100)] = 0
domain[np.where((x<103) & (y<5))] = 0
domain[np.where((x<105) & (y<2))] = 0
domain[np.where((x<111) & (y<-6))] = 0
domain[np.where((x<114) & (y<-7))] = 0
domain[np.where((x<127) & (y<-8))] = 0
domain[np.where((x<147) & (y<-18))] = 0
domain[np.where(y>70)] = 0
domain[np.where((y>65) & ((x<175) | (x>200)))] = 0
domain[np.where(y<-45)] = 0
domain[np.where((x>260) & (y>17))] = 0
domain[np.where((x>270) & (y<=17) & (y>14))] = 0
domain[np.where((x>276) & (y<=14) & (y>9))] = 0
domain[
|
np.where((x>290) & (y<=9))
|
numpy.where
|
"""
THIS VERSION IS DEPRECATED, THE PYQTGRAPH-BACKEND IS CONTINUED TO BE DEVELOPED
IN MNE-PYTHON (currently https://github.com/mne-tools/mne-python/pull/9687)
"""
import datetime
import math
import platform
from functools import partial
import numpy as np
from PyQt5.QtCore import (QEvent, QPointF, Qt, pyqtSignal, QRunnable,
QObject, QThreadPool, QRectF)
from PyQt5.QtGui import (QFont, QIcon, QPixmap, QTransform,
QMouseEvent, QPainter, QImage, QPen)
from PyQt5.QtTest import QTest
from PyQt5.QtWidgets import (QAction, QColorDialog, QComboBox, QDialog,
QDockWidget, QDoubleSpinBox, QFormLayout,
QGridLayout, QHBoxLayout, QInputDialog,
QLabel, QMainWindow, QMessageBox,
QPushButton, QScrollBar, QSizePolicy,
QWidget, QStyleOptionSlider, QStyle,
QApplication, QGraphicsView, QProgressBar,
QVBoxLayout, QLineEdit, QCheckBox, QScrollArea)
from mne.annotations import _sync_onset
from mne.io.pick import _DATA_CH_TYPES_ORDER_DEFAULT
from mne.utils import logger
from mne.viz._figure import BrowserBase
from pyqtgraph import (AxisItem, GraphicsView, InfLineLabel, InfiniteLine,
LinearRegionItem,
PlotCurveItem, PlotItem, TextItem, ViewBox, functions,
mkBrush, mkPen, setConfigOption, mkQApp, mkColor)
from scipy.stats import zscore
name = 'pyqtgraph'
class RawTraceItem(PlotCurveItem):
"""Graphics-Object for single data trace."""
def __init__(self, mne, ch_idx):
super().__init__(clickable=True)
# ToDo: Does it affect performance, if the mne-object is referenced
# to in every RawTraceItem?
self.mne = mne
self.check_nan = self.mne.check_nan
self.set_ch_idx(ch_idx)
self.update_bad_color()
self.set_data()
def update_bad_color(self):
if self.isbad:
self.setPen(self.mne.ch_color_bad)
else:
self.setPen(self.color)
def set_ch_idx(self, ch_idx):
self.ch_idx = ch_idx
self.pick_idx =
|
np.argwhere(self.mne.picks == self.ch_idx)
|
numpy.argwhere
|
'''Test for bdpy.vstack'''
from unittest import TestCase, TestLoader, TextTestRunner
import numpy as np
import bdpy
from bdpy import vstack, metadata_equal
class TestVstack(TestCase):
def test_vstack(self):
x0_data = np.random.rand(10, 20)
x0_label = np.random.rand(10, 1)
x1_data = np.random.rand(10, 20)
x1_label = np.random.rand(10, 1)
bdata0 = bdpy.BData()
bdata0.add(x0_data, 'Data')
bdata0.add(x0_label, 'Label')
bdata1 = bdpy.BData()
bdata1.add(x1_data, 'Data')
bdata1.add(x1_label, 'Label')
bdata_merged = vstack([bdata0, bdata1])
np.testing.assert_array_equal(bdata_merged.select('Data'),
np.vstack([x0_data, x1_data]))
np.testing.assert_array_equal(bdata_merged.select('Label'),
np.vstack([x0_label, x1_label]))
def test_vstack_successive(self):
x0_data = np.random.rand(10, 20)
x0_label = np.random.rand(10, 1)
x0_run = np.arange(10).reshape(10, 1) + 1
x1_data = np.random.rand(10, 20)
x1_label = np.random.rand(10, 1)
x1_run = np.arange(10).reshape(10, 1) + 1
bdata0 = bdpy.BData()
bdata0.add(x0_data, 'Data')
bdata0.add(x0_label, 'Label')
bdata0.add(x0_run, 'Run')
bdata1 = bdpy.BData()
bdata1.add(x1_data, 'Data')
bdata1.add(x1_label, 'Label')
        bdata1.add(x1_run, 'Run')
bdata_merged = vstack([bdata0, bdata1], successive=['Run'])
np.testing.assert_array_equal(bdata_merged.select('Data'),
np.vstack([x0_data, x1_data]))
np.testing.assert_array_equal(bdata_merged.select('Label'),
np.vstack([x0_label, x1_label]))
np.testing.assert_array_equal(bdata_merged.select('Run'),
np.vstack([x0_run,
x1_run + len(x0_run)]))
def test_vstack_minimal(self):
x0_data = np.random.rand(5, 10)
x0_label = np.random.rand(5, 1)
x1_data = np.random.rand(5, 10)
x1_label = np.random.rand(5, 1)
bdata0 = bdpy.BData()
bdata0.add(x0_data, 'Data')
bdata0.add(x0_label, 'Label')
bdata0.add_metadata('key shared', [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, np.nan], 'Shared meta-data')
bdata0.add_metadata('key only in 0', np.random.rand(11), 'Meta-data only in bdata0')
bdata1 = bdpy.BData()
bdata1.add(x1_data, 'Data')
bdata1.add(x1_label, 'Label')
bdata1.add_metadata('key shared', [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, np.nan], 'Shared meta-data')
        bdata1.add_metadata('key only in 1', np.random.rand(11), 'Meta-data only in bdata1')
bdata_merged = vstack([bdata0, bdata1], metadata_merge='minimal')
np.testing.assert_array_equal(bdata_merged.select('Data'),
np.vstack([x0_data, x1_data]))
np.testing.assert_array_equal(bdata_merged.select('Label'),
np.vstack([x0_label, x1_label]))
np.testing.assert_array_equal(bdata_merged.get_metadata('key shared'),
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, np.nan])
self.assertFalse('key only in 0' in bdata_merged.metadata.key)
self.assertFalse('key only in 1' in bdata_merged.metadata.key)
def test_vstack_vmap(self):
x0_data = np.random.rand(10, 20)
x0_label = np.random.permutation(np.arange(10)).reshape(10, 1) + 1
x0_label_map = {k: 'label_%04d' % k for k in x0_label.flatten()}
x1_data = np.random.rand(10, 20)
x1_label = np.random.permutation(np.arange(10)).reshape(10, 1) + 1
x1_label_map = {k: 'label_%04d' % k for k in x1_label.flatten()}
bdata0 = bdpy.BData()
bdata0.add(x0_data, 'Data')
bdata0.add(x0_label, 'Label')
bdata0.add_vmap('Label', x0_label_map)
bdata1 = bdpy.BData()
bdata1.add(x1_data, 'Data')
bdata1.add(x1_label, 'Label')
bdata1.add_vmap('Label', x1_label_map)
bdata_merged = vstack([bdata0, bdata1])
np.testing.assert_array_equal(bdata_merged.select('Data'),
np.vstack([x0_data, x1_data]))
np.testing.assert_array_equal(bdata_merged.select('Label'),
np.vstack([x0_label, x1_label]))
# Check vmap
assert bdata0.get_vmap('Label') == bdata1.get_vmap('Label')
assert bdata_merged.get_vmap('Label') == bdata0.get_vmap('Label')
def test_vstack_vmap_merge_diff_vmap(self):
x0_data = np.random.rand(10, 20)
x0_label = np.random.permutation(np.arange(10)).reshape(10, 1) + 1
x0_label_map = {k: 'label_%04d' % k for k in x0_label.flatten()}
x1_data = np.random.rand(10, 20)
x1_label = np.random.permutation(np.arange(10)).reshape(10, 1) + 11
x1_label_map = {k: 'label_%04d' % k for k in x1_label.flatten()}
bdata0 = bdpy.BData()
bdata0.add(x0_data, 'Data')
bdata0.add(x0_label, 'Label')
bdata0.add_vmap('Label', x0_label_map)
bdata1 = bdpy.BData()
bdata1.add(x1_data, 'Data')
bdata1.add(x1_label, 'Label')
bdata1.add_vmap('Label', x1_label_map)
bdata_merged = vstack([bdata0, bdata1])
np.testing.assert_array_equal(bdata_merged.select('Data'),
np.vstack([x0_data, x1_data]))
np.testing.assert_array_equal(bdata_merged.select('Label'),
|
np.vstack([x0_label, x1_label])
|
numpy.vstack
|
from typing import Tuple
import numpy as np
import pandas as pd
from lightgbm import LGBMClassifier
from shap import TreeExplainer
def select_features(
train: pd.DataFrame, label: pd.Series, test: pd.DataFrame
) -> Tuple[pd.DataFrame, pd.DataFrame]:
model = LGBMClassifier(random_state=42)
print(f"{model.__class__.__name__} Train Start!")
model.fit(train, label)
explainer = TreeExplainer(model)
shap_values = explainer.shap_values(test)
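    # NOTE (general pattern, assumption about the intent here): feature importance is typically
    # obtained by aggregating the absolute SHAP values over samples (and over classes for a
    # multiclass LGBMClassifier), e.g. np.abs(shap_values).mean(axis=0), after which the
    # top-ranked columns of both train and test are kept.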
shap_sum =
|
np.abs(shap_values)
|
numpy.abs
|
""" this is used mainly to re-run experiments off-line from logged data, else see -> main"""
import time
import json
import sys
import math
import os
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.decomposition import PCA
from sklearn import svm
import copy
import numpy as np
from data_operators import *
from visualization import *
# print ("Random number with seed 30")
np.random.seed(123)
acc_test_dict = [
["15mil", "na"],
["8mil", "na"],
["15mil", "8mil", "na"],
["10mil", "na"],
["5mil", "na"],
["10mil", "5mil", "na"],
]
def generate_action(current_state, idx, min_vec, max_vec, number_of_actions):
if min_vec.shape[0] <= idx:
return current_state
else:
if min_vec[idx] != 0 or max_vec[idx] != 0:
if number_of_actions == 1:
actions =
|
np.array((max_vec[idx]-min_vec[idx])/2)
|
numpy.array
|
# Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Tests if two matrices are identical to some tolerance.
"""
from numpy import array, max, abs, nonzero, argmax, zeros
from pypower.t.t_ok import t_ok
from pypower.t.t_globals import TestGlobals
def t_is(got, expected, prec=5, msg=''):
"""Tests if two matrices are identical to some tolerance.
Increments the global test count and if the maximum difference
between corresponding elements of C{got} and C{expected} is less
than 10**(-C{prec}) then it increments the passed tests count,
otherwise increments the failed tests count. Prints 'ok' or 'not ok'
followed by the MSG, unless the global variable t_quiet is true.
Intended to be called between calls to C{t_begin} and C{t_end}.
@author: <NAME> (PSERC Cornell)
"""
if isinstance(got, int) or isinstance(got, float):
got = array([got], float)
elif isinstance(got, list) or isinstance(got, tuple):
got = array(got, float)
if isinstance(expected, int) or isinstance(expected, float):
expected = array([expected], float)
elif isinstance(expected, list) or isinstance(expected, tuple):
expected =
|
array(expected, float)
|
numpy.array
|
"""
Tools for loading datasets.
Author: <NAME>
Contact: <EMAIL>
Date: August 2018
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import mimetypes
import zipfile
from tqdm import tqdm
import numpy as np
import tensorflow as tf
import imageio
import requests
from .. import _globals
# URL to the Omniglot python raw archive files on GitHub
OMNIGLOT_GITHUB_RAW_FILES = 'https://github.com/brendenlake/omniglot/raw/master/python/'
def load_mnist(path='mnist.npz'):
"""Load MNIST dataset wrapper (see tf.keras.datasets.mnist for more info)."""
return tf.keras.datasets.mnist.load_data(path=path)
def load_omniglot(path='data/omniglot.npz'):
"""Load the Omniglot dataset from Brenden Lakes official repo.
Data is returned as a tuple (background_set, evaluation_set), where each set
consists of the tuple (x_data, y_labels, z_alphabet).
Parameters
----------
path : str
Path to store cached dataset numpy archive.
Returns
-------
omniglot : tuple of NumPy arrays
Omniglot dataset returned as (background_set, evaluation_set).
Notes
-----
Omniglot [1]_ is known as the inverse of MNIST as it contains many classes with
few examples per class.
The images are 105x105 single channel arrays encoded with standard RGB color
space. In other words, each pixel is an integer value in the range of 0-255,
where 0 is black and 255 is white. The characters are represented by the
    dark portions (0) of the image, while the background is white (255).
In contrast, MNIST is encoded with an inverse grayscale color map, where
light portions (255) of the image represent the digit, and the background
is black (0). Thus if we want to pretrain a network on Omniglot in order to
learn features relevant to MNIST, or vice versa, we would need to invert the
values of MNIST.
Invert operation for flattened 1D array shape (height*width):
>>> x_data = list(map(lambda x: 255 - x, x_data))
Or, for 2D image array of shape (height, width):
>>> x_data = [list(map(lambda x: 255 - x, x_row)) for x_row in x_data]
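    Or, vectorized with NumPy for an array of any shape:
    >>> x_data = 255 - np.asarray(x_data)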
The datasets (background_set, evaluation_set) are broken down as follow:
- background_set: 30 alphabets, 964 character classes, 19280 exemplars (20 per class).
- evaluation_set: 20 alphabets, 659 character classes, 13180 exemplars (20 per class).
Where each dataset consists of:
- x_data: set of 105x105 Omniglot handwritten character images.
- y_labels: set of image string labels (format: "{character_id}_{alphabet_index}").
- z_alphabet: set of alphabets that characters are drawn from.
References
----------
.. [1] <NAME>, <NAME>, <NAME> (2015):
Human-level concept learning through probabilistic program induction.
http://www.sciencemag.org/content/350/6266/1332.short
https://github.com/brendenlake/omniglot
"""
omniglot = ()
if os.path.isfile(path):
np_data = np.load(path)
omniglot += ((np.ascontiguousarray(np_data['x_train']), np_data['y_train'], np_data['z_train']), )
omniglot += ((np.ascontiguousarray(np_data['x_test']), np_data['y_test'], np_data['z_test']), )
else:
print("Downloading Omniglot datasets ...")
files = ['images_background.zip', 'images_evaluation.zip']
for filename in files:
# Download omniglot archives to temporary files
file_url = OMNIGLOT_GITHUB_RAW_FILES + filename
with open(filename, 'wb') as fp:
response = requests.get(file_url, stream=True)
total_length = response.headers.get('content-length')
if total_length is None: # No content length header
fp.write(response.content)
else:
chunk_size = 1024 # 1 kB iterations
total_length = int(total_length)
n_chunks = int(np.ceil(total_length/chunk_size))
for _, data in zip(tqdm(range(n_chunks), unit='KB'),
response.iter_content(chunk_size=chunk_size)):
fp.write(data) # Write data to temp file
# Extract omniglot features from downloaded archives
x_data = []
y_labels = []
z_alphabets = []
with zipfile.ZipFile(filename, 'r') as archive:
arch_members = archive.namelist()
for arch_member in arch_members:
if mimetypes.guess_type(arch_member)[0] == 'image/png':
# Split image path into parts to get label and alphabet
path_head, image_filename = os.path.split(arch_member)
path_head, character = os.path.split(path_head)
_, alphabet = os.path.split(path_head)
# Label is "{character_id}_{alphabet_index}"
label = "{}_{}".format(image_filename.split('_')[0],
character.replace('character', ''))
# Extract and read image data array
with archive.open(arch_member) as extract_image:
image_data = imageio.imread(extract_image.read())
x_data.append(image_data)
y_labels.append(label)
z_alphabets.append(alphabet)
os.remove(filename) # Delete temporary archive files ...
omniglot += ((
np.ascontiguousarray(x_data, dtype=_globals.NP_INT),
np.asarray(y_labels),
np.asarray(z_alphabets)), )
# Save downloaded and extracted data in numpy archive for later use
dirname = os.path.dirname(path)
if dirname != '' and not os.path.exists(dirname):
os.makedirs(dirname)
np.savez_compressed(path,
x_train=omniglot[0][0],
y_train=omniglot[0][1],
z_train=omniglot[0][2],
x_test=omniglot[1][0],
y_test=omniglot[1][1],
z_test=omniglot[1][2])
return omniglot
def load_flickraudio(
path='flickr_audio.npz',
feats_type='mfcc',
encoding='latin1',
remove_labels=None):
"""TODO(rpeloff) load the Flickr-Audio extracted features."""
valid_feats = ['mfcc', 'fbank']
if feats_type not in valid_feats:
raise ValueError("Invalid value specified for feats_type: {}. Expected "
"one of: {}.".format(feats_type, valid_feats))
remove_labels = [] if remove_labels is None else remove_labels
flickraudio = ()
if os.path.isfile(path):
np_data = np.load(path, encoding=encoding)[feats_type] # select mfcc or fbanks
# Add train words (and remove optional remove_labels words):
train_labels = np.asarray(np_data[0][1], dtype=str)
valid_ind = [i for i in range(len(train_labels))
if train_labels[i] not in remove_labels]
flickraudio += ((np.ascontiguousarray(np_data[0][0])[valid_ind], # features
train_labels[valid_ind], # labels
np.asarray(np_data[0][2], dtype=str)[valid_ind], # speakers
np.asarray(np_data[0][3], dtype=str)[valid_ind]), ) # segment_keys
# Add dev words (and remove optional remove_labels words):
dev_labels = np.asarray(np_data[1][1], dtype=str)
valid_ind = [i for i in range(len(dev_labels))
if dev_labels[i] not in remove_labels]
flickraudio += ((np.ascontiguousarray(np_data[1][0])[valid_ind], # features
dev_labels[valid_ind], # labels
np.asarray(np_data[1][2], dtype=str)[valid_ind], # speakers
|
np.asarray(np_data[1][3], dtype=str)
|
numpy.asarray
|
import os
import tempfile
import unittest
import mock
import numpy
import pytest
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import link
from chainer import links
from chainer import optimizers
from chainer.serializers import npz
from chainer import testing
from chainer.testing import attr
import chainerx
class TestDictionarySerializer(unittest.TestCase):
def setUp(self):
self.serializer = npz.DictionarySerializer({})
self.data = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
def test_get_item(self):
child = self.serializer['x']
self.assertIsInstance(child, npz.DictionarySerializer)
self.assertEqual(child.path, 'x/')
def test_get_item_strip_slashes(self):
child = self.serializer['/x/']
self.assertEqual(child.path, 'x/')
def check_serialize(self, data, query):
ret = self.serializer(query, data)
dset = self.serializer.target['w']
self.assertIsInstance(dset, numpy.ndarray)
self.assertEqual(dset.shape, data.shape)
self.assertEqual(dset.size, data.size)
self.assertEqual(dset.dtype, data.dtype)
numpy.testing.assert_array_equal(dset, backend.CpuDevice().send(data))
self.assertIs(ret, data)
@attr.chainerx
def test_serialize_chainerx(self):
self.check_serialize(chainerx.asarray(self.data), 'w')
def test_serialize_cpu(self):
self.check_serialize(self.data, 'w')
@attr.gpu
def test_serialize_gpu(self):
self.check_serialize(cuda.to_gpu(self.data), 'w')
def test_serialize_cpu_strip_slashes(self):
self.check_serialize(self.data, '/w')
@attr.gpu
def test_serialize_gpu_strip_slashes(self):
self.check_serialize(cuda.to_gpu(self.data), '/w')
def test_serialize_scalar(self):
ret = self.serializer('x', 10)
dset = self.serializer.target['x']
self.assertIsInstance(dset, numpy.ndarray)
self.assertEqual(dset.shape, ())
self.assertEqual(dset.size, 1)
self.assertEqual(dset.dtype, int)
self.assertEqual(dset[()], 10)
self.assertIs(ret, 10)
def test_serialize_none(self):
ret = self.serializer('x', None)
dset = self.serializer.target['x']
self.assertIsInstance(dset, numpy.ndarray)
self.assertEqual(dset.shape, ())
        self.assertEqual(dset.dtype, object)
self.assertIs(dset[()], None)
self.assertIs(ret, None)
@testing.parameterize(*testing.product({'compress': [False, True]}))
class TestNpzDeserializer(unittest.TestCase):
def setUp(self):
self.data = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
fd, path = tempfile.mkstemp()
os.close(fd)
self.temp_file_path = path
with open(path, 'wb') as f:
savez = numpy.savez_compressed if self.compress else numpy.savez
savez(
f, **{'x/': None, 'y': self.data, 'z': numpy.asarray(10),
'zf32': numpy.array(-2**60, dtype=numpy.float32),
'zi64': numpy.array(-2**60, dtype=numpy.int64),
'w': None})
try:
self.npzfile = numpy.load(path, allow_pickle=True)
except TypeError:
self.npzfile = numpy.load(path)
self.deserializer = npz.NpzDeserializer(self.npzfile)
def tearDown(self):
if hasattr(self, 'npzfile'):
self.npzfile.close()
if hasattr(self, 'temp_file_path'):
os.remove(self.temp_file_path)
def test_get_item(self):
child = self.deserializer['x']
self.assertIsInstance(child, npz.NpzDeserializer)
self.assertEqual(child.path[-2:], 'x/')
def test_get_item_strip_slashes(self):
child = self.deserializer['/x/']
self.assertEqual(child.path, 'x/')
def check_deserialize(self, y, query):
ret = self.deserializer(query, y)
numpy.testing.assert_array_equal(
backend.CpuDevice().send(y), self.data)
self.assertIs(ret, y)
def check_deserialize_by_passing_none(self, y, query):
ret = self.deserializer(query, None)
numpy.testing.assert_array_equal(
backend.CpuDevice().send(ret), self.data)
@attr.chainerx
def test_deserialize_chainerx(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize(chainerx.asarray(y), 'y')
@attr.chainerx
@attr.gpu
def test_deserialize_chainerx_non_native(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize(chainerx.asarray(y, device='cuda:0'), 'y')
def test_deserialize_cpu(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize(y, 'y')
def test_deserialize_by_passing_none_cpu(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize_by_passing_none(y, 'y')
@attr.gpu
def test_deserialize_gpu(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize(cuda.to_gpu(y), 'y')
@attr.ideep
def test_deserialize_ideep(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize(intel64.mdarray(y), 'y')
@attr.gpu
def test_deserialize_by_passing_none_gpu(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize_by_passing_none(cuda.to_gpu(y), 'y')
def test_deserialize_cpu_strip_slashes(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize(y, '/y')
@attr.gpu
def test_deserialize_gpu_strip_slashes(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize(cuda.to_gpu(y), '/y')
def test_deserialize_different_dtype_cpu(self):
y =
|
numpy.empty((2, 3), dtype=numpy.float16)
|
numpy.empty
|
"""
This module defines a simplified interface for generating ABINIT input files.
Note that not all the features of Abinit are supported by BasicAbinitInput.
For a more comprehensive implementation, use the AbinitInput object provided by AbiPy.
"""
import abc
import copy
import json
import logging
import os
from collections import namedtuple
from collections.abc import Mapping, MutableMapping
from enum import Enum
import numpy as np
from monty.collections import AttrDict
from monty.json import MSONable
from monty.string import is_string, list_strings
from pymatgen.core.structure import Structure
from pymatgen.io.abinit import abiobjects as aobj
from pymatgen.io.abinit.pseudos import Pseudo, PseudoTable
from pymatgen.io.abinit.variable import InputVariable
from pymatgen.util.serialization import pmg_serialize
logger = logging.getLogger(__file__)
# List of Abinit variables used to specify the structure.
# These variables should not be passed to set_vars since
# they will be generated with structure.to_abivars()
GEOVARS = {
"acell",
"rprim",
"rprimd",
"angdeg",
"xred",
"xcart",
"xangst",
"znucl",
"typat",
"ntypat",
"natom",
}
# Variables defining tolerances (used in pop_tolerances)
_TOLVARS = {
"toldfe",
"tolvrs",
"tolwfr",
"tolrff",
"toldff",
"tolimg",
"tolmxf",
"tolrde",
}
# Variables defining tolerances for the SCF cycle that are mutually exclusive
_TOLVARS_SCF = {
"toldfe",
"tolvrs",
"tolwfr",
"tolrff",
"toldff",
}
# Variables determining if data files should be read in input
_IRDVARS = {
"irdbseig",
"irdbsreso",
"irdhaydock",
"irdddk",
"irdden",
"ird1den",
"irdqps",
"irdkss",
"irdscr",
"irdsuscep",
"irdvdw",
"irdwfk",
"irdwfkfine",
"irdwfq",
"ird1wf",
}
# Name of the (default) tolerance used by the runlevels.
_runl2tolname = {
"scf": "tolvrs",
"nscf": "tolwfr",
"dfpt": "toldfe", # ?
"screening": "toldfe", # dummy
"sigma": "toldfe", # dummy
"bse": "toldfe", # ?
"relax": "tolrff",
}
# Tolerances for the different levels of accuracy.
T = namedtuple("T", "low normal high")
_tolerances = {
"toldfe": T(1.0e-7, 1.0e-8, 1.0e-9),
"tolvrs": T(1.0e-7, 1.0e-8, 1.0e-9),
"tolwfr": T(1.0e-15, 1.0e-17, 1.0e-19),
"tolrff": T(0.04, 0.02, 0.01),
}
del T
# Default values used if user does not specify them
_DEFAULTS = dict(
kppa=1000,
)
def as_structure(obj):
"""
Convert obj into a Structure. Accepts:
- Structure object.
- Filename
- Dictionaries (MSONable format or dictionaries with abinit variables).
"""
if isinstance(obj, Structure):
return obj
if is_string(obj):
return Structure.from_file(obj)
if isinstance(obj, Mapping):
if "@module" in obj:
return Structure.from_dict(obj)
return aobj.structure_from_abivars(cls=None, **obj)
raise TypeError(f"Don't know how to convert {type(obj)} into a structure")
class ShiftMode(Enum):
"""
Class defining the mode to be used for the shifts.
G: Gamma centered
M: Monkhorst-Pack ((0.5, 0.5, 0.5))
S: Symmetric. Respects the chksymbreak with multiple shifts
    O: OneSymmetric. Respects the chksymbreak with a single shift (as in 'S' if a single shift is given,
    gamma centered otherwise).
"""
GammaCentered = "G"
MonkhorstPack = "M"
Symmetric = "S"
OneSymmetric = "O"
@classmethod
def from_object(cls, obj):
"""
Returns an instance of ShiftMode based on the type of object passed. Converts strings to ShiftMode depending
        on the initial letter of the string. G for GammaCentered, M for MonkhorstPack,
S for Symmetric, O for OneSymmetric.
Case insensitive.
"""
if isinstance(obj, cls):
return obj
if is_string(obj):
return cls(obj[0].upper())
raise TypeError(f"The object provided is not handled: type {type(obj)}")
def _stopping_criterion(runlevel, accuracy):
"""Return the stopping criterion for this runlevel with the given accuracy."""
tolname = _runl2tolname[runlevel]
return {tolname: getattr(_tolerances[tolname], accuracy)}
def _find_ecut_pawecutdg(ecut, pawecutdg, pseudos, accuracy):
"""Return a |AttrDict| with the value of ``ecut`` and ``pawecutdg``."""
# Get ecut and pawecutdg from the pseudo hints.
if ecut is None or (pawecutdg is None and any(p.ispaw for p in pseudos)):
has_hints = all(p.has_hints for p in pseudos)
if ecut is None:
if has_hints:
ecut = max(p.hint_for_accuracy(accuracy).ecut for p in pseudos)
else:
raise RuntimeError("ecut is None but pseudos do not provide hints for ecut")
if pawecutdg is None and any(p.ispaw for p in pseudos):
if has_hints:
pawecutdg = max(p.hint_for_accuracy(accuracy).pawecutdg for p in pseudos)
else:
raise RuntimeError("pawecutdg is None but pseudos do not provide hints")
return AttrDict(ecut=ecut, pawecutdg=pawecutdg)
def _find_scf_nband(structure, pseudos, electrons, spinat=None):
"""Find the value of ``nband``."""
if electrons.nband is not None:
return electrons.nband
nsppol, smearing = electrons.nsppol, electrons.smearing
# Number of valence electrons including possible extra charge
nval = num_valence_electrons(structure, pseudos)
nval -= electrons.charge
# First guess (semiconductors)
nband = nval // 2
# TODO: Find better algorithm
# If nband is too small we may kill the job, increase nband and restart
# but this change could cause problems in the other steps of the calculation
# if the change is not propagated e.g. phonons in metals.
if smearing:
# metallic occupation
nband = max(np.ceil(nband * 1.2), nband + 10)
else:
nband = max(
|
np.ceil(nband * 1.1)
|
numpy.ceil
|
"""
GLFSet.py
Author: <NAME>
Affiliation: University of Colorado at Boulder
Created on: Sat Oct 24 10:42:45 PDT 2015
Description:
"""
import re
import ares
import numpy as np
from ..util import read_lit
import matplotlib.pyplot as pl
from .ModelSet import ModelSet
from ..phenom.DustCorrection import DustCorrection
from ..util.SetDefaultParameterValues import SetAllDefaults
ln10 = np.log(10.)
phi_of_M = lambda M, pstar, Mstar, alpha: 0.4 * ln10 * pstar \
* (10**(0.4 * (Mstar - M)*(1. + alpha))) \
* np.exp(-10**(0.4 * (Mstar - M)))
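# phi_of_M is the Schechter luminosity function written in absolute-magnitude form:
#   phi(M) dM = 0.4 ln(10) phi* 10**(0.4 (M* - M)(alpha + 1)) exp(-10**(0.4 (M* - M))) dM
# with normalization pstar, characteristic magnitude Mstar and faint-end slope alpha.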
class ModelSetLF(ModelSet):
"""
Basically a ModelSet instance with routines specific to the high-z
galaxy luminosity function.
"""
@property
def dc(self):
if not hasattr(self, '_dc'):
self._dc = DustCorrection()
return self._dc
def get_data(self, z):
i = self.data['z'].index(z)
return self.data['x'][i], self.data['y'][i], self.data['err'][i]
def SFE(self, z, ax=None, fig=1, name='fstar', shade_by_like=False,
like=0.685, scatter_kwargs={}, take_log=False, un_log=False,
multiplier=1, skip=0, stop=None, **kwargs):
if ax is None:
gotax = False
fig = pl.figure(fig)
ax = fig.add_subplot(111)
else:
gotax = True
if shade_by_like:
q1 = 0.5 * 100 * (1. - like)
q2 = 100 * like + q1
info = self.blob_info(name)
ivars = self.blob_ivars[info[0]]
# We assume that ivars are [redshift, magnitude]
M = ivars[1]
loc = np.argmax(self.logL[skip:stop])
sfe = []
for i, mass in enumerate(M):
data, is_log = self.ExtractData(name, ivar=[z, mass],
take_log=take_log, un_log=un_log, multiplier=multiplier)
if not shade_by_like:
sfe.append(data[name][skip:stop][loc])
else:
lo, hi = np.percentile(data[name][skip:stop].compressed(),
(q1, q2))
sfe.append((lo, hi))
if shade_by_like:
sfe = np.array(sfe).T
if take_log:
sfe = 10**sfe
else:
zeros = np.argwhere(sfe == 0)
for element in zeros:
sfe[element[0],element[1]] = 1e-15
ax.fill_between(M, sfe[0], sfe[1], **kwargs)
ax.set_xscale('log')
ax.set_yscale('log')
else:
if take_log:
sfe = 10**sfe
ax.loglog(M, sfe, **kwargs)
ax.set_xlabel(r'$M_h / M_{\odot}$')
ax.set_ylabel(r'$f_{\ast}(M)$')
ax.set_ylim(1e-4, 1)
ax.set_xlim(1e7, 1e14)
pl.draw()
return ax
def LuminosityFunction(self, z, ax=None, fig=1, compare_to=None, popid=0,
name='galaxy_lf', shade_by_like=False, like=0.685, scatter_kwargs={},
Mlim=(-24, -10), N=1, take_log=False, un_log=False,
multiplier=1, skip=0, stop=None, **kwargs):
"""
Plot the luminosity function used to train the SFE.
"""
if ax is None:
gotax = False
fig = pl.figure(fig)
ax = fig.add_subplot(111)
else:
gotax = True
if shade_by_like:
q1 = 0.5 * 100 * (1. - like)
q2 = 100 * like + q1
# Plot fits compared to observational data
M = np.arange(Mlim[0], Mlim[1], 0.05)
lit = read_lit(compare_to)
if (compare_to is not None) and (z in lit.redshifts) and (not gotax):
phi = np.array(lit.data['lf'][z]['phi'])
err = np.array(lit.data['lf'][z]['err'])
uplims = phi - err <= 0.0
ax.errorbar(lit.data['lf'][z]['M'], lit.data['lf'][z]['phi'],
yerr=lit.data['lf'][z]['err'], fmt='o', zorder=10,
uplims=uplims, **scatter_kwargs)
info = self.blob_info(name)
ivars = self.blob_ivars[info[0]]
# We assume that ivars are [redshift, magnitude]
mags_disk = ivars[1]
#
#if self.pf['pop_lf_dustcorr{%i}' % popid]:
#mags_disk += self.dc.AUV(z, mags_disk)
loc = np.argmax(self.logL[skip:stop])
phi = []
for i, mag in enumerate(mags_disk):
data, is_log = self.ExtractData(name, ivar=[z, mags_disk[i]],
take_log=take_log, un_log=un_log, multiplier=multiplier)
if not shade_by_like:
phi.append(data[name][skip:stop][loc])
else:
lo, hi = np.percentile(data[name][skip:stop].compressed(),
(q1, q2))
phi.append((lo, hi))
if shade_by_like:
phi = np.array(phi).T
if take_log:
phi = 10**phi
else:
zeros = np.argwhere(phi == 0)
for element in zeros:
phi[element[0],element[1]] = 1e-15
ax.fill_between(mags_disk, phi[0], phi[1], **kwargs)
ax.set_yscale('log')
else:
if take_log:
phi = 10**phi
ax.semilogy(mags_disk, phi, **kwargs)
ax.set_xlabel(r'$M_{\mathrm{UV}}$')
ax.set_ylabel(r'$\phi(M)$')
ax.set_ylim(1e-8, 10)
ax.set_xlim(-25, -10)
pl.draw()
return ax
def FaintEndSlope(self, z, mag=None, ax=None, fig=1, N=100,
name='alpha_lf', best_only=False, **kwargs):
"""
"""
if ax is None:
gotax = False
fig = pl.figure(fig)
ax = fig.add_subplot(111)
else:
gotax = True
info = self.blob_info(name)
ivars = self.blob_ivars[info[0]]
i = list(ivars[0]).index(z)
M = ivars[1]
loc = np.argmax(self.logL)
alpha = []
for i, mag in enumerate(M):
data, is_log = self.ExtractData(name, ivar=[z, M[i]])
if best_only:
alpha.append(data[name][loc])
else:
alpha.append(data[name])
alpha =
|
np.array(alpha)
|
numpy.array
|
import os
import json
import numpy as np
import osmnx as ox
import pandas as pd
import bmm
from . import utils
seed = 0
np.random.seed(seed)
timestamps = 15
ffbsi_n_samps = int(1e3)
fl_n_samps = np.array([50, 100, 150, 200])
lags = np.array([0, 3, 10])
max_rejections = 30
initial_truncation = None
num_repeats = 20
max_speed = 35
proposal_dict = {'proposal': 'optimal',
'num_inter_cut_off': 10,
'resample_fails': False,
'd_max_fail_multiplier': 2.}
setup_dict = {'seed': seed,
'ffbsi_n_samps': ffbsi_n_samps,
'fl_n_samps': fl_n_samps.tolist(),
'lags': lags.tolist(),
'max_rejections': max_rejections,
'initial_truncation': initial_truncation,
'num_repeats': num_repeats,
'num_inter_cut_off': proposal_dict['num_inter_cut_off'],
'max_speed': max_speed,
'resample_fails': proposal_dict['resample_fails'],
'd_max_fail_multiplier': proposal_dict['d_max_fail_multiplier']}
print(setup_dict)
porto_sim_dir = os.getcwd()
graph_path = porto_sim_dir + '/portotaxi_graph_portugal-140101.osm._simple.graphml'
graph = ox.load_graphml(graph_path)
test_route_data_path = porto_sim_dir + '/test_route.csv'
# Load long-lat polylines
polyline_ll = np.array(json.loads(pd.read_csv(test_route_data_path)['POLYLINE'][0]))
# Convert to utm
polyline = bmm.long_lat_to_utm(polyline_ll, graph)
save_dir = porto_sim_dir + '/tv_output/'
# Create save_dir if not found
if not os.path.exists(save_dir):
os.makedirs(save_dir)
# Save simulation parameters
with open(save_dir + 'setup_dict', 'w+') as f:
json.dump(setup_dict, f)
# Setup map-matching model
mm_model = bmm.ExponentialMapMatchingModel()
mm_model.max_speed = max_speed
# Run FFBSi
ffbsi_route = bmm.offline_map_match(graph,
polyline,
ffbsi_n_samps,
timestamps=timestamps,
mm_model=mm_model,
max_rejections=max_rejections,
initial_d_truncate=initial_truncation,
**proposal_dict)
utils.clear_cache()
fl_pf_routes = np.empty((num_repeats, len(fl_n_samps), len(lags)), dtype=object)
fl_bsi_routes = np.empty((num_repeats, len(fl_n_samps), len(lags)), dtype=object)
n_pf_failures = 0
n_bsi_failures = 0
for i in range(num_repeats):
for j, n in enumerate(fl_n_samps):
for k, lag in enumerate(lags):
print(i, j, k)
# try:
fl_pf_routes[i, j, k] = bmm._offline_map_match_fl(graph,
polyline,
n,
timestamps=timestamps,
mm_model=mm_model,
lag=lag,
update='PF',
max_rejections=max_rejections,
initial_d_truncate=initial_truncation,
**proposal_dict)
print(f'FL PF {i} {j} {k}: {fl_pf_routes[i, j, k].time}')
# except:
# n_pf_failures += 1
print(f'FL PF failures: {n_pf_failures}')
utils.clear_cache()
if lag == 0 and fl_pf_routes[i, j, k] is not None:
fl_bsi_routes[i, j, k] = fl_pf_routes[i, j, k].copy()
print(f'FL BSi {i} {j} {k}:', fl_bsi_routes[i, j, k].time)
else:
# try:
fl_bsi_routes[i, j, k] = bmm._offline_map_match_fl(graph,
polyline,
n,
timestamps=timestamps,
mm_model=mm_model,
lag=lag,
update='BSi',
max_rejections=max_rejections,
initial_d_truncate=initial_truncation,
**proposal_dict)
print(f'FL BSi {i} {j} {k}:', fl_bsi_routes[i, j, k].time)
# except:
# n_bsi_failures += 1
print(f'FL BSi failures: {n_bsi_failures}')
utils.clear_cache()
print(f'FL PF failures: {n_pf_failures}')
print(f'FL BSi failures: {n_bsi_failures}')
np.save(save_dir + 'fl_pf', fl_pf_routes)
|
np.save(save_dir + 'fl_bsi', fl_bsi_routes)
|
numpy.save
|
# -*- coding: utf-8 -*-
import cv2
import os, sys
sys.path.append('./')
import numpy as np
import glob
import math
"""
Created on Thu Jan 10 10:48:00 2013
@author: <NAME>
"""
def read_YUV420(image_path, rows, cols, numfrm):
"""
    Read a YUV file and parse it into Y, U, V images.
    :param image_path: path to the YUV image file
    :param rows: image height (pixels)
    :param cols: image width (pixels)
    :param numfrm: number of frames to read
    :return: list of [Y, U, V] planes per frame
"""
# create Y
gray = np.zeros((rows, cols), np.uint8)
# print(type(gray))
# print(gray.shape)
# create U,V
img_U = np.zeros((int(rows / 2), int(cols / 2)), np.uint8)
# print(type(img_U))
# print(img_U.shape)
img_V = np.zeros((int(rows / 2), int(cols / 2)), np.uint8)
# print(type(img_V))
# print(img_V.shape)
Y = []
U = []
V = []
reader=open(image_path,'rb')
# with open(image_path, 'rb') as reader:
for num in range(numfrm-1):
Y_buf = reader.read(cols * rows)
gray = np.reshape(
|
np.frombuffer(Y_buf, dtype=np.uint8)
|
numpy.frombuffer
|
"""Matrices associated to hypergraphs."""
from warnings import warn
import numpy as np
from scipy.sparse import csr_matrix, diags
__all__ = [
"incidence_matrix",
"adjacency_matrix",
"intersection_profile",
"degree_matrix",
"laplacian",
"multiorder_laplacian",
"clique_motif_matrix",
]
def incidence_matrix(
H, order=None, sparse=True, index=False, weight=lambda node, edge, H: 1
):
"""
A function to generate a weighted incidence matrix from a Hypergraph object,
where the rows correspond to nodes and the columns correspond to edges.
Parameters
----------
H: Hypergraph object
The hypergraph of interest
order: int, optional
Order of interactions to use. If None (default), all orders are used. If int,
must be >= 1.
sparse: bool, default: True
Specifies whether the output matrix is a scipy sparse matrix or a numpy matrix
index: bool, default: False
Specifies whether to output dictionaries mapping the node and edge IDs to indices
weight: lambda function, default=lambda function outputting 1
A function specifying the weight, given a node and edge
Returns
-------
I: numpy.ndarray or scipy csr_matrix
The incidence matrix, has dimension (n_nodes, n_edges)
rowdict: dict
The dictionary mapping indices to node IDs, if index is True
coldict: dict
The dictionary mapping indices to edge IDs, if index is True
"""
edge_ids = H.edges
if order is not None:
edge_ids = [id_ for id_, edge in H._edge.items() if len(edge) == order + 1]
if not edge_ids:
return (np.array([]), {}, {}) if index else np.array([])
node_ids = H.nodes
num_edges = len(edge_ids)
num_nodes = len(node_ids)
node_dict = dict(zip(node_ids, range(num_nodes)))
edge_dict = dict(zip(edge_ids, range(num_edges)))
if node_dict and edge_dict:
if index:
rowdict = {v: k for k, v in node_dict.items()}
coldict = {v: k for k, v in edge_dict.items()}
if sparse:
# Create csr sparse matrix
rows = []
cols = []
data = []
for node in node_ids:
memberships = H.nodes.memberships(node)
# keep only those with right order
memberships = [i for i in memberships if i in edge_ids]
if len(memberships) > 0:
for edge in memberships:
data.append(weight(node, edge, H))
rows.append(node_dict[node])
cols.append(edge_dict[edge])
else: # include disconnected nodes
for edge in edge_ids:
data.append(0)
rows.append(node_dict[node])
cols.append(edge_dict[edge])
I = csr_matrix((data, (rows, cols)))
else:
# Create an np.matrix
I =
|
np.zeros((num_nodes, num_edges), dtype=int)
|
numpy.zeros
|
#!/usr/bin/env/ python3
"""
vortex detection tool, by <NAME>, 2017-04\n
This program load NetCDF files from DNS simulations or PIV experiments
and detect the vortices and apply a fitting to them.
"""
import argparse
import numpy as np
import sys
sys.path.insert(1, '../vortexfitting')
import fitting # noqa: E402
# import schemes # noqa: E402
# import detection # noqa: E402
from classes import VelocityField # noqa: E402
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Optional app description',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-i', '--input', dest='infilename',
default='../data/example_data_numerical_PIV.nc',
help='input NetCDF file', metavar='FILE')
args = parser.parse_args()
print('Some tests for the Lamb-Oseen model')
print(args.infilename)
vfield = VelocityField(args.infilename, 0, '/', 'piv_netcdf')
def test_oseen(core_radius, gamma, dist, xdrift, ydrift, u_advection, v_advection):
print('core_radius:', core_radius, 'Gamma', gamma, 'xdrift', xdrift,
'ydrift', ydrift, 'u_advection', u_advection, 'v_advection', v_advection)
model = [[], [], [], [], [], []]
model[0] = core_radius
model[1] = gamma
core_radius_ori = model[0]
gamma_ori = model[1]
x_index = np.linspace(-1, 1, dist)
y_index =
|
np.linspace(-1, 1, dist)
|
numpy.linspace
|
import pandas as pd # importing the Pandas library required for file reading and file importing into the code
import numpy as np # mathematical library used for calling basic maths functions
#-----------------------------------------------------------------------------------------------------------------------------------------------
#BASIC FUNCTIONS REQUIRED
def sigmoid(x) : # maps the weighted sum (log-odds) to a conditional probability in (0, 1)
denom = (1.0 + np.exp(-x)) # the larger the log-odds of the positive class, the closer the output is to 1
return 1.0/denom
def V(X,w) : # Given an input feature vector.
net = np.dot(X,w)
return sigmoid(net) #this function returns the sigmoid of weighted sum(the input vector and the weight vectors are inclusive of the bias term for this code)
def Error(X,w,y) : # Cost Function
f1 = np.sum(np.dot(y.T,np.log(V(X,w)))) # This is the main function that gives the information about how far away the parameters are from their locally optimized values.
f2 = np.sum(np.dot((1-y).T,np.log(1-V(X,w)))) # Also known as negative log likelihood function. This is obtained since the outcomes are conditional probabilities for each class and each feature vector is independent of the others.
return -(f1 + f2)/y.size # The main idea of this implementation is the minimization of this cost function to obtain optimized parameters.
def gradError(X,w,y) : # The partial derivative of cost function w.r.t the Weights.
prediction = V(X,w)
X_trans = X.T # Transpose of feature vector
return (np.dot(X_trans,(V(X,w) - y)))/(y.size)
# Gradient of Cost Function, X: feature vector, w: weight matrix, y: function class to be learned, V(X,w): predicted class
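# Sanity-check sketch (toy data, hypothetical variable names): the analytic
# gradient returned by gradError can be verified against a central
# finite-difference approximation of Error.
_Xc = np.array([[1., 0., 1.], [0., 1., 1.]])
_yc = np.array([1., 0.])
_wc = np.array([0.1, -0.2, 0.05])
_eps = 1e-6
_num_grad = np.array([(Error(_Xc, _wc + _eps*np.eye(3)[i], _yc) - Error(_Xc, _wc - _eps*np.eye(3)[i], _yc)) / (2*_eps) for i in range(3)])
# _num_grad should agree with gradError(_Xc, _wc, _yc) to within ~1e-8.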
#-----------------------------------------------------------------------------------------------------------------------------------------------
# FUNCTION REQUIRED FOR NORMALIZATION OF INPUT DATA
def normalized(X):
X_mean=X.mean(axis=0) # Calculates the mean value for the input data set
X_std=X.std(axis=0) # Calculates the standard deviation for the input data set
return (X-X_mean)/X_std # Returns the normalized data set
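# Quick illustration (toy array): after normalized(), every column has mean ~0
# and standard deviation ~1.
_demo_X = np.array([[1., 10.], [2., 20.], [3., 30.]])
_demo_Xn = normalized(_demo_X)
# _demo_Xn.mean(axis=0) -> approx. [0., 0.]; _demo_Xn.std(axis=0) -> approx. [1., 1.]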
# -----------------------------------------------------------------------------------------------------------------------------------------------
# DATA HANDLING PART OF THE CODE USING PANDAS LIBRARY
# The pandas function "read_csv" takes the local file path where the data was stored during training.
# These paths must be updated if the location of the data files changes.
data_train_features = pd.read_csv("/Users/vishalsharma/Documents/ELL409/Assignment1/dataset/train_data.csv", names=['x1','x2','x3','x4','x5','x6','x7','x8','x9','x10','x11','x12','x13','x14','x15','x16']) # Importing the training feature vectors and storing them in the corresponding variable
data_test_features = pd.read_csv("/Users/vishalsharma/Documents/ELL409/Assignment1/dataset/test_data.csv", names=['x1','x2','x3','x4','x5','x6','x7','x8','x9','x10','x11','x12','x13','x14','x15','x16']) # Importing the test feature vectors and storing them in the corresponding variable
data_train_features_matrix = data_train_features.to_numpy() # Creating a matrix of the obtained features for both the training and the test data sets. (as_matrix() was removed from pandas; to_numpy() is the current equivalent.)
data_test_features_matrix = data_test_features.to_numpy() # Training/Test feature matrix shape = (number of training/test inputs, 16) # Exclusive of the bias term '1'
data_train_labels = pd.read_csv("/Users/vishalsharma/Documents/ELL409/Assignment1/dataset/train_labels.csv", names=['y']) # Importing the training labels and storing them in the corresponding variable
data_test_labels = pd.read_csv("/Users/vishalsharma/Documents/ELL409/Assignment1/dataset/test_labels.csv", names=['y']) # Importing the test labels and storing them in the corresponding variable
#Y_df = pd.DataFrame(data_train_labels.y)
#print(Y_df.head())
data_train_labels_matrix = data_train_labels.to_numpy() # Creating a matrix of the obtained labels for both the training and the test data sets.
data_test_labels_matrix = data_test_labels.to_numpy() # Training/Test label matrix shape = (number of training/test inputs, 1)
data_train_features_matrix[:,1:] = normalized(data_train_features_matrix[:,1:]) # Normalizing the training feature data set
X_train = np.zeros((data_train_features_matrix.shape[0],17))
X_train[:,16] = 1.0
for i in range(16):
X_train[:,i] = data_train_features_matrix[:,i] # Training feature matrix shape = (number of training inputs, 17) # Inclusive of the bias term '1'
data_test_features_matrix[:,1:] = normalized(data_test_features_matrix[:,1:]) # Normalizing the test feature data set
X_test = np.zeros((data_test_features_matrix.shape[0],17))
X_test[:,16] = 1.0
for i in range(16):
X_test[:,i] = data_test_features_matrix[:,i] # Test feature matrix shape = (number of test inputs, 17) # Inclusive of the bias term '1'
Y_train = np.zeros((data_train_labels_matrix.shape[0],10)) # In this step an output matrix for each of the training and test sets is created based on the value of label for that data point
for i in range(10):
Y_train[:,i] = np.where(data_train_labels_matrix[:,0]==i, 1,0) # The new matrix has the shape = (number of training/test labels , 10)
Y_test = np.zeros((data_test_labels_matrix.shape[0],10)) # So, a new matrix is constructed having 10 coloumns with the coloumn number corresponding to the label value to be 1 and the rest to be zero.
for j in range(10):
Y_test[:,j] = np.where(data_test_labels_matrix[:,0]==j, 1,0)
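# Illustration of the one-hot construction above (toy labels): each label value
# becomes an indicator column, e.g. labels [2, 0] become rows with a single 1.
_demo_labels = np.array([2, 0])
_demo_onehot = (np.arange(10) == _demo_labels[:, None]).astype(int)
# _demo_onehot[0, 2] == 1 and _demo_onehot[1, 0] == 1; all other entries are 0.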
#------------------------------------------------------------------------------------------------------------------------------------------------
# MAIN LEARNING PART OF THE CODE. HERE I IMPLEMENT THE USUAL GRADIENT DESCENT ALGORITHM TO MAKE THE COST FUNCTION CONVERGE TO A LOCAL MINIMA
W_opt= np.zeros((X_train.shape[1],10)) # The optimized weight matrix is stored in this variable.
W_opt2= np.zeros((X_train.shape[1],10)) # Again, each coloumn of this matrix is a decision boundary separating that particular class from the rest of the classes.
# The shape of the optimized W_opt matrix = (17,10) in this case with 16 dimensional feature vectors that are required to be classified into either of the 10 distinct classes
def grad_desc(X, w, y, Tolerance, LearningRate) :
error = Error(X, w, y) # Computing the value of the cost function right at the start of the gradient descent algorithm for the first step.
iterations = 1 # Starting the counter for iterations with 1
error_diff = 2 # difference in error between two consecutive iterations (initially set to a value larger than the tolerance so the loop starts; important for loop termination), updated inside the loop
while(error_diff > Tolerance):
error_prev = error # assigns the value of the existing error to the variable error_prev
w = w - (LearningRate * gradError(X, w, y)) # update the weights according to the equation (w(j+1) = w(j) - LearningRate(gradError)) # step towards parameter optimization
error = Error(X, w, y) # new value of error will be equal to the newly calculated one with updated weights
error_diff = error_prev - error # defintion of error_diff
iterations+=1 # updating the iteration number
print('Total iterations required for learning this decision boundary: ', iterations)
return w
for i in range(10):
print('\nLearning the parameters for Class-{} versus the rest\n'.format(i))
W_opt2[:,i] = grad_desc(X_train, W_opt[:,i], Y_train[:,i], Tolerance=1e-6, LearningRate=.001) # I have selected the convergence/tolerance and the learning rate to values that give best efficiency, but the learning is slow with these hyperparameters.
# Taking between 35,000 - 55,000 iterations for learning each class. We can change these values for a trade-off between training time and efficiency
#------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# VARIOUS SCORING METHODS TO TEST FOR THE EFFICIENCY OF THE LEARNED ALGORITHM
def Prob_list(X,w,y): # A function that calculates the probability of a feature vector belonging to a given class
h_prob_list = np.zeros(y.shape) # Simply by computing the sigmoid of the weighted sum over the input vector
for CLASS in range(10):
h_prob_list[:,CLASS]= V(X,w[:,CLASS])
return h_prob_list
def Pred_list(X,w,y): # Converts the probability of the highest coloumn to 1 and the rest to zero.
h_prob_list2 = Prob_list(X,w,y) # This is classification based on the maximum probability corresponding to a class.
pred_list = np.zeros(y.shape)
for Class in range(10):
for i in range(y[:,[1]].shape[0]):
if h_prob_list2[i,Class] == np.amax(h_prob_list2[i,:]):
pred_list[i,Class] = 1
else:
pred_list[i,Class] = 0
return pred_list # This function does the classification based on the probability distributions from the previous function
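# Equivalent vectorized sketch (illustrative only, not used by the code above):
# the nested loops implement an argmax over classes, i.e.
# pred_list = np.eye(10)[np.argmax(h_prob_list2, axis=1)]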
def true_Pos(pred_list, y, Class): # As the name suggests, gives the total number of true Positives for a class in train/test data
totalTruePos = 0
for i in range(y.shape[0]):
if (pred_list[i,Class] == 1 and y[i] == 1):
totalTruePos += 1
return totalTruePos
def false_Pos(pred_list, y, Class): # As the name suggests, gives the total number of false Positives for a class in train/test data
totalFalsePos = 0
for i in range(y.shape[0]):
if (pred_list[i,Class] == 1 and y[i] == 0):
totalFalsePos += 1
return totalFalsePos
def false_Neg(pred_list, y, Class): # As the name suggests, gives the total number of false Negatives for a class in train/test data
totalFalseNeg = 0
for i in range(y.shape[0]):
if (pred_list[i,Class] == 0 and y[i] == 1):
totalFalseNeg += 1
return totalFalseNeg
def true_Neg(pred_list, y, Class): # As the name suggests, gives the total number of true Negatives for a class in train/test data
totalTrueNeg = 0
for i in range(y.shape[0]):
if (pred_list[i,Class] == 0 and y[i] == 0):
totalTrueNeg += 1
return totalTrueNeg
#---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# A FEW SCORING METHODS WITH THEIR MATHEMATICAL DEFINITIONS
def accuracy(pred_list, y, Class):
acc = (true_Pos(pred_list, y, Class) + true_Neg(pred_list, y, Class))/y.size
return acc
def precision(pred_list, y, Class):
prec = true_Pos(pred_list, y,Class)/(false_Pos(pred_list, y, Class) + true_Pos(pred_list, y, Class))
return prec
def recall(pred_list, y, Class):
recall = true_Pos(pred_list, y, Class)/(true_Pos(pred_list, y,Class)+false_Neg(pred_list, y, Class))
return recall
def f1_score(pred_list, y, Class):
score = 2*recall(pred_list, y, Class)*precision(pred_list, y,Class)/(recall(pred_list, y,Class)+precision(pred_list, y,Class))
return score
#---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# PART OF THE CODE THAT COMPUTES THE SCORES VIA AFOREMENTIONED METHODS FOR BOTH TRAINING AND TEST DATA.
def scoringMethods(X,w,y):
pred_list = Pred_list(X,w,y)
ACCURACY = np.zeros(10)
PRECISION = np.zeros(10)
RECALL = np.zeros(10)
F_SCORE = np.zeros(10)
for Class in range(10):
pos_TRUE = true_Pos(pred_list, y[:,Class],Class)
pos_FALSE = false_Pos(pred_list, y[:,Class], Class)
neg_FALSE = false_Neg(pred_list, y[:,Class], Class)
neg_TRUE = true_Neg(pred_list, y[:,Class], Class)
ACCURACY[Class] = accuracy(pred_list, y[:,Class],Class)*100
PRECISION[Class] = precision(pred_list, y[:,Class], Class)
RECALL[Class] = recall(pred_list, y[:,Class], Class)
F_SCORE[Class] = f1_score(pred_list, y[:,Class], Class)
return ACCURACY, PRECISION, RECALL, F_SCORE
ACCURACY_train = np.zeros(10)
PRECISION_train = np.zeros(10)
RECALL_train = np.zeros(10)
F_SCORE_train = np.zeros(10)
ACCURACY_test = np.zeros(10)
PRECISION_test = np.zeros(10)
RECALL_test = np.zeros(10)
from __future__ import absolute_import,division
__filetype__ = "base"
#External Modules
import logging, os, shutil, sys, time, uuid
import numpy as np
from astropy import units as u
from astropy import wcs
from astropy.io import fits
from astropy.table import Table, Column
from copy import deepcopy
from photutils import CircularAperture, aperture_photometry
from photutils.psf.models import GriddedPSFModel
from scipy.ndimage.interpolation import zoom, rotate
if sys.version_info[0] >= 3:
from io import StringIO
else:
from cStringIO import StringIO
#Local Modules
from ..utilities import StipsEnvironment
from ..utilities import OffsetPosition
from ..utilities import overlapadd2
from ..utilities import overlapaddparallel
from ..utilities import read_table
from ..utilities import ImageData
from ..utilities import Percenter
from ..utilities import StipsDataTable
from ..utilities import SelectParameter
from ..errors import GetCrProbs, GetCrTemplate, MakeCosmicRay
stips_version = StipsEnvironment.__stips__version__
class AstroImage(object):
"""
The AstroImage class represents a generic astronomical image. The image has the following
data associated with it:
_file : string of file name (including path) containing mem-mapped numpy array.
data : mem-mapped numpy double-precision 2D array of image data, in counts
scale : array of 2 double-precision floating point values, forming X and Y scale in
arcseconds/pixel
wcs : astropy WCS object containing image WCS information.
header : key/value array. Contains FITS header information and metadata
history : array of strings holding the FITS HISTORY section
"""
def __init__(self, **kwargs):
"""
Astronomical image. The __init__ function creates an empty image with all other data values
set to zero.
"""
default = self.INSTRUMENT_DEFAULT
if 'parent' in kwargs:
self.parent = kwargs['parent']
self.logger = self.parent.logger
self.out_path = self.parent.out_path
self.prefix = self.parent.prefix
self.seed = self.parent.seed
self.telescope = self.parent.TELESCOPE.lower()
self.instrument = self.parent.PSF_INSTRUMENT
self.filter = self.parent.filter
self.oversample = self.parent.oversample
self.shape = np.array(self.parent.DETECTOR_SIZE)*self.oversample
self._scale = np.array(self.parent.SCALE)
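# Illustrative note (the values are assumptions, not STIPS defaults): with
# DETECTOR_SIZE = [4096, 4096] and oversample = 4, the shape computed above
# would be np.array([4096, 4096]) * 4 -> [16384, 16384] oversampled pixels.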
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import numpy as np
import torch
import pycocotools.mask as mask_util
from detectron2.utils.visualizer import (
ColorMode,
Visualizer,
_PanopticPrediction,
)
from detectron2.utils.visualizer import Visualizer
from detectron2.utils.colormap import random_color
from detectron2.structures import Boxes, RotatedBoxes
from lib.utils.visualizer import InteractionVisualizer, _create_text_labels
class _DetectedInstance:
"""
Used to store data about detected objects in video frame,
in order to transfer color to objects in the future frames.
Attributes:
label (int):
bbox (tuple[float]):
mask_rle (dict):
color (tuple[float]): RGB colors in range (0, 1)
ttl (int): time-to-live for the instance. For example, if ttl=2,
the instance color can be transferred to objects in the next two frames.
"""
__slots__ = ["label", "bbox", "color", "ttl"]
def __init__(self, label, bbox, color, ttl):
self.label = label
self.bbox = bbox
self.color = color
self.ttl = ttl
class VideoVisualizer:
def __init__(self, metadata, instance_mode=ColorMode.IMAGE):
"""
Args:
metadata (MetadataCatalog): image metadata.
"""
self.metadata = metadata
self._old_instances = []
assert instance_mode in [
ColorMode.IMAGE,
ColorMode.IMAGE_BW,
], "Other mode not supported yet."
self._instance_mode = instance_mode
def draw_interaction_predictions(self, frame, predictions):
"""
Draw interaction prediction results on an image.
Args:
frame (ndarray): an RGB image of shape (H, W, C), in the range [0, 255].
predictions (Instances): the output of an interaction detection model.
Following fields will be used to draw: "person_boxes", "object_boxes",
"object_classes", "action_classes", "scores".
Returns:
output (VisImage): image object with visualizations.
"""
frame_visualizer = InteractionVisualizer(frame, self.metadata)
thing_colors = self.metadata.get("thing_colors", None)  # default must be None, not the string "None"
if thing_colors:
thing_colors = [color for name, color in thing_colors.items()]
num_instances = len(predictions)
if num_instances == 0:
return frame_visualizer.output
person_boxes = self._convert_boxes(predictions.person_boxes)
object_boxes = self._convert_boxes(predictions.object_boxes)
object_classes = predictions.object_classes
classes = predictions.pred_classes
scores = predictions.scores
# Take the unique person and object boxes.
unique_person_boxes = np.asarray([list(x) for x in set(tuple(x) for x in person_boxes)])
unique_object_boxes = np.asarray([list(x) for x in set(tuple(x) for x in object_boxes)])
unique_object_classes = {tuple(x): -1 for x in unique_object_boxes}
for box, c in zip(object_boxes, object_classes):
unique_object_classes[tuple(box)] = c
unique_object_colors = {tuple(x): None for x in unique_object_boxes}
if thing_colors:
for box, c in unique_object_classes.items():
unique_object_colors[box] = thing_colors[c]
unique_object_colors = [color for _, color in unique_object_colors.items()]
# Assign colors to person boxes and object boxes.
object_detected = [
_DetectedInstance(unique_object_classes[tuple(box)], box, color=color, ttl=8)
for box, color in zip(unique_object_boxes, unique_object_colors)
]
object_colors = self._assign_colors(object_detected)
assigned_person_colors = {tuple(x): 'w' for x in unique_person_boxes}
assigned_object_colors = {tuple(x.bbox): x.color for x in object_detected}
# Take all interaction associated with each unique person box
# classes_to_contiguous_id = self.metadata.get("interaction_classes_to_contiguous_id", None)
# contiguous_id_to_classes = {v: k for k, v in classes_to_contiguous_id.items()} \
# if classes_to_contiguous_id else None
labels = _create_text_labels(classes, scores)
interactions_to_draw = {tuple(x): [] for x in unique_person_boxes}
labels_to_draw = {tuple(x): [] for x in unique_person_boxes}
for i in range(num_instances):
x = tuple(person_boxes[i])
interactions_to_draw[x].append(object_boxes[i])
if labels is not None:
labels_to_draw[x].append(
{
"label": labels[i],
"color": assigned_object_colors[tuple(object_boxes[i])]
}
)
if self._instance_mode == ColorMode.IMAGE_BW:
# any() returns uint8 tensor
frame_visualizer.output.img = frame_visualizer._create_grayscale_image(
(masks.any(dim=0) > 0).numpy() if masks is not None else None
)
alpha = 0.3
else:
alpha = 0.5
frame_visualizer.overlay_interactions(
unique_person_boxes=unique_person_boxes,
unique_object_boxes=unique_object_boxes,
interactions=interactions_to_draw,
interaction_labels=labels_to_draw,
assigned_person_colors=assigned_person_colors,
assigned_object_colors=assigned_object_colors,
alpha=alpha,  # use the alpha selected above for the current color mode
)
return frame_visualizer.output
def draw_instance_predictions(self, frame, predictions):
"""
Draw instance-level prediction results on an image.
Args:
frame (ndarray): an RGB image of shape (H, W, C), in the range [0, 255].
predictions (Instances): the output of an instance detection/segmentation
model. Following fields will be used to draw:
"pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle").
Returns:
output (VisImage): image object with visualizations.
"""
frame_visualizer = Visualizer(frame, self.metadata)
num_instances = len(predictions)
if num_instances == 0:
return frame_visualizer.output
boxes = predictions.pred_boxes.tensor.numpy() if predictions.has("pred_boxes") else None
scores = predictions.scores if predictions.has("scores") else None
classes = predictions.pred_classes.numpy() if predictions.has("pred_classes") else None
detected = [
_DetectedInstance(classes[i], boxes[i], color=None, ttl=8)
for i in range(num_instances)
]
colors = self._assign_colors(detected)
labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None))
if self._instance_mode == ColorMode.IMAGE_BW:
# any() returns uint8 tensor
frame_visualizer.output.img = frame_visualizer._create_grayscale_image(
(masks.any(dim=0) > 0).numpy() if masks is not None else None
)
alpha = 0.3
else:
alpha = 0.5
frame_visualizer.overlay_instances(
boxes=boxes,
labels=labels,
assigned_colors=colors,
alpha=alpha,
)
return frame_visualizer.output
def _assign_colors(self, instances):
"""
Naive tracking heuristics to assign same color to the same instance,
will update the internal state of tracked instances.
Returns:
list[tuple[float]]: list of colors.
"""
# Compute iou with either boxes or masks:
is_crowd = np.zeros((len(instances),), dtype=bool)  # np.bool is removed in recent NumPy; plain bool is equivalent
boxes_old = [x.bbox for x in self._old_instances]
boxes_new = [x.bbox for x in instances]
ious = mask_util.iou(boxes_old, boxes_new, is_crowd)
threshold = 0.6
if len(ious) == 0:
ious = np.zeros((len(self._old_instances), len(instances)), dtype="float32")
# Only allow matching instances of the same label:
for old_idx, old in enumerate(self._old_instances):
for new_idx, new in enumerate(instances):
if old.label != new.label:
ious[old_idx, new_idx] = 0
matched_new_per_old = np.asarray(ious).argmax(axis=1)
max_iou_per_old = np.asarray(ious).max(axis=1)
# Try to find match for each old instance:
extra_instances = []
for idx, inst in enumerate(self._old_instances):
if max_iou_per_old[idx] > threshold:
newidx = matched_new_per_old[idx]
if instances[newidx].color is None:
instances[newidx].color = inst.color
continue
# If an old instance does not match any new instances,
# keep it for the next frame in case it is just missed by the detector
inst.ttl -= 1
if inst.ttl > 0:
extra_instances.append(inst)
# Assign random color to newly-detected instances:
for inst in instances:
if inst.color is None:
inst.color = random_color(rgb=True, maximum=1)
self._old_instances = instances[:] + extra_instances
return [d.color for d in instances]
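# Pure-NumPy illustration of the box IoU driving the matching above (a sketch
# for clarity only; the class itself relies on pycocotools' mask_util.iou).
# Boxes here use an (x0, y0, x1, y1) convention.
def _demo_box_iou(a, b):
# intersection rectangle
x0, y0 = max(a[0], b[0]), max(a[1], b[1])
x1, y1 = min(a[2], b[2]), min(a[3], b[3])
inter = max(0.0, x1 - x0) * max(0.0, y1 - y0)
area_a = (a[2] - a[0]) * (a[3] - a[1])
area_b = (b[2] - b[0]) * (b[3] - b[1])
return inter / (area_a + area_b - inter) if inter > 0 else 0.0
# _demo_box_iou((0, 0, 10, 10), (5, 5, 15, 15)) -> 25 / 175 ~= 0.143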
def _convert_boxes(self, boxes):
"""
Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension.
"""
if isinstance(boxes, Boxes) or isinstance(boxes, RotatedBoxes):
return boxes.tensor.numpy()
else:
return np.asarray(boxes)
def draw_proposals(self, frame, proposals, thresh):
"""
Draw interaction prediction results on an image.
Args:
predictions (Instances): the output of an interaction detection model.
Following fields will be used to draw:
"person_boxes", "object_boxes", "pred_classes", "scores"
Returns:
output (VisImage): image object with visualizations.
"""
_MAX_OBJECT_AREA = 60000
frame_visualizer = InteractionVisualizer(frame, self.metadata)
num_instances = len(proposals)
if num_instances == 0:
return frame_visualizer.output
proposal_boxes = self._convert_boxes(proposals.proposal_boxes)
scores = np.asarray(proposals.interactness_logits)
is_person = np.asarray(proposals.is_person)
#!/usr/bin/env python
#
# Inspired by g_mmpbsa code.
# #
# Copyright (c) 2016-2019,<NAME>.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the molmolpy Developers nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, division, print_function
from builtins import range
from builtins import object
import re
import numpy as np
import argparse
import sys
import os
import math
import time
from copy import deepcopy
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
import matplotlib.pylab as plt
import numpy as np
from matplotlib.colors import ListedColormap
import mdtraj as md
from molmolpy.utils.cluster_quality import *
from molmolpy.utils import converters
from molmolpy.utils import plot_tools
from molmolpy.utils import pdb_tools
from molmolpy.utils import folder_utils
from molmolpy.utils import extra_tools
from molmolpy.utils import pymol_tools
from molmolpy.utils import protein_analysis
class EnergyAnalysisObject(object):
"""
Usage Example
>>> from molmolpy.moldyn import md_analysis
>>> from molmolpy.g_mmpbsa import mmpbsa_analyzer
>>>
>>> import os
>>>
>>> # In[3]:
>>>
>>> folder_to_sim = '/media/Work/SimData/g_mmpbsa/HSL/HSL_1_backbone/Cluster1/'
>>>
>>> molmech = folder_to_sim + 'contrib_MM.dat'
>>> polar = folder_to_sim + 'contrib_pol.dat'
>>> apolar = folder_to_sim + 'contrib_apol.dat'
>>>
>>> LasR_energy_object = mmpbsa_analyzer.EnergyAnalysisObject(molmech, polar, apolar,
>>> sim_num=3)
>>>
>>> LasR_energy_object.plot_bar_energy_residues()
>>> LasR_energy_object.plot_most_contributions()
>>> LasR_energy_object.plot_sorted_contributions()
>>>
>>>
>>> centroid_file = '/media/Work/MEGA/Programming/docking_LasR/HSL_1_v8/centroid.pdb'
>>>
>>>
>>> LasR_energy_object.add_centroid_pdb_file(centroid_file)
>>> LasR_energy_object.save_mmpbsa_analysis_pickle('HSL_simulation_cluster3.pickle')
>>> #LasR_energy_object.visualize_interactions_pymol()
>>>
>>>
>>> test = 1
>>> # simulation_name = 'LasR_Ligand_simulation'
>>> #
This class parses the g_mmpbsa per-residue contribution files (contrib_MM.dat,
contrib_pol.dat, contrib_apol.dat) together with the frame-wise energy .xvg files,
computes total binding-energy contributions per residue (optionally with bootstrap
error estimates), and provides plotting and PyMOL visualisation helpers.
Notes
-----
Convert gro to PDB so mdtraj recognises the topology:
gmx editconf -f npt.gro -o npt.pdb
"""
# @profile
def __init__(self,
energymm_xvg,
polar_xvg,
apolar_xvg,
molmech,
polar,
apolar,
bootstrap=True,
bootstrap_steps=5000,
sim_num=1,
receptor_name='LasR',
molecule_name='HSL',
meta_file=None
):
self.receptor_name = receptor_name
self.molecule_name = molecule_name
self.sim_num = sim_num
self.simulation_name = self.receptor_name + '_' + self.molecule_name + '_num:' + str(self.sim_num)
self.meta_file = meta_file
# molmech = folder_to_sim + 'contrib_MM.dat'
# polar = folder_to_sim + 'contrib_pol.dat'
# apolar = folder_to_sim + 'contrib_apol.dat'
# Complex Energy
c = []
if meta_file is not None:
MmFile, PolFile, APolFile = ReadMetafile(meta_file)
for i in range(len(MmFile)):
cTmp = Complex(MmFile[i], PolFile[i], APolFile[i], K[i])
cTmp.CalcEnergy(args, frame_wise, i)
c.append(cTmp)
else:
cTmp = Complex(energymm_xvg, polar_xvg, apolar_xvg)
self.cTmp = cTmp
self.full_copy_original = deepcopy(cTmp)
self.full_copy_bootstrap = deepcopy(cTmp)
# cTmp.CalcEnergy(frame_wise, 0, bootstrap=bootstrap, bootstrap_steps=bootstrap_steps)
# c.append(cTmp)
# Summary in output files => "--outsum" and "--outmeta" file options
# TODO adapt to make able to use bootstrap as well, multiple analysis modes?
self.c = c
# summary_output_filename = self.simulation_name + '_binding_summary.log'
# Summary_Output_File(c, summary_output_filename, meta_file)
#
# corr_outname = self.simulation_name + '_correllation_distance.log'
# corr_plot = self.simulation_name + '_correllation_plot.png'
test = 1
# This won't work it needs K, read paper again
#FitCoef_all = PlotCorr(c, corr_outname, corr_plot, bootstrap_steps)
#PlotEnrgy(c, FitCoef_all, args, args.enplot)
# RESIDUE analysis part
self.MMEnData, self.resnameA = ReadData_Residue_Parse(molmech)
self.polEnData, self.resnameB = ReadData_Residue_Parse(polar)
self.apolEnData, self.resnameC = ReadData_Residue_Parse(apolar)
self.resname = CheckResname(self.resnameA, self.resnameB, self.resnameC)
self.sim_num = sim_num
Residues = []
data = []
columns_residue_energy = ['index', 'ResidueNum', 'Residue', 'TotalEnergy', 'TotalEnergySD']
for i in range(len(self.resname)):
CheckEnData_residue(self.MMEnData[i], self.polEnData[i], self.apolEnData[i])
r = Residue()
r.CalcEnergy(self.MMEnData[i], self.polEnData[i], self.apolEnData[i], bootstrap, bootstrap_steps)
Residues.append(r)
# print(' %8s %8.4f %8.4f' % (self.resname[i], r.TotalEn[0], r.TotalEn[1]))
data.append([i, i + 1, self.resname[i], r.TotalEn[0], r.TotalEn[1]])
self.pandas_residue_energy_data = pd.DataFrame(data)
self.pandas_residue_energy_data.columns = columns_residue_energy
test = 1
self.most_contributions = self.pandas_residue_energy_data[:-1]
self.most_contributions = self.most_contributions.sort_values(['TotalEnergy'])
test = 1
def calculate_binding_energy_full(self, idx=0,jump_data=1, bootstrap=False, bootstrap_steps=5000):
'''
Calculate full binding energy then analyze autocorrelation and partial correlation
:param idx: from frame number
:param bootstrap: for this one dont calculate bootstrap
:param bootstrap_steps:
:return:
'''
# TODO CALCULATION OF BINDING ENERGY
outfr = self.simulation_name + '_full.log'
try:
frame_wise = open(outfr, 'w')
except:
raise IOError('Could not open file {0} for writing. \n'.format(outfr))
frame_wise.write(
'#Time E_VdW_mm(Protein)\tE_Elec_mm(Protein)\tE_Pol(Protein)\tE_Apol(Protein)\tE_VdW_mm(Ligand)\tE_Elec_mm(Ligand)\tE_Pol(Ligand)\tE_Apol(Ligand)\tE_VdW_mm(Complex)\tE_Elec_mm(Complex)\tE_Pol(Complex)\tE_Apol(Complex)\tDelta_E_mm\tDelta_E_Pol\tDelta_E_Apol\tDelta_E_binding\n')
self.frame_wise_full = frame_wise
self.c_full = []
self.full_copy_original.CalcEnergy(self.frame_wise_full, idx, jump_data=jump_data, bootstrap=bootstrap, bootstrap_steps=bootstrap_steps)
self.c_full.append(self.full_copy_original)
summary_output_filename = self.simulation_name + '_binding_summary_full.log'
Summary_Output_File(self.c_full, summary_output_filename, self.meta_file)
self.autocorr_analysis(self.c_full, 'full')
def calculate_binding_energy_bootstrap(self, idx=0, bootstrap=True, bootstrap_steps=5000, bootstrap_jump=4):
'''
Calculate bootstrap binding energy then analyze autocorrelation and partial correlation
:param idx: from frame number
:param bootstrap: for this one dont calculate bootstrap
:param bootstrap_steps:
:return:
'''
# TODO CALCULATION OF BINDING ENERGY
outfr = self.simulation_name + '_bootstrap.log'
try:
frame_wise = open(outfr, 'w')
except:
raise IOError('Could not open file {0} for writing. \n'.format(outfr))
frame_wise.write(
'#Time E_VdW_mm(Protein)\tE_Elec_mm(Protein)\tE_Pol(Protein)\tE_Apol(Protein)\tE_VdW_mm(Ligand)\tE_Elec_mm(Ligand)\tE_Pol(Ligand)\tE_Apol(Ligand)\tE_VdW_mm(Complex)\tE_Elec_mm(Complex)\tE_Pol(Complex)\tE_Apol(Complex)\tDelta_E_mm\tDelta_E_Pol\tDelta_E_Apol\tDelta_E_binding\n')
self.frame_wise_bootstrap = frame_wise
self.c_bootstrap = []
self.full_copy_bootstrap.CalcEnergy(self.frame_wise_bootstrap, idx,
bootstrap=bootstrap,
bootstrap_steps=bootstrap_steps,
bootstrap_jump=bootstrap_jump)
self.c_bootstrap.append(self.full_copy_bootstrap)
summary_output_filename = self.simulation_name + '_binding_summary_bootstrap.log'
Summary_Output_File(self.c_bootstrap, summary_output_filename, self.meta_file)
self.autocorr_analysis(self.c_bootstrap, 'bootstrap')
def autocorr_analysis(self, energy_val, naming='full'):
if naming =='full':
total_en = energy_val[0].TotalEn
time = energy_val[0].time
else:
total_en = energy_val[0].TotalEn_bootstrap
time = energy_val[0].time_bootstrap
# Old version :)
# print('Mean autocorrelation ', np.mean(autocorr(total_en)))
# plt.semilogx(time, autocorr(total_en))
# plt.xlabel('Time [ps]', size=16)
# plt.ylabel('Binding Energy autocorrelation', size=16)
# plt.show()
from pandas import Series
from matplotlib import pyplot
from statsmodels.graphics.tsaplots import plot_acf
series = Series(total_en, index=time)  # Series.from_array was removed from pandas; the constructor is equivalent here
# https://machinelearningmastery.com/gentle-introduction-autocorrelation-partial-autocorrelation/
plot_acf(series, alpha=0.05)
# pyplot.show()
plt.savefig(self.simulation_name + '_autocorrelation_bindingEnergy_{0}.png'.format(naming), dpi=600)
from statsmodels.graphics.tsaplots import plot_pacf
# plot_pacf(series, lags=50)
plot_pacf(series)
plt.savefig(self.simulation_name +'_partial_autocorrelation_bindingEnergy_{0}.png'.format(naming), dpi=600)
#pyplot.show()
test = 1
def plot_binding_energy_full(self):
bind_energy = self.full_copy_original.TotalEn
time = self.full_copy_original.time
dataframe = converters.convert_data_to_pandas(time, bind_energy,
x_axis_name='time',
y_axis_name='binding')
import seaborn as sns
sns.set(style="ticks")
plt.clf()
plt.plot(time, bind_energy)
plt.savefig('test.png', dpi=600)
# sns.lmplot(x="time", y="binding",data=dataframe,
# ci=None, palette="muted", size=4,
# scatter_kws={"s": 50, "alpha": 1})
# sns.tsplot(data=dataframe)
test = 1
def add_centroid_pdb_file(self, filename, simplified_state=True):
self.centroid_pdb_file = filename
self.dssp_traj = md.load(self.centroid_pdb_file)
self.dssp_data = md.compute_dssp(self.dssp_traj, simplified=simplified_state)
# self.helixes = protein_analysis.find_helixes(self.dssp_data)
self.helixes = protein_analysis.find_dssp_domain(self.dssp_data, type='H')
self.strands = protein_analysis.find_dssp_domain(self.dssp_data, type='E')
self.data_to_save = {self.sim_num: {'residueEnergyData': self.pandas_residue_energy_data[:-1],
'mostResidueContrib': self.most_contributions_plot,
'mostAllContrib': self.most_contributions_plot_all,
'centroidFile': self.centroid_pdb_file,
'dsspObject': self.dssp_data,
'dsspData': self.dssp_data,
'dsspStructures': {'helix': self.helixes,
'strands': self.strands}}
}
test = 1
def save_mmpbsa_analysis_pickle(self, filename):
import pickle
if filename is None:
filename = self.simulation_name + '_pickleFile.pickle'
# pickle.dump(self.cluster_models, open(filename, "wb"))
pickle.dump(self.data_to_save, open(filename, "wb"))
def plot_bar_energy_residues(self,
custom_dpi=600,
trasparent_alpha=False):
# sns.set(style="white", context="talk")
sns.set(style="ticks", context="paper")
# Set up the matplotlib figure
f, ax1 = plt.subplots(1, 1, figsize=(plot_tools.cm2inch(17, 10)), sharex=True)
# Generate some sequential data
to_plot_data = self.pandas_residue_energy_data[:-1]
sns.barplot(to_plot_data['ResidueNum'], to_plot_data['TotalEnergy'],
palette="BuGn_d", ax=ax1)
ax1.set_ylabel("Contribution Energy (kJ/mol)")
ax1.set_xlabel("Residue Number")
last_index = to_plot_data['ResidueNum'].iloc[-1]
# this is buggy
x_label_key = []
# ax1.set_xticklabels(to_plot_data['ResidueNum']) # set new labels
# # ax1.set_x
#
# for ind, label in enumerate(ax1.get_xticklabels()):
# if ind+1 == last_index:
# label.set_visible(True)
# elif (ind+1) % 100 == 0: # every 100th label is kept
# label.set_visible(True)
# # label = round(sim_time[ind])
# # x_label_key.append(ind)
# else:
# label.set_visible(False)
# x_label_key.append(ind)
ax1.set_xlim(1, last_index)
ax1.xaxis.set_major_locator(ticker.LinearLocator(3))
ax1.xaxis.set_minor_locator(ticker.LinearLocator(31))
labels = [item.get_text() for item in ax1.get_xticklabels()]
test = 1
labels[0] = '1'
labels[1] = str(last_index // 2)
labels[2] = str(last_index)
ax1.set_xticklabels(labels)
# ax1.text(0.0, 0.1, "LinearLocator(numticks=3)",
# fontsize=14, transform=ax1.transAxes)
tick_labels = []
# for ind, tick in enumerate(ax1.get_xticklines()):
# # tick part doesn't work
# test = ind
# # if ind+1 == last_index:
# # tick.set_visible(True)
# if (ind+1) % 10 == 0: # every 100th label is kept
# tick.set_visible(True)
# else:
# tick.set_visible(False)
# tick_labels.append(tick)
#
# ax1.set_xticklabels
# for ind, label in enumerate(ax.get_yticklabels()):
# if ind % 50 == 0: # every 100th label is kept
# label.set_visible(True)
# else:
# label.set_visible(False)
#
# for ind, tick in enumerate(ax.get_yticklines()):
# if ind % 50 == 0: # every 100th label is kept
# tick.set_visible(True)
# else:
# tick.set_visible(False)
# Finalize the plot
sns.despine()
# plt.setp(f.axes, yticks=[])
plt.tight_layout()
# plt.tight_layout(h_pad=3)
# sns.plt.show()
f.savefig(self.simulation_name + '_residue_contribution_all.png',
dpi=custom_dpi,
transparent=trasparent_alpha)
def plot_most_contributions(self,
custom_dpi=600,
trasparent_alpha=False):
sns.set(style="white", context="talk")
# Set up the matplotlib figure
# f, ax1 = plt.subplots(1, 1, figsize=(plot_tools.cm2inch(17, 10)), sharex=True)
# Generate some sequential data
self.most_contributions_plot = self.most_contributions[self.most_contributions['TotalEnergy'] < -1.0]
self.most_contributions_plot = self.most_contributions_plot[
np.isfinite(self.most_contributions_plot['TotalEnergy'])]
# self.most_contributions_plot = self.most_contributions_plot.dropna(axis=1)
test = 1
# sns.barplot(self.most_contributions_plot['Residue'], self.most_contributions_plot['TotalEnergy'],
# palette="BuGn_d", ax=ax1)
# cmap = sns.cubehelix_palette(n_colors=len(self.most_contributions_plot['TotalEnergy']), as_cmap=True)
cmap = sns.dark_palette("palegreen", as_cmap=True)
ax1 = self.most_contributions_plot.plot(x='Residue', y='TotalEnergy', yerr='TotalEnergySD', kind='bar',
colormap='Blues',
legend=False)
# ax1 = self.most_contributions_plot['TotalEnergy'].plot(kind='bar')
# ax1.bar(self.most_contributions_plot['ResidueNum'], self.most_contributions_plot['TotalEnergy'],
# width=40,
# yerr=self.most_contributions_plot['TotalEnergySD'])
ax1.set_ylabel("Contribution Energy (kJ/mol)")
#
# # # Center the data to make it diverging
# # y2 = y1 - 5
# # sns.barplot(x, y2, palette="RdBu_r", ax=ax2)
# # ax2.set_ylabel("Diverging")
# #
# # # Randomly reorder the data to make it qualitative
# # y3 = rs.choice(y1, 9, replace=False)
# # sns.barplot(x, y3, palette="Set3", ax=ax3)
# # ax3.set_ylabel("Qualitative")
#
# # Finalize the plot
#
labels = ax1.get_xticklabels() # get x labels
# for i, l in enumerate(labels):
# if (i % 2 == 0): labels[i] = '' # skip even labels
ax1.set_xticklabels(self.most_contributions_plot['Residue'], rotation=50) # set new labels
# plt.show()
#
#
# sns.despine(bottom=True)
# # plt.setp(f.axes, yticks=[])
plt.tight_layout()
# # plt.tight_layout(h_pad=3)
# # sns.plt.show()
#
plt.savefig(self.simulation_name + '_most_residue_contribution.png',
dpi=custom_dpi,
transparent=trasparent_alpha)
def plot_sorted_contributions(self,
custom_dpi=600,
trasparent_alpha=False,
lower_criteria=-0.5,
upper_criteria=0.5
):
my_cmap = sns.light_palette("Navy", as_cmap=True)
self.cmap_residue_energy = sns.cubehelix_palette(as_cmap=True)
self.most_contributions_plot_all = self.most_contributions[
(self.most_contributions['TotalEnergy'] < lower_criteria) |
(self.most_contributions['TotalEnergy'] > upper_criteria)]
colors_sns = sns.cubehelix_palette(n_colors=len(self.most_contributions_plot_all), dark=0.5, light=0.92,
reverse=True)
# residue_color_data = converters.convert_seaborn_color_to_rgb(colors)
self.all_residue_colors_to_rgb = converters.convert_values_to_rgba(self.most_contributions_plot_all['TotalEnergy'],
cmap=self.cmap_residue_energy, type='seaborn')
# colors = sns.cubehelix_palette(n_colors=len(self.most_contributions_plot_all), dark=0.5, light=0.92, reverse=True)
#
# residue_color_data = converters.convert_seaborn_color_to_rgb(colors)
# sns.palplot(colors)
# plot_tools.custom_palplot_vertical(colors)
# sns.plt.show()
test = 1
# self.most_contributions_plot_all.plot(x='Residue', y='TotalEnergy', yerr='TotalEnergySD', kind='bar',
# colormap=self.cmap_residue_energy,
# legend=False)
# f, ax1 = plt.subplots(1, 1, figsize=(plot_tools.cm2inch(17 , 10)), sharex=True)
sns.set(style="white", context="talk")
self.most_contributions_plot_all.plot(x='Residue', y='TotalEnergy', yerr='TotalEnergySD', kind='bar',
colors=colors_sns,
legend=False)
plt.ylabel("Contribution Energy (kJ/mol)")
plt.xlabel("Residues")
plt.tight_layout()
# # plt.tight_layout(h_pad=3)
# # sns.plt.show()
#
plt.savefig(self.simulation_name + '_sorted_residue_contribution.png',
dpi=custom_dpi,
transparent=trasparent_alpha)
@hlp.timeit
def visualize_interactions_pymol(self, show_energy=False):
# self.clusters_centroids_mmpbsa_dict
# self.filtered_neighbours
test = 1
print('Start of Pymol MD MMPBSA residue show method ---> ')
print('Visualising MMPBSA residue energy contribution')
# To pass Values
# self.cmap_residue_energy
# self.most_contributions_plot_all
#
# self.all_residue_colors_to_rgba
save_state_name = self.receptor_name + '_' + self.molecule_name + '_' + \
'centroid:{0}_mdEnergyAnalyzer_pymolViz.pse'.format(self.sim_num)
pymol_tools.generate_pymol_residue_energy_viz(self.centroid_pdb_file,
self.dssp_data,
self.most_contributions_plot_all,
save_state_name,
show_residue_energy=show_energy
)
time.sleep(5)
print('Finished Pymol method ---> verify yolo')
# try:
# fout = open(args.output, 'w')
# except:
# raise IOError('Could not open file {0} for writing. \n'.format(args.output))
# try:
# fmap = open(args.outmap, 'w')
# except:
# raise IOError('Could not open file {0} for writing. \n'.format(args.outmap))
# fout.write(
# '#Residues MM Energy(+/-)dev/error Polar Energy(+/-)dev/error APolar Energy(+/-)dev/error Total Energy(+/-)dev/error\n')
# for i in range(len(resname)):
# if (args.cutoff == 999):
# fout.write("%-8s %4.4f %4.4f %4.4f %4.4f %4.4f %4.4f %4.4f %4.4f \n" % (
# resname[i], Residues[i].FinalMM[0], Residues[i].FinalMM[1], Residues[i].FinalPol[0],
# Residues[i].FinalPol[1], Residues[i].FinalAPol[0], Residues[i].FinalAPol[1], Residues[i].TotalEn[0],
# Residues[i].TotalEn[1]))
# elif (args.cutoff <= Residues[i].TotalEn[0]) or ((-1 * args.cutoff) >= Residues[i].TotalEn[0]):
# fout.write("%-8s %4.4f %4.4f %4.4f %4.4f %4.4f %4.4f %4.4f %4.4f \n" % (
# resname[i], Residues[i].FinalMM[0], Residues[i].FinalMM[1], Residues[i].FinalPol[0],
# Residues[i].FinalPol[1], Residues[i].FinalAPol[0], Residues[i].FinalAPol[1], Residues[i].TotalEn[0],
# Residues[i].TotalEn[1]))
#
# fmap.write("%-8d %4.4f \n" % ((i + 1), Residues[i].TotalEn[0])) # TODO Binding energy calculation
def autocorr(x):
"Compute an autocorrelation with numpy"
x = x - np.mean(x)
result = np.correlate(x, x, mode='full')
result = result[result.size//2:]
return result / result[0]
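# Quick illustration (toy data): for white noise the normalised autocorrelation
# is 1 at lag 0 and fluctuates around 0 afterwards, whereas a strongly correlated
# binding-energy series decays slowly.
_noise = np.random.randn(500)
_acf = autocorr(_noise)  # _acf[0] == 1.0; later lags hover near zero for white noise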
def PlotEnrgy(c, FitCoef_all, args, fname):
CompEn, CompEnErr, ExpEn, CI = [], [], [], []
for i in range(len(c)):
CompEn.append(c[i].FinalAvgEnergy)
ExpEn.append(c[i].freeEn)
CompEnErr.append(c[i].StdErr)
CI.append(c[i].CI)
fig = plt.figure()
plt.subplots_adjust(left=0.15, right=0.9, top=0.9, bottom=0.15)
ax = fig.add_subplot(111)
CI = np.array(CI).T
# To plot data
ax.errorbar(ExpEn, CompEn, yerr=CI, fmt='o', ecolor='k', color='k', zorder=20000)
# To plot straight line having median correlation coefficient
fit = np.polyfit(ExpEn, CompEn, 1)
fitCompEn = np.polyval(fit, ExpEn)
ax.plot(ExpEn, fitCompEn, color='k', lw=3, zorder=20000)
# To plot straight line having minimum correlation coefficiant
# fitCompEn = np.polyval(FitCoef[1], ExpEn)
# ax.plot(ExpEn,fitCompEn,color='g',lw=2)
# To plot straight line having maximum correlation coefficiant
# fitCompEn = np.polyval(FitCoef[2], ExpEn)
# ax.plot(ExpEn,fitCompEn,color='r',lw=2)
for i in range(len(FitCoef_all[0])):
fitCompEn = np.polyval([FitCoef_all[0][i], FitCoef_all[1][i]], ExpEn)
ax.plot(ExpEn, fitCompEn, color='#BDBDBD', lw=0.5, zorder=1)
ax.set_xlabel('Experimental Free Energy (kJ/mol)', fontsize=24, fontname='Times new Roman')
ax.set_ylabel('Computational Binding Energy (kJ/mol)', fontsize=24, fontname='Times new Roman')
xtics = ax.get_xticks()
plt.xticks(xtics, fontsize=24, fontname='Times new Roman')
ytics = ax.get_yticks()
plt.yticks(ytics, fontsize=24, fontname='Times new Roman')
plt.savefig(fname, dpi=300, orientation='landscape')
def PlotCorr(c, corr_outname, fname, bootstrap_nsteps):
CompEn, ExpEn = [], []
for i in range(len(c)):
CompEn.append(c[i].FinalAvgEnergy)
ExpEn.append(c[i].freeEn)
AvgEn = np.sort(c[i].AvgEnBS, kind='mergesort')
n = len(AvgEn)
div = int(n / 21)
AvgEn = AvgEn[:n:div]
c[i].AvgEnBS = AvgEn
main_r = np.corrcoef([CompEn, ExpEn])[0][1]
r, FitCoef = [], []
Id_0_FitCoef, Id_1_FitCoef = [], []
f_corrdist = open(corr_outname, 'w')
# Bootstrap analysis for the correlation coefficient
nbstep = bootstrap_nsteps
for i in range(nbstep):
temp_x, temp_y = [], []
energy_idx = np.random.randint(0, 22, size=len(c))
complex_idx = np.random.randint(0, len(c), size=len(c))
for j in range(len(complex_idx)):
temp_y.append(c[complex_idx[j]].AvgEnBS[energy_idx[j]])
temp_x.append(c[complex_idx[j]].freeEn)
rtmp = np.corrcoef([temp_x, temp_y])[0][1]
temp_x = np.array(temp_x)
temp_y = np.array(temp_y)
r.append(rtmp)
fit = np.polyfit(temp_x, temp_y, 1)
FitCoef.append(fit)
f_corrdist.write('{0}\n'.format(rtmp))
# Separating slope and intercept
Id_0_FitCoef = np.transpose(FitCoef)[0]
Id_1_FitCoef = np.transpose(FitCoef)[1]
# Calculating mode of the correlation coefficient
density, r_hist = np.histogram(r, 25, density=True)  # "normed" was removed from numpy.histogram; density=True is the equivalent
mode = (r_hist[np.argmax(density) + 1] + r_hist[np.argmax(density)]) / 2
# Calculating Confidence Interval
r = np.sort(r)
CI_min_idx = int(0.005 * nbstep)
CI_max_idx = int(0.995 * nbstep)
CI_min = mode - r[CI_min_idx]
CI_max = r[CI_max_idx] - mode
print("%5.3f %5.3f %5.3f %5.3f" % (main_r, mode, CI_min, CI_max))
# Plotting Correlation Coefficiant Distribution
fig = plt.figure()
plt.subplots_adjust(left=0.15, right=0.9, top=0.9, bottom=0.15)
ax = fig.add_subplot(111)
n, bins, patches = ax.hist(r, 40, density=True, facecolor='#B2B2B2', alpha=0.75, lw=0.1)  # "normed" was removed from matplotlib's hist; density=True is the equivalent
plt.title('Mode = {0:.3f}\nConf. Int. = -{1:.3f}/+{2:.3f}'.format(mode, CI_min, CI_max), fontsize=18,
fontname='Times new Roman')
bincenters = 0.5 * (bins[1:] + bins[:-1])
# y = mlab.normpdf( bincenters, mode, np.std(r))
# l = ax.plot(bincenters, y, 'k--', lw=1)
ax.set_xlabel('Correlation Coefficient', fontsize=24, fontname='Times new Roman')
ax.set_ylabel('Density', fontsize=24, fontname='Times new Roman')
xtics = ax.get_xticks()
plt.xticks(xtics, fontsize=24, fontname='Times new Roman')
ytics = ax.get_yticks()
plt.yticks(ytics, fontsize=24, fontname='Times new Roman')
plt.savefig(fname, dpi=300, orientation='landscape')
return [Id_0_FitCoef, Id_1_FitCoef]
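# Minimal bootstrap sketch (toy helper with hypothetical names) illustrating the
# resample-and-recompute idea used by PlotCorr and by the BootStrap /
# ComplexBootStrap helpers referenced below:
def _demo_bootstrap_mean(x, nsteps=1000, seed=0):
rng = np.random.RandomState(seed)
means = np.array([rng.choice(x, size=len(x), replace=True).mean() for _ in range(nsteps)])
return means.mean(), means.std()
# _demo_bootstrap_mean(np.array([1.0, 2.0, 3.0, 4.0])) -> (approx. 2.5, approx. 0.56)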
class Complex(object):
def __init__(self, MmFile, PolFile, APolFile):
self.frames = []
self.TotalEn = []
self.Vdw, self.Elec, self.Pol, self.Sas, self.Sav, self.Wca = [], [], [], [], [], []
self.MmFile = MmFile
self.PolFile = PolFile
self.APolFile = APolFile
self.AvgEnBS = []
self.CI = []
self.FinalAvgEnergy = 0
self.StdErr = 0
def jump_data_conv(self, data, jump_data):
temp_data = []
for tempus in data:
new_temp = tempus[::jump_data]
temp_data.append(new_temp)
return temp_data
def CalcEnergy(self, frame_wise, idx, jump_data=1, bootstrap=False, bootstrap_jump=4, bootstrap_steps=None):
mmEn = ReadData(self.MmFile, n=7)
polEn = ReadData(self.PolFile, n=4)
apolEn = ReadData(self.APolFile, n=10)
if jump_data>1:
mmEn = self.jump_data_conv( mmEn, jump_data)
polEn = self.jump_data_conv(polEn, jump_data)
apolEn = self.jump_data_conv(apolEn, jump_data)
CheckEnData(mmEn, polEn, apolEn)
time, MM, Vdw, Elec, Pol, Apol, Sas, Sav, Wca = [], [], [], [], [], [], [], [], []
for i in range(len(mmEn[0])):
# Vacuum MM
Energy = mmEn[5][i] + mmEn[6][i] - (mmEn[1][i] + mmEn[2][i] + mmEn[3][i] + mmEn[4][i])
MM.append(Energy)
Energy = mmEn[5][i] - (mmEn[1][i] + mmEn[3][i])
Vdw.append(Energy)
Energy = mmEn[6][i] - (mmEn[2][i] + mmEn[4][i])
Elec.append(Energy)
# Polar
Energy = polEn[3][i] - (polEn[1][i] + polEn[2][i])
Pol.append(Energy)
# Non-polar
Energy = apolEn[3][i] + apolEn[6][i] + apolEn[9][i] - (
apolEn[1][i] + apolEn[2][i] + apolEn[4][i] + apolEn[5][i] + apolEn[7][i] + apolEn[8][i])
Apol.append(Energy)
Energy = apolEn[3][i] - (apolEn[1][i] + apolEn[2][i])
Sas.append(Energy)
Energy = apolEn[6][i] - (apolEn[4][i] + apolEn[5][i])
Sav.append(Energy)
Energy = apolEn[9][i] - (apolEn[7][i] + apolEn[8][i])
Wca.append(Energy)
# Final Energy
time.append(mmEn[0][i])
Energy = MM[i] + Pol[i] + Apol[i]
self.TotalEn.append(Energy)
# TODO HISTOGRAM NEED TO DO SOMETHING
# TAKE A VERY CAREFUL LOOK
# https://machinelearningmastery.com/calculate-bootstrap-confidence-intervals-machine-learning-results-python/
plt.clf()
plt.hist(self.TotalEn)
plt.show()
plt.clf()
self.time = time
self.time_bootstrap = time[::bootstrap_jump]
self.TotalEn_bootstrap = self.TotalEn[::bootstrap_jump]
# Writing frame wise component energy to file
frame_wise.write('\n#Complex %d\n' % ((idx + 1)))
for i in range(len(time)):
frame_wise.write('%15.3lf %15.3lf %15.3lf %15.3lf %15.3lf' % (
time[i], mmEn[1][i], mmEn[2][i], polEn[1][i], (apolEn[1][i] + apolEn[4][i] + apolEn[7][i])))
frame_wise.write('%15.3lf %15.3lf %15.3lf %15.3lf' % (
mmEn[3][i], mmEn[4][i], polEn[2][i], (apolEn[2][i] + apolEn[5][i] + apolEn[8][i])))
frame_wise.write('%15.3lf %15.3lf %15.3lf %15.3lf' % (
mmEn[5][i], mmEn[6][i], polEn[3][i], (apolEn[3][i] + apolEn[6][i] + apolEn[9][i])))
frame_wise.write('%15.3lf %15.3lf %15.3lf %15.3lf\n' % (MM[i], Pol[i], Apol[i], self.TotalEn[i]))
# Bootstrap analysis energy components
if bootstrap is True:
bsteps = bootstrap_steps
curr_Vdw = Vdw[::bootstrap_jump]
avg_energy, error = BootStrap(curr_Vdw, bsteps)
self.Vdw.append(avg_energy)
self.Vdw.append(error)
curr_Elec = Elec[::bootstrap_jump]
avg_energy, error = BootStrap(curr_Elec, bsteps)
self.Elec.append(avg_energy)
self.Elec.append(error)
curr_Pol = Pol[::bootstrap_jump]
avg_energy, error = BootStrap(curr_Pol, bsteps)
self.Pol.append(avg_energy)
self.Pol.append(error)
curr_Sas = Sas[::bootstrap_jump]
avg_energy, error = BootStrap(curr_Sas, bsteps)
self.Sas.append(avg_energy)
self.Sas.append(error)
curr_Sav = Sav[::bootstrap_jump]
avg_energy, error = BootStrap(curr_Sav, bsteps)
self.Sav.append(avg_energy)
self.Sav.append(error)
curr_Wca = Wca[::bootstrap_jump]
avg_energy, error = BootStrap(curr_Wca, bsteps)
self.Wca.append(avg_energy)
self.Wca.append(error)
# Bootstrap => Final Average Energy
curr_TotalEn = self.TotalEn_bootstrap
#from matplotlib import pyplot
self.AvgEnBS, AvgEn, EnErr, CI = ComplexBootStrap(curr_TotalEn, bsteps)
self.FinalAvgEnergy = AvgEn
self.StdErr = EnErr
self.CI = CI
# If not bootstrap then average and standard deviation
else:
self.Vdw.append(np.mean(Vdw))
self.Vdw.append(np.std(Vdw))
self.Elec.append(np.mean(Elec))
# -*- coding: utf-8 -*-
"""
SOD 333: Bayesian filtering
@author: <NAME>
<NAME>
<NAME>
"""
# Import the libraries
import numpy as np # numerical computing
import numpy.random as rnd # pseudo-random functions
import matplotlib.pyplot as plt # MATLAB-style plotting functions
import matplotlib.animation as anim # animation functions
import scipy.io as io # functions for opening MATLAB .mat files
from math import *
from mpl_toolkits.mplot3d import Axes3D
from scipy.interpolate import griddata
# Initialisation of the problem variables:
# they are treated as global variables
X1MIN,X1MAX = -1e4, 1e4
X2MIN,X2MAX = -1e4, 1e4
r0 = (-6000,2000)
v0 = (120,0)
sigma_r0 = 100
sigma_v0 = 10
sigma_INS = 7
sigma_ALT = 10
sigma_BAR = 20
Delta = 1
T =100
# function returning the ancestor indices used in multinomial resampling
def resampling_multi(w,N):
u_tild = np.zeros((N))
expo = np.zeros((N))
alpha = np.zeros((N))
u_ord = np.zeros((N))
uu = np.zeros((N+1))
s = np.zeros((N))
#
w = w/w.sum()
s = np.cumsum(w)
u_tild = rnd.uniform(0,1,N)
#
for i in range(N):
alpha[i] = u_tild[i]**(1/float(i+1))
alpha = np.cumprod(alpha)
u_ord = alpha[N-1]/alpha
u = np.append(u_ord,float("inf"))
#
ancestor = np.zeros(N,dtype=int)
offsprings = np.zeros(N,dtype=int)
i = 0
for j in range(N):
o = 0
while u[i]<=s[j]:
ancestor[i] = j
i = i+1
o = o+1
offsprings[j] = o
return ancestor
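# Quick illustration (toy weights): resampling_multi returns, for each of the N
# offspring particles, the index of the parent it was drawn from; parents with
# larger weights are selected more often on average.
_demo_w = np.array([0.1, 0.6, 0.3])
_demo_anc = resampling_multi(_demo_w, 3)  # e.g. array([1, 1, 2]); entries are parent indices in {0, 1, 2}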
# simulation function for the algorithms
# c = sensitivity coefficient of the algorithm
# T = total length of the sequences
# N = number of particles to simulate
# sim = boolean; if True, the evolution of the particles is shown on a plot.
# evol = boolean; if True, the progress of the algorithm is printed.
# It returns 4 variables:
# RV : matrix containing the (r, v) values estimated for each particle at each time step.
# weights : the weights associated with each particle at each time step.
# X_i : matrix containing the particles (dr, dv)
# h : the altitudes associated with each component of RV.
def Adaptive(c,T,N,sim=True,evol=True) :
#### Create the matrices holding all states for all particles
X_i = np.zeros(shape=(T+1,N,4)) # particle matrix (the offsets dr, dv)
RV = np.zeros(shape=(T+1,N,4)) # matrix of estimated positions and velocities (r_INS+dr, v_INS+dv)
h = np.zeros(shape=(T+1,N)) # matrix of particle altitudes
#### Draw the initial particles.
for i in range(N):
X_i[0,i,0:2] = rnd.normal(size=2,loc=0,scale=sigma_r0)
X_i[0,i,2:4] = rnd.normal(size=2,loc=0,scale=sigma_v0)
RV[0,:,0:2] = r0 + X_i[0,:,0:2]
RV[0,:,2:4] = v0 + X_i[0,:,2:4]
weights = np.zeros(shape=(T+1,N)) # create the weight matrix
#### Initial weights
# Determine the grid indices
i_ind= (X2MAX - RV[0,:,1]) * N2 /(X2MAX-X2MIN)
i_ind=np.ceil(i_ind)
j_ind= (RV[0,:,0] - X1MIN) * N1 /(X1MAX-X1MIN)
j_ind=np.ceil(j_ind)
# compute h0
h[0,:] = map[i_ind.astype(int),j_ind.astype(int)]
weights[0,:]= vraisemblance(h=h_ALT[0], mu= h[0,:])
weights[0,:] = weights[0,:] / np.sum(weights[0,:])
#### Filter loop
for k in range(1,T+1):
# compute the effective sample size:
Neff= 1 / (np.sum(weights[k-1,:]**2))
### SIR algorithm
if Neff <= c*N :
if(evol==True):
print(" time step ",k," : SIR with Neff = ",Neff)
### Resampling step
#indices = rnd.choice(range(0,N),size=
#N,p=weights[k-1,:])
indices = resampling_multi(w=weights[k-1,:],
N=N)
Xi_hat = X_i[k-1,indices,:]
### Prediction step
w_INS = rnd.multivariate_normal(mean=[0,0],cov=[[sigma_INS,0],[0,sigma_INS]],size=N)
X_i[k,:,0:2] = Xi_hat[:,0:2]+Delta*Xi_hat[:,2:4]
X_i[k,:,2:4] = Xi_hat[:,2:4]-Delta*w_INS
### Correction step
# compute r and v
RV[k,:,0:2] = r_INS[:,k]+X_i[k,:,0:2]
RV[k,:,2:4] = v_INS[:,k]+X_i[k,:,2:4]
### Weight update
# Determine the grid indices
i_ind= (X2MAX - RV[k,:,1]) * N2 /(X2MAX-X2MIN)
i_ind=np.clip(np.ceil(i_ind), 0,N2-1)
j_ind= (RV[k,:,0] - X1MIN) * N1 /(X1MAX-X1MIN)
j_ind=np.clip(np.ceil(j_ind),0,N1-1)
### compute h_k
h[k,:] = map[i_ind.astype(int),j_ind.astype(int)]
weights[k,:] = vraisemblance(h=h_ALT[k], mu= h[k,:])
weights[k,:] = weights[k,:] / np.sum(weights[k,:])
### SIS algorithm:
elif Neff > c*N :
if(evol==True) :
print(" time step ",k," : SIS with Neff = ",Neff)
### Prediction step
w_INS = rnd.multivariate_normal(mean=[0,0],cov=[[sigma_INS,0],[0,sigma_INS]],size=N)
X_i[k,:,0:2] = X_i[k-1,:,0:2]+Delta*X_i[k-1,:,2:4]
X_i[k,:,2:4] = X_i[k-1,:,2:4]-Delta*w_INS
### Correction step
# compute r and v
RV[k,:,0:2] = r_INS[:,k]+X_i[k,:,0:2]
RV[k,:,2:4] = v_INS[:,k]+X_i[k,:,2:4]
### Weight update
# Determine the grid indices
i_ind= (X2MAX - RV[k,:,1]) * N2 /(X2MAX-X2MIN)
i_ind=np.clip(np.ceil(i_ind),0,N2-1)
j_ind= (RV[k,:,0] - X1MIN) * N1 /(X1MAX-X1MIN)
j_ind=np.clip(np.ceil(j_ind),0,N1-1)
import numpy as np
import xarray as xr
import pandas as pd
import os
from collections import OrderedDict
# from astropy.time import Time
import logging
import copy
from typing import List, Dict, Union, Tuple
import pysagereader
class SAGEIILoaderV700(object):
"""
Class designed to load the v7.00 SAGE II spec and index files provided by NASA ADSC into python
Data files must be accessible on the user's machine, and can be downloaded from:
https://eosweb.larc.nasa.gov/project/sage2/sage2_v7_table
Parameters
----------
data_folder
location of sage ii index and spec files.
output_format
format for the output data. If ``'xarray'`` the output is returned as an ``xarray.Dataset``.
If None the output is returned as a dictionary of numpy arrays.
**NOTE: the following options only apply to xarray output types**
species
Species to be returned in the output data. If None all species are returned. Options are
``aerosol``, ``ozone``, ``h2o``, and ``no2``. If more than one species is returned fields will be NaN-padded
        where data is not available. ``species`` is only used if ``'xarray'`` is set as the ``output_format``,
otherwise it has no effect.
cf_names
        If True then CF-1.7 naming conventions are used for the output data when ``xarray`` is selected.
filter_aerosol
filter the aerosol using the cloud flag
filter_ozone
filter the ozone using the criteria recommended in the release notes
* Exclusion of all data points with an uncertainty estimate of 300% or greater
* Exclusion of all profiles with an uncertainty greater than 10% between 30 and 50 km
* Exclusion of all data points at altitude and below the occurrence of an aerosol extinction value of
greater than 0.006 km^-1
* Exclusion of all data points at altitude and below the occurrence of both the 525nm aerosol extinction
value exceeding 0.001 km^-1 and the 525/1020 extinction ratio falling below 1.4
        * Exclusion of all data points below 35 km with a 200% or larger uncertainty estimate
enumerate_flags
expand the index and species flags to their boolean values.
normalize_percent_error
give the species error as percent rather than percent * 100
return_separate_flags
return the enumerated flags as a separate data array
Example
-------
>>> sage = SAGEIILoaderV700()
>>> sage.data_folder = 'path/to/data'
>>> data = sage.load_data('2004-1-1','2004-5-1')
In addition to the sage ii fields reported in the files, two additional time fields are provided
to allow for easier subsetting of the data.
``data['mjd']`` is a numpy array containing the modified julian dates of each scan
    ``data['time']`` is a pandas time series object containing the times of each scan
"""
def __init__(self, data_folder: str=None, output_format: str='xarray', species: List[str]=('aerosol', 'h2o', 'no2', 'ozone', 'background'),
cf_names: bool=False, filter_aerosol: bool=False, filter_ozone: bool=False,
enumerate_flags: bool=False, normalize_percent_error: bool=False, return_separate_flags: bool=False):
if type(species) == str:
species = [species]
self.data_folder = data_folder # Type: str
self.version = '7.00'
self.index_file = 'SAGE_II_INDEX_'
self.spec_file = 'SAGE_II_SPEC_'
self.fill_value = np.nan
self.spec_format = self.get_spec_format()
self.index_format = self.get_index_format()
self.output_format = output_format
self.species = [s.lower() for s in species]
self.cf_names = cf_names
self.filter_aerosol = filter_aerosol
self.filter_ozone = filter_ozone
self.normalize_percent_error = normalize_percent_error
self.enumerate_flags = enumerate_flags
self.return_separate_flags = return_separate_flags
@staticmethod
def get_spec_format() -> Dict[str, Tuple[str, int]]:
"""
spec format taken from sg2_specinfo.pro provided in the v7.00 download
used for reading the binary data format
Returns
-------
Dict
Ordered dictionary of variables provided in the spec file. Each dictionary field contains a
tuple with the information (data type, number of data points). Ordering is important as the
sage ii binary files are read sequentially.
"""
spec = OrderedDict()
spec['Tan_Alt'] = ('float32', 8) # Subtangent Altitudes(km)
spec['Tan_Lat'] = ('float32', 8) # Subtangent Latitudes @ Tan_Alt(deg)
spec['Tan_Lon'] = ('float32', 8) # Subtangent Longitudes @ Tan_Alt(deg)
spec['NMC_Pres'] = ('float32', 140) # Gridded Pressure profile(mb)
spec['NMC_Temp'] = ('float32', 140) # Gridded Temperature profile(K)
spec['NMC_Dens'] = ('float32', 140) # Gridded Density profile(cm ^ (-3))
spec['NMC_Dens_Err'] = ('int16', 140) # Error in NMC_Dens( % * 1000)
spec['Trop_Height'] = ('float32', 1) # NMC Tropopause Height(km)
spec['Wavelength'] = ('float32', 7) # Wavelength of each channel(nm)
spec['O3'] = ('float32', 140) # O3 Density profile 0 - 70 Km(cm ^ (-3))
spec['NO2'] = ('float32', 100) # NO2 Density profile 0 - 50 Km(cm ^ (-3))
spec['H2O'] = ('float32', 100) # H2O Volume Mixing Ratio 0 - 50 Km(ppp)
spec['Ext386'] = ('float32', 80) # 386 nm Extinction 0 - 40 Km(1 / km)
spec['Ext452'] = ('float32', 80) # 452 nm Extinction 0 - 40 Km(1 / km)
spec['Ext525'] = ('float32', 80) # 525 nm Extinction 0 - 40 Km(1 / km)
spec['Ext1020'] = ('float32', 80) # 1020 nm Extinction 0 - 40 Km(1 / km)
spec['Density'] = ('float32', 140) # Calculated Density 0 - 70 Km(cm ^ (-3))
spec['SurfDen'] = ('float32', 80) # Aerosol surface area dens 0 - 40 km(um ^ 2 / cm ^ 3)
spec['Radius'] = ('float32', 80) # Aerosol effective radius 0 - 40 km(um)
spec['Dens_Mid_Atm'] = ('float32', 70) # Middle Atmosphere Density(cm ^ (-3))
spec['O3_Err'] = ('int16', 140) # Error in O3 density profile( % * 100)
spec['NO2_Err'] = ('int16', 100) # Error in NO2 density profile( % * 100)
spec['H2O_Err'] = ('int16', 100) # Error in H2O mixing ratio( % * 100)
spec['Ext386_Err'] = ('int16', 80) # Error in 386 nm Extinction( % * 100)
spec['Ext452_Err'] = ('int16', 80) # Error in 452 nm Extinction( % * 100)
spec['Ext525_Err'] = ('int16', 80) # Error in 525 nm Extinction( % * 100)
        spec['Ext1020_Err'] = ('int16', 80)  # Error in 1020 nm Extinction( % * 100)
spec['Density_Err'] = ('int16', 140) # Error in Density( % * 100)
spec['SurfDen_Err'] = ('int16', 80) # Error in surface area dens( % * 100)
spec['Radius_Err'] = ('int16', 80) # Error in aerosol radius( % * 100)
spec['Dens_Mid_Atm_Err'] = ('int16', 70) # Error in Middle Atm.Density( % * 100)
spec['InfVec'] = ('uint16', 140) # Informational Bit flags
return spec
@staticmethod
def get_index_format() -> Dict[str, Tuple[str, int]]:
"""
index format taken from sg2_indexinfo.pro provided in the v7.00 download
used for reading the binary data format
Returns
-------
Dict
an ordered dictionary of variables provided in the index file. Each dictionary
field contains a tuple with the information (data type, length). Ordering is
important as the sage ii binary files are read sequentially.
"""
info = OrderedDict()
info['num_prof'] = ('uint32', 1) # Number of profiles in these files
info['Met_Rev_Date'] = ('uint32', 1) # LaRC Met Model Revision Date(YYYYMMDD)
info['Driver_Rev'] = ('S1', 8) # LaRC Driver Version(e.g. 6.20)
info['Trans_Rev'] = ('S1', 8) # LaRC Transmission Version
info['Inv_Rev'] = ('S1', 8) # LaRC Inversion Version
        info['Spec_Rev'] = ('S1', 8)  # LaRC Species Version
info['Eph_File_Name'] = ('S1', 32) # Ephemeris data file name
info['Met_File_Name'] = ('S1', 32) # Meteorological data file name
info['Ref_File_Name'] = ('S1', 32) # Refraction data file name
info['Tran_File_Name'] = ('S1', 32) # Transmission data file name
info['Spec_File_Name'] = ('S1', 32) # Species profile file name
info['FillVal'] = ('float32', 1) # Fill value
# Altitude grid and range info
info['Grid_Size'] = ('float32', 1) # Altitude grid spacing(0.5 km)
info['Alt_Grid'] = ('float32', 200) # Geometric altitudes(0.5, 1.0, ..., 100.0 km)
info['Alt_Mid_Atm'] = ('float32', 70) # Middle atmosphere geometric altitudes
info['Range_Trans'] = ('float32', 2) # Transmission min & max altitudes[0.5, 100.]
info['Range_O3'] = ('float32', 2) # Ozone min & max altitudes[0.5, 70.0]
info['Range_NO2'] = ('float32', 2) # NO2 min & max altitudes[0.5, 50.0]
info['Range_H2O'] = ('float32', 2) # Water vapor min & max altitudes[0.5, 50.0]
info['Range_Ext'] = ('float32', 2) # Aerosol extinction min & max altitudes[0.5, 40.0]
info['Range_Dens'] = ('float32', 2) # Density min & max altitudes[0.5, 70.0]
info['Spare'] = ('float32', 2) #
# Event specific info useful for data subsetting
info['YYYYMMDD'] = ('int32', 930) # Event date at 20km subtangent point
info['Event_Num'] = ('int32', 930) # Event number
info['HHMMSS'] = ('int32', 930) # Event time at 20km
info['Day_Frac'] = ('float32', 930) # Time of year(DDD.frac) at 20 km
info['Lat'] = ('float32', 930) # Subtangent latitude at 20 km(-90, +90)
info['Lon'] = ('float32', 930) # Subtangent longitude at 20 km(-180, +180)
info['Beta'] = ('float32', 930) # Spacecraft beta angle(deg)
info['Duration'] = ('float32', 930) # Duration of event(sec)
info['Type_Sat'] = ('int16', 930) # Event Type Instrument(0 = SR, 1 = SS)
info['Type_Tan'] = ('int16', 930) # Event Type Local(0 = SR, 1 = SS)
# Process tracking and flag info
info['Dropped'] = ('int32', 930) # Dropped event flag
info['InfVec'] = ('uint32', 930) # Bit flags relating to processing (
# NOTE: readme_sage2_v6.20.txt says InfVec is 16 bit but appears to actually be 32 (also in IDL software)
# Record creation dates and times
info['Eph_Cre_Date'] = ('int32', 930) # Record creation date(YYYYMMDD format)
info['Eph_Cre_Time'] = ('int32', 930) # Record creation time(HHMMSS format)
info['Met_Cre_Date'] = ('int32', 930) # Record creation date(YYYYMMDD format)
info['Met_Cre_Time'] = ('int32', 930) # Record creation time(HHMMSS format)
info['Ref_Cre_Date'] = ('int32', 930) # Record creation date(YYYYMMDD format)
info['Ref_Cre_Time'] = ('int32', 930) # Record creation time(HHMMSS format)
info['Tran_Cre_Date'] = ('int32', 930) # Record creation date(YYYYMMDD format)
info['Tran_Cre_Time'] = ('int32', 930) # Record creation time(HHMMSS format)
info['Spec_Cre_Date'] = ('int32', 930) # Record creation date(YYYYMMDD format)
info['Spec_Cre_Time'] = ('int32', 930) # Record creation time(HHMMSS format)
return info
def get_spec_filename(self, year: int, month: int) -> str:
"""
Returns the spec filename given a year and month
Parameters
----------
year
year of the data that will be loaded
month
month of the data that will be loaded
Returns
-------
filename of the spec file where the data is stored
"""
file = os.path.join(self.data_folder,
self.spec_file + str(int(year)) + str(int(month)).zfill(2) + '.' + self.version)
if not os.path.isfile(file):
file = None
return file
def get_index_filename(self, year: int, month: int) -> str:
"""
Returns the index filename given a year and month
Parameters
----------
year
year of the data that will be loaded
month
month of the data that will be loaded
Returns
-------
filename of the index file where the data is stored
"""
file = os.path.join(self.data_folder,
self.index_file + str(int(year)) + str(int(month)).zfill(2) + '.' + self.version)
if not os.path.isfile(file):
file = None
return file
def read_spec_file(self, file: str, num_profiles: int) -> List[Dict]:
"""
Parameters
----------
file
name of the spec file to be read
num_profiles
number of profiles to read from the spec file (usually determined from the index file)
Returns
-------
list of dictionaries containing the spec data. Each list is one event
"""
# load the file into the buffer
file_format = self.spec_format
with open(file, "rb") as f:
buffer = f.read()
# initialize the list of dictionaries
data = [None] * num_profiles
for p in range(num_profiles):
data[p] = dict()
# load the data from the buffer
bidx = 0
for p in range(num_profiles):
for key in file_format.keys():
nbytes = np.dtype(file_format[key][0]).itemsize * file_format[key][1]
data[p][key] = copy.copy(np.frombuffer(buffer[bidx:bidx+nbytes],
dtype=file_format[key][0]))
bidx += nbytes
return data
def read_index_file(self, file: str) -> Dict:
"""
Read the binary file into a python data structure
Parameters
----------
file
filename to be read
Returns
-------
data from the file
"""
file_format = self.index_format
with open(file, "rb") as f:
buffer = f.read()
data = dict()
# load the data from file into a list
bidx = 0
for key in file_format.keys():
nbytes = np.dtype(file_format[key][0]).itemsize * file_format[key][1]
if file_format[key][0] == 'S1':
data[key] = copy.copy(buffer[bidx:bidx + nbytes].decode('utf-8'))
else:
data[key] = copy.copy(np.frombuffer(buffer[bidx:bidx + nbytes], dtype=file_format[key][0]))
if len(data[key]) == 1:
data[key] = data[key][0]
bidx += nbytes
# make a more useable time field
date_str = []
# If the time overflows by less than the scan time just set it to midnight
data['HHMMSS'][(data['HHMMSS'] >= 240000) & (data['HHMMSS'] < (240000 + data['Duration']))] = 235959
# otherwise, set it as invalid
data['HHMMSS'][data['HHMMSS'] >= 240000] = -999
for idx, (ymd, hms) in enumerate(zip(data['YYYYMMDD'], data['HHMMSS'])):
if (ymd < 0) | (hms < 0):
date_str.append('1970-1-1 00:00:00') # invalid sage ii date
else:
hours = int(hms/10000)
mins = int((hms % 10000)/100)
secs = hms % 100
date_str.append(str(ymd)[0:4] + '-' + str(ymd)[4:6].zfill(2) + '-' +
str(ymd)[6::].zfill(2) + ' ' + str(hours).zfill(2) + ':' +
str(mins).zfill(2) + ':' + str(secs).zfill(2))
# data['time'] = Time(date_str, format='iso')
data['time'] = pd.to_datetime(date_str)
data['mjd'] = np.array((data['time'] - pd.Timestamp('1858-11-17')) / pd.Timedelta(1, 'D'))
data['mjd'][data['mjd'] < 40588] = -999 # get rid of invalid dates
return data
def load_data(self, min_date: str, max_date: str,
min_lat: float=-90, max_lat: float=90,
min_lon: float=-180, max_lon: float=360) -> Union[Dict, xr.Dataset]:
"""
Load the SAGE II data for the specified dates and locations.
Parameters
----------
min_date
start date where data will be loaded in iso format, eg: '2004-1-1'
max_date
end date where data will be loaded in iso format, eg: '2004-1-1'
min_lat
minimum latitude (optional)
max_lat
maximum latitude (optional)
min_lon
minimum longitude (optional)
max_lon
maximum longitude (optional)
Returns
-------
Variables are returned as numpy arrays (1 or 2 dimensional depending on the variable)
"""
min_time = pd.Timestamp(min_date)
max_time = pd.Timestamp(max_date)
data = dict()
init = False
# create a list of unique year/month combinations between the start/end dates
uniq = OrderedDict()
for year in [(t.date().year, t.date().month) for t in
pd.date_range(min_time, max_time+pd.Timedelta(27, 'D'), freq='27D')]:
uniq[year] = year
# load in the data from the desired months
for (year, month) in list(uniq.values()):
logging.info('loading data for : ' + str(year) + '/' + str(month))
indx_file = self.get_index_filename(year, month)
# if the file does not exist move on to the next month
if indx_file is None:
continue
indx_data = self.read_index_file(indx_file)
numprof = indx_data['num_prof']
spec_data = self.read_spec_file(self.get_spec_filename(year, month), numprof)
# get rid of the duplicate names for InfVec
for sp in spec_data:
sp['ProfileInfVec'] = copy.copy(sp['InfVec'])
del sp['InfVec']
for key in indx_data.keys():
# get rid of extraneous profiles in the index so index and spec are the same lengths
if hasattr(indx_data[key], '__len__'):
indx_data[key] = np.delete(indx_data[key], np.arange(numprof, 930))
# add the index values to the data set
if key in data.keys():
# we dont want to replicate certain fields
if (key[0:3] != 'Alt') & (key[0:5] != 'Range') & (key[0:7] != 'FillVal'):
data[key] = np.append(data[key], indx_data[key])
else:
if key == 'FillVal':
data[key] = indx_data[key]
else:
data[key] = [indx_data[key]]
# initialize the data dictionaries as lists
if init is False:
for key in spec_data[0].keys():
data[key] = []
init = True
# add the spec values to the data set
for key in spec_data[0].keys():
data[key].append(np.asarray([sp[key] for sp in spec_data]))
# join all of our lists into an array - this could be done more elegantly with vstack to avoid
# the temporary lists, but this is much faster
for key in data.keys():
if key == 'FillVal':
data[key] = float(data[key]) # make this a simple float rather than zero dimensional array
elif len(data[key][0].shape) > 0:
data[key] =
|
np.concatenate(data[key], axis=0)
|
numpy.concatenate
|
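# --- Illustration: how the (dtype, count) format dictionaries above drive the ---
# --- sequential np.frombuffer reads in read_spec_file/read_index_file.        ---
# The two fields and the byte buffer below are synthetic; only the pattern matches
# the loader above.
import numpy as np
from collections import OrderedDict

fmt = OrderedDict([('Tan_Alt', ('float32', 3)), ('O3_Err', ('int16', 2))])
buffer = (np.array([10.0, 20.0, 30.0], dtype='float32').tobytes()
          + np.array([5, -7], dtype='int16').tobytes())

record, bidx = {}, 0
for key, (dtype, count) in fmt.items():
    nbytes = np.dtype(dtype).itemsize * count
    record[key] = np.frombuffer(buffer[bidx:bidx + nbytes], dtype=dtype)
    bidx += nbytes
print(record)  # {'Tan_Alt': array([10., 20., 30.]...), 'O3_Err': array([ 5, -7]...)}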
import numpy as np
import matplotlib.cm
def draw_person_limbs_2d_coco(axis, coords, vis=None, color=None, order='hw', with_face=True):
""" Draws a 2d person stick figure in a matplotlib axis. """
import matplotlib.cm
if order == 'uv':
pass
elif order == 'hw':
coords = coords[:, ::-1]
else:
assert 0, "Unknown order."
LIMBS_COCO = np.array([[1, 2], [2, 3], [3, 4], # right arm
[1, 8], [8, 9], [9, 10], # right leg
[1, 5], [5, 6], [6, 7], # left arm
[1, 11], [11, 12], [12, 13], # left leg
[1, 0], [2, 16], [0, 14], [14, 16], [0, 15], [15, 17], [5, 17]]) # head
if type(color) == str:
if color == 'sides':
blue_c = np.array([[0.0, 0.0, 1.0]]) # side agnostic
red_c = np.array([[1.0, 0.0, 0.0]]) # "left"
green_c = np.array([[0.0, 1.0, 0.0]]) # "right"
color = np.concatenate([np.tile(green_c, [6, 1]),
np.tile(red_c, [6, 1]),
np.tile(blue_c, [7, 1])], 0)
if not with_face:
color = color[:13, :]
if not with_face:
LIMBS_COCO = LIMBS_COCO[:13, :]
if vis is None:
vis = np.ones_like(coords[:, 0]) == 1.0
if color is None:
color = matplotlib.cm.jet(np.linspace(0, 1, LIMBS_COCO.shape[0]))[:, :3]
for lid, (p0, p1) in enumerate(LIMBS_COCO):
if (vis[p0] == 1.0) and (vis[p1] == 1.0):
if type(color) == str:
axis.plot(coords[[p0, p1], 0], coords[[p0, p1], 1], color, linewidth=2)
else:
axis.plot(coords[[p0, p1], 0], coords[[p0, p1], 1], color=color[lid, :], linewidth=2)
def draw_person_limbs_3d_coco(axis, coords, vis=None, color=None, orientation=None, orientation_val=None, with_face=True, rescale=True):
""" Draws a 3d person stick figure in a matplotlib axis. """
import matplotlib.cm
LIMBS_COCO = np.array([[1, 2], [1, 5], [2, 3], [3, 4], [5, 6],
[6, 7], [1, 8], [8, 9], [9, 10],
[1, 11], [11, 12], [12, 13], [1, 0],
[2, 16], [0, 14], [14, 16], [0, 15], [15, 17], [5, 17]])
if not with_face:
LIMBS_COCO = LIMBS_COCO[:13, :]
if vis is None:
vis = np.ones_like(coords[:, 0]) == 1.0
vis = vis == 1.0
if color is None:
color = matplotlib.cm.jet(np.linspace(0, 1, LIMBS_COCO.shape[0]))[:, :3]
for lid, (p0, p1) in enumerate(LIMBS_COCO):
if (vis[p0] == 1.0) and (vis[p1] == 1.0):
if type(color) == str:
axis.plot(coords[[p0, p1], 0], coords[[p0, p1], 1], coords[[p0, p1], 2], color, linewidth=2)
else:
axis.plot(coords[[p0, p1], 0], coords[[p0, p1], 1], coords[[p0, p1], 2], color=color[lid, :], linewidth=2)
if np.sum(vis) > 0 and rescale:
min_v, max_v, mean_v = np.min(coords[vis, :], 0), np.max(coords[vis, :], 0), np.mean(coords[vis, :], 0)
range = np.max(np.maximum(np.abs(max_v-mean_v), np.abs(mean_v-min_v)))
axis.set_xlim([mean_v[0]-range, mean_v[0]+range])
axis.set_ylim([mean_v[1]-range, mean_v[1]+range])
axis.set_zlim([mean_v[2]-range, mean_v[2]+range])
axis.set_xlabel('x')
axis.set_ylabel('y')
axis.set_zlabel('z')
axis.view_init(azim=-90., elev=-90.)
def detect_hand_keypoints(scoremaps):
""" Performs detection per scoremap for the hands keypoints. """
if len(scoremaps.shape) == 4:
scoremaps = np.squeeze(scoremaps)
s = scoremaps.shape
assert len(s) == 3, "This function was only designed for 3D Scoremaps."
assert (s[2] < s[1]) and (s[2] < s[0]), "Probably the input is not correct, because [H, W, C] is expected."
keypoint_coords = np.zeros((s[2], 2))
for i in range(s[2]):
v, u = np.unravel_index(
|
np.argmax(scoremaps[:, :, i])
|
numpy.argmax
|
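# --- Illustration: the per-channel peak picking used by detect_hand_keypoints. ---
# A toy [H, W, C] scoremap with two channels; the coordinates below are made up.
import numpy as np

scoremap = np.zeros((4, 5, 2))
scoremap[1, 3, 0] = 1.0  # peak of channel 0
scoremap[2, 0, 1] = 1.0  # peak of channel 1
for c in range(scoremap.shape[2]):
    v, u = np.unravel_index(np.argmax(scoremap[:, :, c]), scoremap[:, :, c].shape)
    print(c, v, u)  # -> 0 1 3 and 1 2 0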
import numpy as np
import scipy.linalg
from common_lab_utils import PerspectiveCamera
class PrecalibratedCameraMeasurementsFixedWorld:
"""Measurements of fixed world points given in the normalised image plane"""
def __init__(self, camera: PerspectiveCamera, u: np.ndarray, x_w: np.ndarray):
"""Constructs the 2D-3D measurement
:param camera: A PerspectiveCamera representing the camera that performed the measurement.
:param u: A 2xn matrix of n pixel observations.
:param x_w: A 3xn matrix of the n corresponding world points.
"""
self.camera = camera
self.x_w = x_w.T
# Transform to the normalised image plane.
self.xn = camera.pixel_to_normalised(u.T)
self.num = self.xn.shape[1]
class PrecalibratedMotionOnlyBAObjective:
"""Implements linearisation of motion-only BA objective function"""
def __init__(self, measurement):
"""Constructs the objective
:param measurement: A PrecalibratedCameraMeasurementsFixedWorld object.
"""
self.measurement = measurement
@staticmethod
def extract_measurement_jacobian(point_index, pose_state_c_w, measurement):
"""Computes the measurement Jacobian for a specific point and camera measurement.
:param point_index: Index of current point.
:param pose_state_c_w: Current pose state given as the pose of the world in the camera frame.
:param measurement: The measurement
:return: The measurement Jacobian
"""
A = measurement.camera.jac_project_world_to_normalised_wrt_pose_w_c(pose_state_c_w,
measurement.x_w[:, [point_index]])
return A
@staticmethod
def extract_measurement_error(point_index, pose_state_c_w, measurement):
"""Computes the measurement error for a specific point and camera measurement.
:param point_index: Index of current point.
:param pose_state_c_w: Current pose state given as the pose of the world in the camera frame.
:param measurement: The measurement
:return: The measurement error
"""
b = measurement.camera.reprojection_error_normalised(pose_state_c_w * measurement.x_w[:, [point_index]],
measurement.xn[:, [point_index]])
return b
def linearise(self, pose_state_w_c):
"""Linearises the objective over all states and measurements
:param pose_state_w_c: The current camera pose state in the world frame.
:return:
A - The full measurement Jacobian
b - The full measurement error
cost - The current cost
"""
num_points = self.measurement.num
A = np.zeros((2 * num_points, 6))
b = np.zeros((2 * num_points, 1))
pose_state_c_w = pose_state_w_c.inverse()
for j in range(num_points):
rows = slice(j * 2, (j + 1) * 2)
A[rows, :] = self.extract_measurement_jacobian(j, pose_state_c_w, self.measurement)
b[rows, :] = self.extract_measurement_error(j, pose_state_c_w, self.measurement)
return A, b, b.T.dot(b)
def gauss_newton(x_init, model, cost_thresh=1e-9, delta_thresh=1e-9, max_num_it=20):
"""Implements nonlinear least squares using the Gauss-Newton algorithm
:param x_init: The initial state
:param model: Model with a function linearise() that returns A, b and the cost for the current state estimate.
:param cost_thresh: Threshold for cost function
:param delta_thresh: Threshold for update vector
:param max_num_it: Maximum number of iterations
:return:
- x: State estimates at each iteration, the final state in x[-1]
- cost: The cost at each iteration
- A: The full measurement Jacobian at the final state
- b: The full measurement error at the final state
"""
x = [None] * (max_num_it + 1)
cost = np.zeros(max_num_it + 1)
x[0] = x_init
for it in range(max_num_it):
A, b, cost[it] = model.linearise(x[it])
tau = np.linalg.lstsq(A, b, rcond=None)[0]
x[it + 1] = x[it] + tau
if cost[it] < cost_thresh or np.linalg.norm(tau) < delta_thresh:
x = x[:it + 2]
cost = cost[:it + 2]
break
A, b, cost[-1] = model.linearise(x[-1])
return x, cost, A, b
def levenberg_marquardt(x_init, model, cost_thresh=1e-9, delta_thresh=1e-9, max_num_it=20):
"""Implements nonlinear least squares using the Levenberg-Marquardt algorithm
:param x_init: The initial state
:param model: Model with a function linearise() that returns A, b and the cost for the current state estimate.
:param cost_thresh: Threshold for cost function
:param delta_thresh: Threshold for update vector
:param max_num_it: Maximum number of iterations
:return:
- x: State estimates at each iteration, the final state in x[-1]
- cost: The cost at each iteration
- A: The full measurement Jacobian at the final state
- b: The full measurement error at the final state
"""
x = [None] * (max_num_it + 1)
cost =
|
np.zeros(max_num_it + 1)
|
numpy.zeros
|
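# --- Illustration: the linearise -> least-squares -> update loop used by ---
# --- gauss_newton() above, on a tiny standalone 1-parameter problem.     ---
# The exponential model and data below are synthetic and do not come from the
# original lab code.
import numpy as np

def fit_exponential(t, y, a_init=0.0, num_it=10):
    """Fit y = exp(a*t) with plain Gauss-Newton iterations."""
    a = a_init
    for _ in range(num_it):
        pred = np.exp(a * t)
        A = (t * pred).reshape(-1, 1)   # Jacobian of the prediction w.r.t. a
        b = (y - pred).reshape(-1, 1)   # measurement error (residual)
        tau = np.linalg.lstsq(A, b, rcond=None)[0]  # Gauss-Newton step
        a += tau.item()
    return a

t = np.linspace(0.0, 1.0, 20)
y = np.exp(1.3 * t)
print(fit_exponential(t, y))  # converges towards 1.3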
import json
import os
import numpy as np
import pandas as pd
from pathlib import Path
import argparse
from typing import Union
import re
import pickle as pkl
def preprocess(
data_name,
node_feat: Union[None, int],
node_feat_which='both',
):
"""
data_name: str, name of raw dataset (to be found in data folder)
node_feat: {None, int}, number of node features in each row, None if no node features
node_feat_which: {'source','destination','both'}, specify which node features are present in each row
"""
u_list, i_list, ts_list, label_list = [], [], [], []
feat_edge = []
feat_node = []
idx_list = []
with open(data_name) as f:
s = next(f)
for idx, line in enumerate(f):
e = line.strip().split(',')
u = int(e[0])
i = int(e[1])
ts = float(e[2])
label = float(e[3])
if node_feat:
if node_feat_which=='both':
feat_e = np.array([float(x) for x in e[4:-node_feat*2]])
feat_n_s = np.array([u, ts ] + [float(x) for x in e[-node_feat*2:-node_feat]])
feat_n_d = np.array([i, ts ] + [float(x) for x in e[-node_feat:]])
feat_n = [feat_n_s, feat_n_d]
else:
feat_e = np.array([float(x) for x in e[4:-node_feat]])
if node_feat_which=='source':
feat_n = np.array([u, ts ] + [float(x) for x in e[-node_feat:]])
elif node_feat_which=='destination':
feat_n = np.array([i, ts ] + [float(x) for x in e[-node_feat:]])
else:
raise ValueError(f"Expected {{'source', 'destination', 'both'}} in 'node_feat_which' argument, got {node_feat_which}")
else:
feat_e = np.array([float(x) for x in e[4:]])
feat_n = []
u_list.append(u)
i_list.append(i)
ts_list.append(ts)
label_list.append(label)
idx_list.append(idx)
feat_edge.append(feat_e)
feat_node.append(feat_n)
return pd.DataFrame({'u': u_list,
'i': i_list,
'ts': ts_list,
'label': label_list,
'idx': idx_list}),
|
np.array(feat_edge)
|
numpy.array
|
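# --- Illustration: how preprocess() slices one CSV row when node_feat=2 and ---
# --- node_feat_which='both'. The row below is synthetic.                    ---
import numpy as np

line = "3,7,12.5,0,0.1,0.2,0.3,1.0,1.1,2.0,2.1"
e = line.strip().split(',')
u, i, ts, label = int(e[0]), int(e[1]), float(e[2]), float(e[3])
node_feat = 2
feat_e = np.array([float(x) for x in e[4:-node_feat * 2]])                       # edge features: 0.1, 0.2, 0.3
feat_n_s = np.array([u, ts] + [float(x) for x in e[-node_feat * 2:-node_feat]])  # source node features: 1.0, 1.1
feat_n_d = np.array([i, ts] + [float(x) for x in e[-node_feat:]])                # destination node features: 2.0, 2.1
print(feat_e, feat_n_s, feat_n_d)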
import os
import pickle
import cv2
import fastestimator as fe
import numpy as np
import tensorflow as tf
from fastestimator.op.numpyop import Delete
from fastestimator.op.numpyop.meta import Sometimes
from fastestimator.op.tensorop.loss import CrossEntropy
from fastestimator.op.tensorop.model import ModelOp, UpdateOp
from fastestimator.trace.io import ModelSaver
from tensorflow.python.keras import layers
def zscore(data, epsilon=1e-7):
mean = np.mean(data)
std = np.std(data)
data = (data - mean) / max(std, epsilon)
return data
def load_pickle(pickle_path):
with open(pickle_path, "rb") as f:
data = pickle.load(f)
return data
def pad_left_one(data):
data_length = data.size
if data_length < 300:
data = np.pad(data, ((300 - data_length, 0), (0, 0)), mode='constant', constant_values=1.0)
return data
def pad_left_zero(data):
data_length = data.size
if data_length < 300:
data = np.pad(data, ((300 - data_length, 0), (0, 0)), mode='constant', constant_values=0.0)
return data
class RemoveValLoss(fe.op.numpyop.NumpyOp):
def forward(self, data, state):
val_loss = data
return np.zeros_like(val_loss)
class MultipleClsBinaryCELoss(fe.op.tensorop.TensorOp):
def __init__(self, inputs, outputs, pos_labels=[0, 2], neg_labels=[1], mode=None):
self.pos_labels = pos_labels
self.neg_labels = neg_labels
self.all_labels = self.pos_labels + self.neg_labels
self.missing_labels = list(set([0, 1, 2]) - set(self.all_labels))
if len(self.missing_labels) == 0:
self.missing_labels = [-1]
super().__init__(inputs=inputs, outputs=outputs, mode=mode)
def forward(self, data, state):
cls_pred, cls_label = data
batch_size = cls_label.shape[0]
binaryCEloss = 0.0
case_count = 0.0
for idx in range(batch_size):
if cls_label[idx] != self.missing_labels[0]:
abnormal_predict = tf.clip_by_value(tf.math.reduce_max([cls_pred[idx][p] for p in self.pos_labels]),
1e-4,
1.0 - 1e-4)
if cls_label[idx] != self.neg_labels[0]:
abnormal_label = 1.0
else:
abnormal_label = 0.0
binaryCEloss -= (abnormal_label * tf.math.log(abnormal_predict) +
(1.0 - abnormal_label) * tf.math.log(1.0 - abnormal_predict))
case_count += 1
return binaryCEloss / case_count
class CombineLoss(fe.op.tensorop.TensorOp):
def __init__(self, inputs, outputs, weights, mode=None):
super().__init__(inputs=inputs, outputs=outputs, mode=mode)
self.weights = weights
def forward(self, data, state):
return tf.reduce_sum([loss * weight for loss, weight in zip(data, self.weights)])
class CombineData(fe.op.numpyop.NumpyOp):
def forward(self, data, state):
x =
|
np.concatenate(data, axis=1)
|
numpy.concatenate
|
"""
Crossover ratios
The crossover ratio (CR) determines what percentage of parameters in the
target vector are updated with difference vector selected from the
population. In traditional differential evolution a CR value is chosen
somewhere in [0, 1] at the start of the search and stays constant throughout.
DREAM extends this by allowing multiple CRs at the same time with different
probabilities. Adaptive crossover adjusts the relative weights of the CRs
based on the average distance of the steps taken when that CR was used. This
distance will be zero for unsuccessful metropolis steps, and so the relative
weights on those CRs which generate many unsuccessful steps will be reduced.
Usage
-----
1. Traditional differential evolution::
crossover = Crossover(CR=CR)
2. Weighted crossover ratios::
crossover = Crossover(CR=[CR1, CR2, ...], weight=[weight1, weight2, ...])
The weights are normalized to one, and default to equally weighted CRs.
3. Adaptive weighted crossover ratios::
crossover = AdaptiveCrossover(N)
The CRs are set to *[1/N, 2/N, ... 1]*, and start out equally weighted. The
weights are adapted during burn-in (10% of the runs) and fixed for the
remainder of the analysis.
Compatibility Notes
-------------------
For *Extra.pCR == 'Update'* in the matlab interface use::
CR = AdaptiveCrossover(Ncr=MCMCPar.nCR)
For *Extra.pCR != 'Update'* in the matlab interface use::
CR = Crossover(CR=[1./Ncr], pCR=[1])
"""
from __future__ import division, print_function
__all__ = ["Crossover", "AdaptiveCrossover", "LogAdaptiveCrossover"]
from numpy import hstack, empty, ones, zeros, cumsum, arange, \
reshape, array, isscalar, asarray, std, sum, trunc, log10, logspace
from . import util
class Crossover(object):
"""
Fixed weight crossover ratios.
*CR* is a scalar if there is a single crossover ratio, or a vector of
numbers in (0, 1].
*weight* is the relative weighting of each CR, or None for equal weights.
"""
    def __init__(self, CR, weight=None):
        if isscalar(CR):
            CR, weight = [CR], [1]
        if weight is None:
            # default to equally weighted CRs, as documented
            weight = ones(len(CR))
        CR, weight = [asarray(v, 'd') for v in (CR, weight)]
        self.CR, self.weight = CR, weight/sum(weight)
def reset(self):
pass
def update(self, xold, xnew, used):
"""
Gather adaptation data on *xold*, *xnew* for each CR that was
*used* in step *N*.
"""
pass
def adapt(self):
"""
Update CR weights based on the available adaptation data.
"""
pass
class BaseAdaptiveCrossover(object):
"""
Adapted weight crossover ratios.
"""
def _set_CRs(self, CR):
self.CR = CR
# Start with all CRs equally probable
self.weight = ones(len(self.CR)) / len(self.CR)
# No initial statistics for adaptation
self._count = zeros(len(self.CR))
self._distance = zeros(len(self.CR))
self._generations = 0
def reset(self):
# TODO: do we reset count and distance?
pass
def update(self, xold, xnew, used):
"""
Gather adaptation data on *xold*, *xnew* for each CR that was
*used* in step *N*.
"""
# Calculate the standard deviation of each dimension of X
r = std(xnew, ddof=1, axis=0)
# Compute the Euclidean distance between new X and old X
d = sum(((xold - xnew)/r)**2, axis=1)
# Use this information to update sum_p2 to update N_CR
count, total = distance_per_CR(self.CR, d, used)
self._count += count
self._distance += total
self._generations += 1
self._Nchains = len(used)
def adapt(self):
"""
Update CR weights based on the available adaptation data.
"""
# [PAK] make sure no count is zero by adding one to all counts
self.weight = (self._distance/(self._count+1)) * (self._Nchains/sum(self._distance))
# [PAK] make sure no weight goes to zero
self.weight += 0.1*sum(self.weight)
self.weight /= sum(self.weight)
class AdaptiveCrossover(BaseAdaptiveCrossover):
"""
Adapted weight crossover ratios.
*N* is the number of CRs to use. CR is set to [1/N, 2/N, ..., 1], with
initial weights [1/N, 1/N, ..., 1/N].
"""
def __init__(self, N):
if N < 2:
raise ValueError("Need more than one CR for AdaptiveCrossover")
self._set_CRs((arange(N)+1)/N) # Equally spaced CRs
# [PAK] Add log spaced adaptive cross-over for high dimensional tightly
# constrained problems.
class LogAdaptiveCrossover(BaseAdaptiveCrossover):
"""
Adapted weight crossover ratios, log-spaced.
*dim* is the number of dimensions in the problem.
*N* is the number of CRs to use per decade.
CR is set to [k/dim] where k is log-spaced from 1 to dim.
The CRs start equally weighted as [1, ..., 1]/len(CR).
*N* should be around 4.5. This gives good low end density, with 1, 2, 3,
and 5 parameters changed at a time, and proceeds up to 60% and 100% of
parameters each time. Lower values of *N* give too few high density CRs,
and higher values give too many low density CRs.
"""
def __init__(self, dim, N=4.5):
# Log spaced CR from 1/dim to dim/dim
        self._set_CRs(logspace(0, log10(dim), int(trunc(N*log10(dim)+1)))/dim)  # logspace needs an integer count
def distance_per_CR(available_CRs, distances, used):
"""
Accumulate normalized Euclidean distance for each crossover value
Returns the number of times each available CR was used and the total
distance for that CR.
"""
# TODO: could use sparse array trick to evaluate totals by CR
# Set distance[k] to coordinate (k, used[k]), then sum by columns
# Note: currently setting unused CRs to -1, so this won't work
total = array([sum(distances[used == p]) for p in available_CRs])
count = array([
|
sum(used == p)
|
numpy.sum
|
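# --- Illustration: the adapt() weight update from BaseAdaptiveCrossover above ---
# --- on made-up counts and distances. CRs whose steps moved furthest per use  ---
# --- receive more weight.                                                      ---
import numpy as np

CR = (np.arange(3) + 1) / 3            # [1/3, 2/3, 1], as built by AdaptiveCrossover(3)
count = np.array([10.0, 40.0, 50.0])   # how often each CR was used
distance = np.array([0.5, 8.0, 3.0])   # accumulated normalised step distances
Nchains = 20

weight = (distance / (count + 1)) * (Nchains / np.sum(distance))
weight += 0.1 * np.sum(weight)         # keep every CR's weight away from zero
weight /= np.sum(weight)
print(CR, weight)                      # CR = 2/3 dominates: its steps moved the most per use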
# -*- coding: utf-8 -*-
"""
computes the MFCCs from the magnitude spectrum (see Slaney)
Args:
X: spectrogram (dimension FFTLength X Observations)
f_s: sample rate of audio data
Returns:
v_mfcc mel frequency cepstral coefficients
"""
import numpy as np
from ToolMfccFb import ToolMfccFb
def FeatureSpectralMfccs(X,f_s, iNumCoeffs = 13):
# allocate memory
v_mfcc = np.zeros ([iNumCoeffs, X.shape[1]])
# generate filter matrix
H = ToolMfccFb(X.shape[0], f_s)
T = generateDctMatrix (H.shape[0], iNumCoeffs)
for n in range(0,X.shape[1]):
# compute the mel spectrum
X_Mel = np.log10(np.dot(H, X[:,n] + 1e-20))
# calculate the mfccs
v_mfcc[:,n] = np.dot(T, X_Mel)
return (v_mfcc)
# see function mfcc.m from Slaneys Auditory Toolbox
def generateDctMatrix (iNumBands, iNumCepstralCoeffs):
T = np.cos(np.outer(np.arange(0,iNumCepstralCoeffs), (2*np.arange(0,iNumBands)+1)) * np.pi/2/iNumBands)
T = T /
|
np.sqrt(iNumBands/2)
|
numpy.sqrt
|
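# --- Illustration: shape check for the DCT matrix built by generateDctMatrix. ---
# With 40 mel bands and 13 coefficients it maps a length-40 log-mel spectrum to
# 13 cepstral coefficients; the random spectrum below is a stand-in only.
import numpy as np

iNumBands, iNumCepstralCoeffs = 40, 13
T = np.cos(np.outer(np.arange(iNumCepstralCoeffs), (2 * np.arange(iNumBands) + 1))
           * np.pi / 2 / iNumBands) / np.sqrt(iNumBands / 2)
X_Mel = np.random.rand(iNumBands)
v_mfcc = T.dot(X_Mel)
print(T.shape, v_mfcc.shape)  # (13, 40) (13,)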
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
from pyscf import lib
import pyscf.pbc
from pyscf import ao2mo, gto
from pyscf.pbc import gto as pgto
from pyscf.pbc import scf as pscf
from pyscf.pbc.df import df, aug_etb, FFTDF
#from mpi4pyscf.pbc.df import df
pyscf.pbc.DEBUG = False
def setUpModule():
global cell, mf0, kmdf, kpts
L = 5.
n = 11
cell = pgto.Cell()
cell.a = numpy.diag([L,L,L])
cell.mesh = numpy.array([n,n,n])
cell.atom = '''He 3. 2. 3.
He 1. 1. 1.'''
cell.basis = 'ccpvdz'
cell.verbose = 0
cell.max_memory = 1000
cell.build(0,0)
mf0 = pscf.RHF(cell)
mf0.exxdiv = 'vcut_sph'
numpy.random.seed(1)
kpts =
|
numpy.random.random((5,3))
|
numpy.random.random
|
import copy
from pathlib import Path
import numpy as np
import pytest
from argoverse.utils.json_utils import read_json_file
from argoverse.utils.se2 import SE2
from argoverse.utils.sim2 import Sim2
TEST_DATA_ROOT = Path(__file__).resolve().parent / "test_data"
def test_constructor() -> None:
"""Sim(2) to perform p_b = bSa * p_a"""
bRa = np.eye(2)
bta =
|
np.array([1, 2])
|
numpy.array
|
import argparse
import random
import collections
import itertools
from collections import deque
from operator import itemgetter
import os
import cv2
import mmcv
import numpy as np
import torch
import json
from mmcv.parallel import collate, scatter
from mmaction.apis import init_recognizer
from mmaction.datasets.pipelines import Compose
from mmaction.core import OutputHook
from SoccerNet.utils import getListGames
from SoccerNet.DataLoader import Frame, FrameCV
FONTFACE = cv2.FONT_HERSHEY_COMPLEX_SMALL
FONTSCALE = 1
FONTCOLOR = (255, 255, 255) # BGR, white
MSGCOLOR = (128, 128, 128) # BGR, gray
THICKNESS = 1
LINETYPE = 1
EXCLUED_STEPS = [
'OpenCVInit', 'OpenCVDecode', 'DecordInit', 'DecordDecode', 'PyAVInit',
'PyAVDecode', 'RawFrameDecode', 'FrameSelector'
]
class sliceable_deque(collections.deque):
def __getitem__(self, index):
if isinstance(index, slice):
return type(self)(itertools.islice(self, index.start,
index.stop, index.step))
return collections.deque.__getitem__(self, index)
def parse_args():
parser = argparse.ArgumentParser(
description='MMAction2 predict different labels in a long video demo')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file/url')
parser.add_argument(
'--device', type=str, default='cuda:0', help='CPU/CUDA device option')
parser.add_argument(
'--split',
type=str,
default="val",
help='val/test')
parser.add_argument(
'--bias',
type=int,
default=1000,
help='classify temporal bias')
parser.add_argument(
'--half',
type=int,
default=0,
        help='0: 1st part, 1: 2nd part, 2: 3rd part, 3: 4th part') # 4 parts
parser.add_argument(
'--datapath',
type=str,
default="data/loveu_wide_val_2s_30fps/",
help='loveu val data path')
parser.add_argument(
'--targetpath',
type=str,
default="data/valres/",
help='target path')
parser.add_argument(
'--modelname',
type=str,
default="csn_4cls_2s_30fps",
help='pth model name')
parser.add_argument(
'--fps',
type=float,
default=30.0,
help=('fps'))
parser.add_argument(
'--stride',
type=int,
default=8,
help='stride')
args = parser.parse_args()
return args
def bmn_proposals(results,
num_videos,
max_avg_proposals=None,
num_res = 1,
thres=0.0,
):
bmn_res = []
for result in results:
#video_id = result['video_name']
num_proposals = 0
cur_video_proposals = []
for proposal in result:
t_start, t_end = proposal['segment']
score = proposal['score']
if score < thres: continue
cur_video_proposals.append([t_start, t_end, score])
num_proposals += 1
if len(cur_video_proposals)==0:
bmn_res.append(np.array([-2021]))
continue
cur_video_proposals = np.array(cur_video_proposals)
ratio = (max_avg_proposals * float(num_videos) / num_proposals)
this_video_proposals = cur_video_proposals[:, :2]
sort_idx = cur_video_proposals[:, 2].argsort()[::-1]
this_video_proposals = this_video_proposals[sort_idx, :].astype(np.float32)
if this_video_proposals.ndim != 2:
this_video_proposals = np.expand_dims(this_video_proposals, axis=0)
# For each video, compute temporal_iou scores among the retrieved proposals
total_num_retrieved_proposals = 0
# Sort proposals by score
num_retrieved_proposals = np.minimum(
int(this_video_proposals.shape[0] * ratio),
this_video_proposals.shape[0])
total_num_retrieved_proposals += num_retrieved_proposals
this_video_proposals = this_video_proposals[:num_retrieved_proposals, :]
#print(this_video_proposals)
this_video_gebd_proposals = this_video_proposals.mean(axis=-1)
num_res = min(num_res, len(this_video_gebd_proposals))
this_video_gebd_top_proposal = this_video_gebd_proposals[:num_res]
bmn_res.append(this_video_gebd_top_proposal)
return bmn_res
def show_results(model, data, test, cn, args):
frame_queue = sliceable_deque(maxlen=args.sample_length)
result_queue = deque(maxlen=1)
result_path = args.targetpath + test.split(".")[0] + "/" + args.modelname + "_score.npy"
# save results with different scores
result_bmn_path = args.targetpath + test.split(".")[0] + "/" + args.modelname + "_proposal.npy"
result_bmn_path_3 = args.targetpath + test.split(".")[0] + "/" + args.modelname + "_proposal_0.3.npy"
result_bmn_path_4 = args.targetpath + test.split(".")[0] + "/" + args.modelname + "_proposal_0.4.npy"
result_bmn_path_5 = args.targetpath + test.split(".")[0] + "/" + args.modelname + "_proposal_0.5.npy"
result_bmn_path_6 = args.targetpath + test.split(".")[0] + "/" + args.modelname + "_proposal_0.6.npy"
result_bmn_path_7 = args.targetpath + test.split(".")[0] + "/" + args.modelname + "_proposal_0.7.npy"
result_bmn_path_8 = args.targetpath + test.split(".")[0] + "/" + args.modelname + "_proposal_0.8.npy"
result_bmn_path_9 = args.targetpath + test.split(".")[0] + "/" + args.modelname + "_proposal_0.9.npy"
result_bmn_path_95 = args.targetpath + test.split(".")[0] + "/" + args.modelname + "_proposal_0.95.npy"
videoLoader = FrameCV(args.datapath + '/' + test, FPS=args.fps, transform="resize256", start=None, duration=None)
frames = videoLoader.frames[:, :, :, ::-1]
print(cn, test, frames.shape)
duration = videoLoader.time_second
stride = args.stride
pad_length = int(args.sample_length/2)
frames_head = np.zeros((pad_length, frames.shape[1], frames.shape[2], frames.shape[3]), frames.dtype)
frames_tail = np.zeros((pad_length, frames.shape[1], frames.shape[2], frames.shape[3]), frames.dtype)
for i in range(pad_length):
frames_head[i] = frames[0].copy()
frames_tail[i] = frames[-1].copy()
frames_padded = np.concatenate((frames_head, frames, frames_tail), 0)
score_list = []
bmn_results = []
num_sub_videos = 0
for i in range(int(frames.shape[0]/stride)):
num_sub_videos += 1
start_index = i * stride
frame_queue = frames_padded[(start_index):(start_index + args.sample_length)][0::data['frame_interval']].copy()
ret, scores, output_bmn = inference(model, data, args, frame_queue)
score_list.append(scores)
bmn_results.append(output_bmn)
bmn_res = bmn_proposals(bmn_results,
num_videos=1,
max_avg_proposals=100,
num_res = 10,
thres=0.0,
)
score_list = np.array(score_list)
bmn_res = np.array(bmn_res)
bmn_res3 = bmn_proposals(bmn_results,
num_videos=1,
max_avg_proposals=100,
num_res = 10,
thres=0.3,
)
bmn_res3 = np.array(bmn_res3)
bmn_res4 = bmn_proposals(bmn_results,
num_videos=1,
max_avg_proposals=100,
num_res = 10,
thres=0.4,
)
bmn_res4 = np.array(bmn_res4)
bmn_res5 = bmn_proposals(bmn_results,
num_videos=1,
max_avg_proposals=100,
num_res = 10,
thres=0.5,
)
bmn_res5 = np.array(bmn_res5)
bmn_res6 = bmn_proposals(bmn_results,
num_videos=1,
max_avg_proposals=100,
num_res = 10,
thres=0.6,
)
bmn_res6 = np.array(bmn_res6)
bmn_res7 = bmn_proposals(bmn_results,
num_videos=1,
max_avg_proposals=100,
num_res = 10,
thres=0.7,
)
bmn_res7 = np.array(bmn_res7)
bmn_res8 = bmn_proposals(bmn_results,
num_videos=1,
max_avg_proposals=100,
num_res = 10,
thres=0.8,
)
bmn_res8 = np.array(bmn_res8)
bmn_res9 = bmn_proposals(bmn_results,
num_videos=1,
max_avg_proposals=100,
num_res = 10,
thres=0.9,
)
bmn_res9 = np.array(bmn_res9)
bmn_res95 = bmn_proposals(bmn_results,
num_videos=1,
max_avg_proposals=100,
num_res = 10,
thres=0.95,
)
bmn_res95 = np.array(bmn_res95)
score_list = np.array(score_list)
bmn_res = np.array(bmn_res)
#print(cn, test, frames.shape, score_list.shape)
np.save(result_path, score_list)
np.save(result_bmn_path, bmn_res)
np.save(result_bmn_path_3, bmn_res3)
np.save(result_bmn_path_4, bmn_res4)
np.save(result_bmn_path_5, bmn_res5)
|
np.save(result_bmn_path_6, bmn_res6)
|
numpy.save
|
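# --- Illustration: the core of bmn_proposals() above on synthetic proposals: ---
# --- drop segments below a score threshold, sort by score, and reduce each   ---
# --- kept [t_start, t_end] segment to its midpoint.                          ---
import numpy as np

proposals = np.array([[0.0, 2.0, 0.9],
                      [1.0, 3.0, 0.2],
                      [4.0, 6.0, 0.7]])      # columns: t_start, t_end, score
thres = 0.5
kept = proposals[proposals[:, 2] >= thres]
kept = kept[kept[:, 2].argsort()[::-1]]      # highest score first
boundaries = kept[:, :2].mean(axis=-1)       # midpoints, as in the returned proposals
print(boundaries)                            # [1. 5.]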
import numpy as np
import torch
import yaml
import matplotlib.pyplot as plt
from dp_datagen import iter
from matplotlib import animation
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.animation import PillowWriter
with open("dp_config.yaml", "r") as f:
all_configs = yaml.safe_load(f)
common_config = all_configs['COMMON'].copy()
# Filling in models from model templates
for instance in common_config['ALL_MODEL_CONFIGS']:
template_name = instance[:instance.rfind('_')]
training_points = int(instance[(instance.rfind('_')+1):])
template_config = all_configs[template_name].copy()
template_config['num_datadriven'] = training_points
template_config['model_name'] = template_name.lower() + '_' + str(training_points)
all_configs[template_name + '_' + str(training_points)] = template_config
# theta1 = float(input('Enter initial Theta 1: '))
# theta2 = float(input('Enter initial Theta 2: '))
# tsteps = int(input('Enter number of timesteps: '))
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# device = torch.device('cpu')
active_data_config_name = 'SIMULATION_80_90'
active_model_config_name = 'PIDNN_64000'
noise = 0.1
active_data_config = all_configs[active_data_config_name].copy()
active_data_config.update(common_config)
active_model_config = all_configs[active_model_config_name].copy()
active_model_config.update(active_data_config)
config = active_model_config
theta1 = ((config['TRAIN_THETA_START'] + config['TRAIN_THETA_END'])/2) * np.pi/180
theta2 = ((config['TRAIN_THETA_START'] + config['TRAIN_THETA_END'])/2) * np.pi/180
tsteps = 10000
config['t_range'] = np.arange(start=0.0, stop = config['TIMESTEP']*tsteps, step = config['TIMESTEP'])
t = config['t_range']
solved_data = iter(theta1, theta2, t, config['g'], config['m1'], config['m2'], config['l1'], config['l2'])
simulator_output = np.hstack([
|
np.reshape(t,(-1,1))
|
numpy.reshape
|
#!/usr/bin/env python
"""
@file features.py
@brief provide functions to compute image features
@author ChenglongChen
"""
import numpy as np
from numpy import pi
from skimage.io import imread
from skimage import measure
from skimage import morphology
from skimage.feature import greycomatrix, greycoprops
import warnings
warnings.filterwarnings("ignore")
import mahotas as mh
from mahotas.features import surf
from scipy.stats.mstats import mquantiles, kurtosis, skew
def tryDivide(x, y):
if y == 0:
return 0.0
else:
return x / y
# find the largest nonzero region
def getLargestRegion(props, labelmap, imagethres):
regionmaxprop = None
for regionprop in props:
# check to see if the region is at least 50% nonzero
if sum(imagethres[labelmap == regionprop.label])*1.0/regionprop.area < 0.50:
continue
if regionmaxprop is None:
regionmaxprop = regionprop
if regionmaxprop.filled_area < regionprop.filled_area:
regionmaxprop = regionprop
return regionmaxprop
def estimateRotationAngle(image_file):
image = imread(image_file, as_grey=True)
image = image.copy()
# Create the thresholded image to eliminate some of the background
imagethr = np.where(image >
|
np.mean(image)
|
numpy.mean
|
# import standard plotting and animation
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
from matplotlib.ticker import FormatStrFormatter
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
from IPython.display import clear_output
import matplotlib.ticker as ticker
# import standard libraries
import math
import time
import copy
from inspect import signature
class Visualizer:
'''
animators for time series
'''
#### animate moving average ####
def animate_system(self,x,y,T,savepath,**kwargs):
# produce figure
fig = plt.figure(figsize = (9,4))
gs = gridspec.GridSpec(1, 3, width_ratios=[1,7,1])
ax = plt.subplot(gs[0]); ax.axis('off')
ax1 = plt.subplot(gs[1]);
ax2 = plt.subplot(gs[2]); ax2.axis('off')
artist = fig
# view limits
xmin = -3
xmax = len(x) + 3
ymin = np.min(x)
ymax = np.max(x)
ygap = (ymax - ymin)*0.15
ymin -= ygap
ymax += ygap
# start animation
num_frames = len(y) - T + 1
print ('starting animation rendering...')
def animate(k):
# clear panels
ax1.cla()
# print rendering update
if np.mod(k+1,25) == 0:
print ('rendering animation frame ' + str(k+1) + ' of ' + str(num_frames))
if k == num_frames - 1:
print ('animation rendering complete!')
time.sleep(1.5)
clear_output()
# plot x
ax1.scatter(np.arange(1,x.size + 1),x,c = 'k',edgecolor = 'w',s = 40,linewidth = 1,zorder = 3);
ax1.plot(np.arange(1,x.size + 1),x,alpha = 0.5,c = 'k',zorder = 3);
# plot moving average - initial conditions
if k == 1:
# plot x
ax1.scatter(np.arange(1,T + 1), y[:T],c = 'darkorange',edgecolor = 'w',s = 120,linewidth = 1,zorder = 2);
ax1.plot(np.arange(1,T + 1), y[:T],alpha = 0.5,c = 'darkorange',zorder = 2);
# make vertical visual guides
ax1.axvline(x = 1, c='deepskyblue')
ax1.axvline(x = T, c='deepskyblue')
# plot moving average - everything after and including initial conditions
if k > 1:
j = k-1
# plot
ax1.scatter(np.arange(1,T + j + 1),y[:T + j],c = 'darkorange',edgecolor = 'w',s = 120,linewidth = 1,zorder = 2);
ax1.plot(np.arange(1,T + j + 1),y[:T + j],alpha = 0.5,c = 'darkorange',zorder = 2);
# make vertical visual guides
ax1.axvline(x = j, c='deepskyblue')
ax1.axvline(x = j + T - 1, c='deepskyblue')
# label axes
ax1.set_xlim([xmin,xmax])
ax1.set_ylim([ymin,ymax])
return artist,
anim = animation.FuncAnimation(fig, animate ,frames=num_frames, interval=num_frames, blit=True)
# produce animation and save
fps = 50
if 'fps' in kwargs:
fps = kwargs['fps']
anim.save(savepath, fps=fps, extra_args=['-vcodec', 'libx264'])
clear_output()
#### animate range of moving average calculations ####
def animate_system_range(self,x,func,params,savepath,**kwargs):
playback = 1
if 'playback' in kwargs:
playback = kwargs['playback']
# produce figure
fig = plt.figure(figsize = (9,4))
gs = gridspec.GridSpec(1, 3, width_ratios=[1,7,1])
ax = plt.subplot(gs[0]); ax.axis('off')
ax1 = plt.subplot(gs[1]);
ax2 = plt.subplot(gs[2]); ax2.axis('off')
artist = fig
# view limits
xmin = -3
xmax = len(x) + 3
ymin = np.min(x)
ymax = np.max(x)
ygap = (ymax - ymin)*0.15
ymin -= ygap
ymax += ygap
# start animation
num_frames = len(params)+1
print ('starting animation rendering...')
def animate(k):
# clear panels
ax1.cla()
# print rendering update
if np.mod(k+1,25) == 0:
print ('rendering animation frame ' + str(k+1) + ' of ' + str(num_frames))
if k == num_frames - 1:
print ('animation rendering complete!')
time.sleep(1.5)
clear_output()
# plot x
ax1.scatter(np.arange(1,x.size + 1),x,c = 'k',edgecolor = 'w',s = 40,linewidth = 1,zorder = 3);
ax1.plot(np.arange(1,x.size + 1),x,alpha = 0.5,c = 'k',zorder = 3);
# create y
if k == 0:
T = params[0]
y = func(x,T)
ax1.set_title(r'Original data')
if k > 0:
T = params[k-1]
y = func(x,T)
ax1.scatter(np.arange(1,y.size + 1),y,c = 'darkorange',edgecolor = 'w',s = 120,linewidth = 1,zorder = 2);
ax1.plot(np.arange(1,y.size + 1),y,alpha = 0.5,c = 'darkorange',zorder = 2);
ax1.set_title(r'$D = $ ' + str(T))
# label axes
ax1.set_xlabel(r'$p$',fontsize = 13)
ax1.set_xlim([xmin,xmax])
ax1.set_ylim([ymin,ymax])
return artist,
anim = animation.FuncAnimation(fig, animate ,frames=num_frames, interval=num_frames, blit=True)
# produce animation and save
if 'fps' in kwargs:
fps = kwargs['fps']
anim.save(savepath, fps=1, extra_args=['-vcodec', 'libx264'])
clear_output()
#### animate vector system with heatmap ####
def animate_vector_system(self,x,D,model,func,savepath,**kwargs):
x = np.array(x)
h,old_bins = func([0])
bins = []
for i in range(len(old_bins)-1):
b1 = old_bins[i]
b2 = old_bins[i+1]
n = (b1 + b2)/2
n = np.round(n,2)
bins.append(n)
y = model(x,D,func)
num_windows = len(y) - 1
# produce figure
fig = plt.figure(figsize = (11,10))
gs = gridspec.GridSpec(2, 3, width_ratios=[1,7,1],height_ratios=[0.75,1])
ax1 = plt.subplot(gs[0]); ax1.axis('off')
ax2 = plt.subplot(gs[1]);
ax3 = plt.subplot(gs[2]); ax3.axis('off')
ax4 = plt.subplot(gs[3]); ax4.axis('off')
ax5 = plt.subplot(gs[4]);
ax6 = plt.subplot(gs[5]); ax6.axis('off')
artist = fig
# view limits
xmin = -3
xmax = len(x) + 3
ymin = np.min(x)
ymax = np.max(x)
ygap = (ymax - ymin)*0.15
ymin -= ygap
ymax += ygap
# make colormap
# a,b = np.meshgrid(np.arange(num_windows+1),np.arange(len(bins)-1))
# s = ax1.pcolormesh(a, b, np.array(y).T,cmap = 'hot',vmin = 0,vmax = 1) #,edgecolor = 'k') # hot, gist_heat, cubehelix
# ax1.cla(); ax1.axis('off');
# fig.colorbar(s, ax=ax5)
# start animation
num_frames = len(x) - D + 2
print ('starting animation rendering...')
def animate(k):
# clear panels
ax2.cla()
ax5.cla()
# print rendering update
if np.mod(k+1,25) == 0:
print ('rendering animation frame ' + str(k+1) + ' of ' + str(num_frames))
if k == num_frames - 1:
print ('animation rendering complete!')
time.sleep(1.5)
clear_output()
# plot x
ax2.scatter(np.arange(1,x.size + 1),x,c = 'k',edgecolor = 'w',s = 80,linewidth = 1,zorder = 3);
ax2.plot(np.arange(1,x.size + 1),x,alpha = 0.5,c = 'k',zorder = 3);
# plot moving average - initial conditions
if k == 0:
# plot x
ax2.scatter(np.arange(1,D + 1), x[:D],c = 'darkorange',edgecolor = 'w',s = 200,linewidth = 1,zorder = 2);
ax2.plot(np.arange(1,D + 1), x[:D],alpha = 0.5,c = 'darkorange',zorder = 2);
# make vertical visual guides
ax2.axvline(x = 1, c='deepskyblue')
ax2.axvline(x = D, c='deepskyblue')
# plot histogram
self.plot_heatmap(ax5,y[:2],bins,num_windows)
# plot moving average - everything after and including initial conditions
if k > 0:
j = k
# plot
ax2.scatter(np.arange(j,D + j),x[j-1:D + j - 1],c = 'darkorange',edgecolor = 'w',s = 200,linewidth = 1,zorder = 2);
ax2.plot(np.arange(j,D + j),x[j-1:D + j - 1],alpha = 0.5,c = 'darkorange',zorder = 2);
# make vertical visual guides
ax2.axvline(x = j, c='deepskyblue')
ax2.axvline(x = j + D - 1, c='deepskyblue')
# plot histogram
self.plot_heatmap(ax5,y[:j+1],bins,num_windows)
# label axes
ax2.set_xlim([xmin,xmax])
ax2.set_ylim([ymin,ymax])
return artist,
anim = animation.FuncAnimation(fig, animate ,frames=num_frames, interval=num_frames, blit=True)
# produce animation and save
fps = 50
if 'fps' in kwargs:
fps = kwargs['fps']
anim.save(savepath, fps=fps, extra_args=['-vcodec', 'libx264'])
clear_output()
def plot_heatmap(self,ax,y,bins,num_windows):
y=np.array(y).T
### plot ###
num_chars,num_samples = y.shape
num_chars += 1
a,b = np.meshgrid(np.arange(num_samples),np.arange(num_chars))
### y-axis Customize minor tick labels ###
# make custom labels
num_bins = len(bins)+1
y_ticker_range = np.arange(0.5,num_bins,10).tolist()
new_bins = [bins[v] for v in range(0,len(bins),10)]
y_char_range = [str(s) for s in new_bins]
# assign major or minor ticklabels? - chosen major by default
ax.yaxis.set_major_locator(ticker.FixedLocator(y_ticker_range))
ax.yaxis.set_major_formatter(ticker.FixedFormatter(y_char_range))
ax.xaxis.set_ticks_position('bottom') # the rest is the same
ax.set_xticks([],[])
ax.set_yticks([],[])
ax.set_ylabel('values',rotation = 90,fontsize=15)
ax.set_xlabel('window',fontsize=15)
# ax.set_title(title,fontsize = 15)
cmap = 'hot_r'
#cmap = 'RdPu'
s = ax.pcolormesh(a, b, 4*y,cmap = cmap,vmin = 0,vmax = 1) #,edgecolor = 'k') # hot, gist_heat, cubehelix
ax.set_ylim([-1,len(bins)])
ax.set_xlim([0,num_windows])
# for i in range(len(bins)):
# ax.hlines(y=i, xmin=0, xmax=num_windows, linewidth=1, color='k',alpha = 0.75)
#### animate vector system with heatmap ####
def animate_vector_histogram(self,x,D,model,func,savepath,**kwargs):
x = np.array(x)
h,old_bins = func([0])
bins = []
for i in range(len(old_bins)-1):
b1 = old_bins[i]
b2 = old_bins[i+1]
n = (b1 + b2)/2
n = np.round(n,2)
bins.append(n)
y = model(x,D,func)
num_windows = len(y) - 1
# produce figure
fig = plt.figure(figsize = (11,10))
gs = gridspec.GridSpec(3, 3, width_ratios=[1,7,1],height_ratios=[1,1,1.5])
ax1 = plt.subplot(gs[0]); ax1.axis('off')
ax2 = plt.subplot(gs[1]);
ax3 = plt.subplot(gs[2]); ax3.axis('off')
axa = plt.subplot(gs[3]); axa.axis('off')
axb = plt.subplot(gs[7]);
axc = plt.subplot(gs[5]); axc.axis('off')
ax4 = plt.subplot(gs[6]); ax4.axis('off')
ax5 = plt.subplot(gs[4]);
ax6 = plt.subplot(gs[8]); ax6.axis('off')
artist = fig
# view limits
xmin = -3
xmax = len(x) + 3
ymin = np.min(x)
ymax = np.max(x)
ygap = (ymax - ymin)*0.15
ymin -= ygap
ymax += ygap
# start animation
num_frames = len(x) - D + 2
print ('starting animation rendering...')
def animate(k):
# clear panels
ax2.cla()
ax5.cla()
axb.cla()
# print rendering update
if np.mod(k+1,25) == 0:
print ('rendering animation frame ' + str(k+1) + ' of ' + str(num_frames))
if k == num_frames - 1:
print ('animation rendering complete!')
time.sleep(1.5)
clear_output()
# plot x
ax2.scatter(np.arange(1,x.size + 1),x,c = 'k',edgecolor = 'w',s = 80,linewidth = 1,zorder = 3);
ax2.plot(np.arange(1,x.size + 1),x,alpha = 0.5,c = 'k',zorder = 3);
# plot moving average - initial conditions
if k == 0:
# plot x
ax2.scatter(np.arange(1,D + 1), x[:D],c = 'darkorange',edgecolor = 'w',s = 200,linewidth = 1,zorder = 2);
ax2.plot(np.arange(1,D + 1), x[:D],alpha = 0.5,c = 'darkorange',zorder = 2);
# make vertical visual guides
ax2.axvline(x = 1, c='deepskyblue')
ax2.axvline(x = D, c='deepskyblue')
# plot histogram
self.plot_histogram(ax5,y[0],bins)
self.plot_heatmap(axb,y[:2],bins,num_windows)
# plot moving average - everything after and including initial conditions
if k > 0:
j = k
# plot
ax2.scatter(np.arange(j,D + j),x[j-1:D + j - 1],c = 'darkorange',edgecolor = 'w',s = 200,linewidth = 1,zorder = 2);
ax2.plot(np.arange(j,D + j),x[j-1:D + j - 1],alpha = 0.5,c = 'darkorange',zorder = 2);
# make vertical visual guides
ax2.axvline(x = j, c='deepskyblue')
ax2.axvline(x = j + D - 1, c='deepskyblue')
# plot histogram
self.plot_histogram(ax5,y[j],bins)
# plot histogram
self.plot_heatmap(axb,y[:j+1],bins,num_windows)
# label axes
ax2.set_xlim([xmin,xmax])
ax2.set_ylim([ymin,ymax])
ax2.set_xlabel(r'$p$',fontsize=14)
ax2.set_ylabel(r'$x_p$',rotation=0,fontsize=14)
return artist,
anim = animation.FuncAnimation(fig, animate ,frames=num_frames, interval=num_frames, blit=True)
# produce animation and save
fps = 50
if 'fps' in kwargs:
fps = kwargs['fps']
anim.save(savepath, fps=fps, extra_args=['-vcodec', 'libx264'])
clear_output()
def plot_histogram(self,ax,h,bins,**kwargs):
# plot hist
ax.bar(bins,h,align='center',width=0.1,edgecolor='k',color='magenta',linewidth=1.5)
# label axes
ax.set_xlabel(r'$values$',fontsize = 13)
ax.set_ylabel(r'count',fontsize = 13,rotation = 90,labelpad = 15)
ymin = 0
xmin = min(bins) - 0.1
xmax = max(bins) + 0.1
ymax = 0.5
ax.set_xlim([xmin,xmax])
ax.set_ylim([ymin,ymax])
#### animate spectrogram construction ####
def animate_dct_spectrogram(self,x,D,model,func,savepath,**kwargs):
# produce heatmap
y = model(x,D,func)
num_windows = y.shape[1]-1
# produce figure
fig = plt.figure(figsize = (12,8))
gs = gridspec.GridSpec(2, 3, width_ratios=[1,7,1],height_ratios=[1,1])
ax1 = plt.subplot(gs[0]); ax1.axis('off')
ax2 = plt.subplot(gs[1]);
ax3 = plt.subplot(gs[2]); ax3.axis('off')
ax4 = plt.subplot(gs[3]); ax4.axis('off')
ax5 = plt.subplot(gs[4]);
ax5.set_yticks([],[])
ax5.axis('off')
ax6 = plt.subplot(gs[5]); ax6.axis('off')
artist = fig
# view limits for top panel
xmin = -3
xmax = len(x) + 3
ymin = np.min(x)
ymax = np.max(x)
ygap = (ymax - ymin)*0.15
ymin -= ygap
ymax += ygap
vmin = np.min(np.log(1 + y).flatten())
vmax = np.max(np.log(1 + y).flatten())
# start animation
num_frames = len(x) - D + 2
print ('starting animation rendering...')
def animate(k):
# clear panels
ax2.cla()
ax5.cla()
# print rendering update
if np.mod(k+1,25) == 0:
print ('rendering animation frame ' + str(k+1) + ' of ' + str(num_frames))
if k == num_frames - 1:
print ('animation rendering complete!')
time.sleep(1.5)
clear_output()
# plot signal
ax2.plot(np.arange(1,x.size + 1),x,alpha = 0.5,c = 'k',zorder = 3);
# plot moving average - initial conditions
if k == 0:
# plot x
ax2.plot(np.arange(1,D + 1), x[:D],alpha = 0.5,c = 'magenta',zorder = 2,linewidth=8);
# plot spectrogram
ax5.imshow(np.log(1 + y[:,:1]),aspect='auto',cmap='jet',origin='lower',vmin = vmin, vmax = vmax)
# plot moving average - everything after and including initial conditions
if k > 0:
j = k
# plot
ax2.plot(
|
np.arange(j,D + j)
|
numpy.arange
|
# coding=utf-8
# Copyright 2020, The RAG Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
import tempfile
import unittest
from unittest.mock import patch
import numpy as np
from transformers import BartTokenizer, T5Tokenizer
from transformers.file_utils import cached_property, is_datasets_available, is_faiss_available, is_torch_available
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.tokenization_dpr import DPRQuestionEncoderTokenizer
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
require_torch_non_multi_gpu,
slow,
torch_device,
)
from .test_modeling_bart import BartModelTester
from .test_modeling_dpr import DPRModelTester
from .test_modeling_t5 import T5ModelTester
TOLERANCE = 1e-3
T5_SAMPLE_VOCAB = os.path.join(os.path.dirname(os.path.abspath(__file__)), "fixtures/test_sentencepiece.model")
if is_torch_available() and is_datasets_available() and is_faiss_available():
import torch
from datasets import Dataset
import faiss
from transformers import (
AutoConfig,
AutoModel,
AutoModelForSeq2SeqLM,
RagConfig,
RagModel,
RagRetriever,
RagSequenceForGeneration,
RagTokenForGeneration,
RagTokenizer,
)
from transformers.modeling_outputs import BaseModelOutput
def _assert_tensors_equal(a, b, atol=1e-12, prefix=""):
"""If tensors not close, or a and b arent both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if torch.allclose(a, b, atol=atol):
return True
raise
except Exception:
msg = "{} != {}".format(a, b)
if prefix:
msg = prefix + ": " + msg
raise AssertionError(msg)
def require_retrieval(test_case):
"""
    Decorator marking a test that requires a set of dependencies necessary to perform retrieval with
:class:`~transformers.RagRetriever`.
These tests are skipped when respective libraries are not installed.
"""
if not (is_torch_available() and is_datasets_available() and is_faiss_available()):
test_case = unittest.skip("test requires PyTorch, datasets and faiss")(test_case)
return test_case
@require_torch
@require_retrieval
@require_sentencepiece
class RagTestMixin:
all_model_classes = (
(RagModel, RagTokenForGeneration, RagSequenceForGeneration)
if is_torch_available() and is_datasets_available() and is_faiss_available()
else ()
)
retrieval_vector_size = 32
n_docs = 3
max_combined_length = 16
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
# DPR tok
vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
os.makedirs(dpr_tokenizer_path, exist_ok=True)
self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
self.special_tokens_map = {"unk_token": "<unk>"}
bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
os.makedirs(bart_tokenizer_path, exist_ok=True)
self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(vocab_tokens) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(merges))
t5_tokenizer = T5Tokenizer(T5_SAMPLE_VOCAB)
t5_tokenizer_path = os.path.join(self.tmpdirname, "t5_tokenizer")
t5_tokenizer.save_pretrained(t5_tokenizer_path)
@cached_property
def dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))
@cached_property
def bart_tokenizer(self) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))
@cached_property
    def t5_tokenizer(self) -> T5Tokenizer:
return T5Tokenizer.from_pretrained(os.path.join(self.tmpdirname, "t5_tokenizer"))
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def get_retriever(self, config):
dataset = Dataset.from_dict(
{
"id": ["0", "1", "3"],
"text": ["foo", "bar", "qux"],
"title": ["Foo", "Bar", "Qux"],
"embeddings": [
|
np.ones(self.retrieval_vector_size)
|
numpy.ones
|
import unittest
import numpy as np
from dscribe.descriptors import SOAP
from dscribe.kernels import REMatchKernel
from dscribe.kernels import AverageKernel
from ase.build import molecule
class AverageKernelTests(unittest.TestCase):
def test_difference(self):
"""Tests that the similarity is correct."""
# Create SOAP features for a system
desc = SOAP(
species=[1, 6, 7, 8],
rcut=5.0,
nmax=2,
lmax=2,
sigma=0.2,
periodic=False,
crossover=True,
sparse=False,
)
        # Check that identical molecules are identical.
a = molecule("H2O")
a_features = desc.create(a)
kernel = AverageKernel(metric="linear")
K = kernel.create([a_features, a_features])
self.assertTrue(np.all(np.abs(K - 1) < 1e-3))
# Check that completely different molecules are completely different
a = molecule("N2")
b = molecule("H2O")
a_features = desc.create(a)
b_features = desc.create(b)
K = kernel.create([a_features, b_features])
self.assertTrue(np.all(np.abs(K - np.eye(2)) < 1e-3))
# Check that somewhat similar molecules are somewhat similar
a = molecule("H2O")
b = molecule("H2O2")
a_features = desc.create(a)
b_features = desc.create(b)
K = kernel.create([a_features, b_features])
self.assertTrue(K[0, 1] > 0.9)
def test_metrics(self):
"""Tests that different metrics as defined by scikit-learn can be used."""
# Create SOAP features for a system
desc = SOAP(
species=[1, 8],
rcut=5.0,
nmax=2,
lmax=2,
sigma=0.2,
periodic=False,
crossover=True,
sparse=False,
)
a = molecule("H2O")
a_features = desc.create(a)
# Linear dot-product kernel
kernel = AverageKernel(metric="linear")
K = kernel.create([a_features, a_features])
# Gaussian kernel
kernel = AverageKernel(metric="rbf", gamma=1)
K = kernel.create([a_features, a_features])
# Laplacian kernel
kernel = AverageKernel(metric="laplacian", gamma=1)
K = kernel.create([a_features, a_features])
def test_xy(self):
"""Tests that the kernel can be also calculated between two different
sets, which is necessary for making predictions with kernel-based
methods.
"""
# Create SOAP features for a system
desc = SOAP(
species=[1, 8],
rcut=5.0,
nmax=2,
lmax=2,
sigma=0.2,
periodic=False,
crossover=True,
sparse=False,
)
a = molecule("H2O")
b = molecule("O2")
c = molecule("H2O2")
a_feat = desc.create(a)
b_feat = desc.create(b)
c_feat = desc.create(c)
# Linear dot-product kernel
kernel = AverageKernel(metric="linear")
K = kernel.create([a_feat, b_feat], [c_feat])
self.assertEqual(K.shape, (2, 1))
def test_sparse(self):
"""Tests that sparse features may also be used to construct the kernels."""
# Create SOAP features for a system
desc = SOAP(
species=[1, 8],
rcut=5.0,
nmax=2,
lmax=2,
sigma=0.2,
periodic=False,
crossover=True,
sparse=True,
)
a = molecule("H2O")
a_feat = desc.create(a)
kernel = AverageKernel(metric="linear")
K = kernel.create([a_feat])
class REMatchKernelTests(unittest.TestCase):
def test_difference(self):
"""Tests that the similarity is correct."""
# Create SOAP features for a system
desc = SOAP(
species=[1, 6, 7, 8],
rcut=5.0,
nmax=2,
lmax=2,
sigma=0.2,
periodic=False,
crossover=True,
sparse=False,
)
        # Check that identical molecules are identical.
a = molecule("H2O")
a_features = desc.create(a)
kernel = REMatchKernel(metric="linear", alpha=1, threshold=1e-6)
K = kernel.create([a_features, a_features])
self.assertTrue(np.all(np.abs(K - 1) < 1e-3))
# Check that completely different molecules are completely different
a = molecule("N2")
b = molecule("H2O")
a_features = desc.create(a)
b_features = desc.create(b)
K = kernel.create([a_features, b_features])
self.assertTrue(np.all(np.abs(K -
|
np.eye(2)
|
numpy.eye
|
"""
<NAME> - November 2020
This program creates baryonic mass-selected group catalogs for ECO/RESOLVE-G3 using the new algorithm, described in the readme markdown.
The outline of this code is:
(1) Read in observational data from RESOLVE-B and ECO (the latter includes RESOLVE-A).
(2) Prepare arrays of input parameters and for storing results.
(3) Perform FoF only for giants in ECO, using an adaptive linking strategy.
(a) Get the adaptive links for every ECO galaxy.
(b) Fit those adaptive links for use in RESOLVE-B.
(c) Perform giant-only FoF for ECO
(d) Perform giant-only FoF for RESOLVE-B, by interpolating the fit to obtain separations for RESOLVE-B.
(4) From giant-only groups, fit model for individual giant projected radii and peculiar velocities, to use for association.
(5) Associate dwarf galaxies to giant-only FoF groups for ECO and RESOLVE-B (note different selection floors for dwarfs).
(6) Based on giant+dwarf groups, calibrate boundaries (as a function of giant+dwarf integrated baryonic mass) for iterative combination
(7) Iterative combination on remaining ungrouped dwarf galaxies
(8) Halo mass assignment
(9) Finalize arrays + output
"""
import virtools as vz
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d, UnivariateSpline
from scipy.optimize import curve_fit
from center_binned_stats import center_binned_stats
import foftools as fof
import iterativecombination as ic
from smoothedbootstrap import smoothedbootstrap as sbs
import sys
from lss_dens import lss_dens_by_galaxy
def giantmodel(x, a, b):
return np.abs(a)*np.log(np.abs(b)*x+1)
def exp(x, a, b, c):
return np.abs(a)*np.exp(1*np.abs(b)*x + c)
def sepmodel(x, a, b, c, d, e):
#return np.abs(a)*np.exp(-1*np.abs(b)*x + c)+d
#return a*(x**3)+b*(x**2)+c*x+d
return a*(x**4)+b*(x**3)+c*(x**2)+(d*x)+e
def sigmarange(x):
q84, q16 = np.percentile(x, [84 ,16])
return (q84-q16)/2
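# Illustrative sketch (added, not part of the original pipeline): the helper models above can be
# fed to scipy's curve_fit, as imported at the top of this script. The synthetic data, noise level
# and starting guesses below are made up purely to show the calling pattern.
def _demo_giantmodel_fit():
    xdemo = np.linspace(0.1, 10.0, 50)
    ydemo = giantmodel(xdemo, 2.0, 0.5) + np.random.normal(0.0, 0.01, 50)
    popt, pcov = curve_fit(giantmodel, xdemo, ydemo, p0=[1.0, 1.0])
    # because giantmodel uses absolute values, the recovered (a, b) match (2.0, 0.5) up to sign
    return popt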
if __name__=='__main__':
####################################
# Step 1: Read in obs data
####################################
ecodata = pd.read_csv("ECOdata_022521.csv")
resolvedata = pd.read_csv("RESOLVEdata_022521.csv")
resolvebdata = resolvedata[resolvedata.f_b==1]
####################################
# Step 2: Prepare arrays
####################################
ecosz = len(ecodata)
econame = np.array(ecodata.name)
ecoresname = np.array(ecodata.resname)
ecoradeg = np.array(ecodata.radeg)
ecodedeg = np.array(ecodata.dedeg)
ecocz = np.array(ecodata.cz)
ecologmstar = np.array(ecodata.logmstar)
ecologmgas = np.array(ecodata.logmgas)
ecologmbary = np.log10(10.**ecologmstar+10.**ecologmgas)
ecourcolor = np.array(ecodata.modelu_rcorr)
ecog3grp = np.full(ecosz, -99.) # id number of g3 group
ecog3grpn =
|
np.full(ecosz, -99.)
|
numpy.full
|
"""UnbalancedFederatedDataset module."""
from abc import abstractmethod
from typing import List
import numpy as np
from tqdm import trange
from openfl.utilities.data_splitters.data_splitter import DataSplitter
def get_label_count(labels, label):
"""Count samples with label `label` in `labels` array."""
return len(np.nonzero(labels == label)[0])
def one_hot(labels, classes):
"""Apply One-Hot encoding to labels."""
return np.eye(classes)[labels]
class NumPyDataSplitter(DataSplitter):
"""Base class for splitting numpy arrays of data."""
@abstractmethod
def split(self, data: np.ndarray, num_collaborators: int) -> List[List[int]]:
"""Split the data."""
raise NotImplementedError
class EqualNumPyDataSplitter(NumPyDataSplitter):
"""Splits the data evenly."""
def __init__(self, shuffle=True):
"""Initialize.
Args:
shuffle(bool): Flag determining whether to shuffle the dataset before splitting.
"""
self.shuffle = shuffle
def split(self, data, num_collaborators):
"""Split the data."""
idx = range(len(data))
if self.shuffle:
idx = np.random.permutation(idx)
slices = np.array_split(idx, num_collaborators)
return slices
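# Illustrative sketch (added, not part of the original module): a toy call showing the expected
# behaviour of EqualNumPyDataSplitter. The data values here are made up for demonstration only.
def _demo_equal_split():
    data = np.arange(10)
    splitter = EqualNumPyDataSplitter(shuffle=False)
    slices = splitter.split(data, num_collaborators=3)
    # np.array_split yields index chunks of sizes [4, 3, 3] for 10 samples over 3 collaborators
    return [data[s] for s in slices]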
class RandomNumPyDataSplitter(NumPyDataSplitter):
"""Splits the data randomly."""
def __init__(self, shuffle=True):
"""Initialize.
Args:
shuffle(bool): Flag determining whether to shuffle the dataset before splitting.
"""
self.shuffle = shuffle
def split(self, data, num_collaborators):
"""Split the data."""
idx = range(len(data))
if self.shuffle:
idx = np.random.permutation(idx)
random_idx = np.sort(np.random.choice(len(data), num_collaborators - 1, replace=False))
return np.split(idx, random_idx)
class LogNormalNumPyDataSplitter(NumPyDataSplitter):
"""Unbalanced (LogNormal) dataset split."""
def __init__(self, mu,
sigma,
num_classes,
classes_per_col,
min_samples_per_class):
"""Initialize.
Args:
mu(float): Distribution hyperparameter.
            sigma(float): Distribution hyperparameter.
            num_classes(int): Total number of classes in the dataset.
            classes_per_col(int): Number of classes assigned to each collaborator.
min_samples_per_class(int): Minimum number of collaborator samples of each class.
"""
self.mu = mu
self.sigma = sigma
self.num_classes = num_classes
self.classes_per_col = classes_per_col
self.min_samples_per_class = min_samples_per_class
def split(self, data, num_collaborators):
"""Split the data."""
idx = [[] for _ in range(num_collaborators)]
samples_per_col = self.classes_per_col * self.min_samples_per_class
for col in range(num_collaborators):
for c in range(self.classes_per_col):
label = (col + c) % self.num_classes
label_idx = np.nonzero(data == label)[0]
slice_start = col // self.num_classes * samples_per_col
slice_start += self.min_samples_per_class * c
slice_end = slice_start + self.min_samples_per_class
print(f'Assigning {slice_start}:{slice_end} of {label} class to {col} col...')
idx[col] += list(label_idx[slice_start:slice_end])
assert all([len(i) == samples_per_col for i in idx]), f'''
All collaborators should have {samples_per_col} elements
but distribution is {[len(i) for i in idx]}'''
props_shape = (self.num_classes, num_collaborators // 10, self.classes_per_col)
props = np.random.lognormal(self.mu, self.sigma, props_shape)
num_samples_per_class = [[[get_label_count(data, label) - self.min_samples_per_class]]
for label in range(self.num_classes)]
num_samples_per_class = np.array(num_samples_per_class)
props = num_samples_per_class * props / np.sum(props, (1, 2), keepdims=True)
for col in trange(num_collaborators):
for j in range(self.classes_per_col):
label = (col + j) % self.num_classes
num_samples = int(props[label, col // 10, j])
print(f'Trying to append {num_samples} of {label} class to {col} col...')
slice_start = np.count_nonzero(data[np.hstack(idx)] == label)
slice_end = slice_start + num_samples
if slice_end < get_label_count(data, label):
label_subset = np.nonzero(data == (col + j) % self.num_classes)[0]
idx_to_append = label_subset[slice_start:slice_end]
print(f'Appending {idx_to_append} of {label} class to {col} col...')
idx[col] =
|
np.append(idx[col], idx_to_append)
|
numpy.append
|
import numpy as np
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.pipeline import FeatureUnion
from skits.feature_extraction import AutoregressiveTransformer, SeasonalTransformer
from skits.pipeline import ForecasterPipeline, ClassifierPipeline
from skits.preprocessing import (
ReversibleImputer,
DifferenceTransformer,
HorizonTransformer,
)
SEED = 666 # \m/
class TestPipelines:
steps = [
("pre_differencer", DifferenceTransformer(period=1)),
("pre_imputer_1", ReversibleImputer()),
(
"features",
FeatureUnion(
[
("ar_transformer", AutoregressiveTransformer(num_lags=3)),
("seasonal_transformer", SeasonalTransformer(seasonal_period=4)),
]
),
),
("post_lag_imputer_2", ReversibleImputer()),
]
dt = DifferenceTransformer(period=1)
ri1 = ReversibleImputer()
fe = FeatureUnion(
[
("ar_transformer", AutoregressiveTransformer(num_lags=3)),
("seasonal_transformer", SeasonalTransformer(seasonal_period=4)),
]
)
ri2 = ReversibleImputer()
def test_predict(self):
# Let's just see if it works
# TODO: Make this a real test
np.random.seed(SEED)
l = np.linspace(0, 1, 100)
y = np.sin(2 * np.pi * 5 * l) + np.random.normal(0, 0.1, size=100)
# Ignore the DifferenceTransformer. It's actually bad.
steps = list(self.steps[1:])
steps.append(("regressor", LinearRegression(fit_intercept=False)))
pipeline = ForecasterPipeline(steps)
pipeline.fit(y[:, np.newaxis], y)
y_pred = pipeline.predict(y[:, np.newaxis], to_scale=True, refit=True)
assert np.mean((y_pred - y.squeeze()) ** 2) < 0.05
def test_forecast(self):
# Let's just see if it works
# TODO: Make this a real test
l =
|
np.linspace(0, 1, 100)
|
numpy.linspace
|
import numpy as np
import common as f
import os.path
class GetNetworksamples:
def initialize(self, nPicos, picoProbability, repetitions, QoSthres, load=1):
ISD = 500
VISD = np.sqrt(3) * ISD
self.apothem = np.sqrt(3)/6 * ISD
self.maxUEperSector = 2000
self.nInterferingMacros = 4
self.macroPos = np.array([[0, VISD/2], [ISD/2, 0], [ISD/2, VISD], [ISD, VISD/2]])
self.sectorCenter = self.macroPos[0] + np.array([ISD/3, 0])
dataDir = 'netdata/'
if load == 0 or (not os.path.isfile(dataDir+str(nPicos))):
picoPos = f.picoCellGeneration(self.apothem, nPicos, self.sectorCenter, self.macroPos)
self.picoPos = picoPos
file = open(dataDir+str(nPicos), "wb")
np.save(file, picoPos)
else:
self.picoPos = np.load(dataDir+str(nPicos))
self.nPicos = nPicos
self.probPico_0 = picoProbability
self.fileLenghtBits = 8000
#Pico Values TR 36.887
self.macroPower = 10**((43 - 30)/10) # 43 dBm --> 13 dBW --> 20 W
self.picoPower = 6.3 # 6.3 W
self.nSubframes= 8
self.crsProportion = .1
self.subframeDuration = 1e-3
self.nUsedSubframes = 0
self.framesPerMin = 100
self.minPerHour = 60
self.hourPerDay = 24
self.lastPicoControl = np.ones(self.nPicos)
F = 10**0.5 # noise figure = 5 dB
T_O = 290
K_B = 1.3806504e-23
BW = 200000
N_O = F*T_O*K_B
self.thermalNoise = N_O*BW # thermal noise at the receiver
self.W = 10e6 # Channel bandwidth (10 MHz)
self.repetitions = repetitions
self.QoSthres = QoSthres
def getSamples(self, point):
consumptionSamples = np.zeros(self.repetitions)
per5Samples = np.zeros(self.repetitions)
meanThrSamples = np.zeros(self.repetitions)
maxCellUsage = np.zeros(self.repetitions)
percentQoS = np.zeros(self.repetitions)
self.meanPicoUsage = np.zeros(point[1])
self.meanMacroUsage = 0
sortedActivationIndex = f.picoSelection(self)
# print(point)
print('o', end='', flush=True)
for r in range(self.repetitions):
traffic = point[0]
nActivePicos = point[1]
self.absRatio = point[2]
self.creBias = point[3]
self.nActivePicos = nActivePicos
picoControl = np.zeros(self.nPicos)
picoControl[sortedActivationIndex[:self.nActivePicos]] = 1
self.activePicosPos = self.picoPos[picoControl == 1, :]
if sum(picoControl) == 0:
self.probPico = 0
else:
self.probPico = self.probPico_0
self.thrSamples = []
self.cellUsage = [[] for _ in range(self.nActivePicos+1)]
self.meanConsumptionPerCell = np.zeros(self.nPicos+1)
self.totalMeanConsumption = 0
self.UEpos = np.ones((self.maxUEperSector, 2)) * -1
self.UEdata = np.ones(self.maxUEperSector)
self.UEposPico = [
|
np.ones((self.maxUEperSector, 2))
|
numpy.ones
|
import numpy as np
import random as rn
def calculate_crowding(scores):
    # Crowding is based on chromosome scores (not chromosome binary values)
    # All scores are normalised between low and high
    # For any one score, all solutions are sorted in order low to high
    # Crowding for chromosome x for that score is the difference between the next highest and next lowest score
    # Total crowding value sums all crowding for all scores
population_size=len(scores[:,0])
number_of_scores=len(scores[0,:])
# create crowding matrix of population (row) and score (column)
crowding_matrix=np.zeros((population_size,number_of_scores))
# normalise scores
normed_scores = (scores-scores.min(0))/scores.ptp(0) # numpy ptp is range (max-min)
# Calculate crowding
for col in range(number_of_scores): # calculate crowding distance for each score in turn
crowding=np.zeros(population_size) # One dimensional array
crowding[0]=1 # end points have maximum crowding
crowding[population_size-1]=1 # end points have maximum crowding
sorted_scores=np.sort(normed_scores[:,col]) # sort scores
sorted_scores_index=np.argsort(normed_scores[:,col]) # index of sorted scores
crowding[1:population_size-1]=sorted_scores[2:population_size]-sorted_scores[0:population_size-2] # crowding distance
re_sort_order=np.argsort(sorted_scores_index) # re-sort to original order step 1
        sorted_crowding=crowding[re_sort_order] # re-sort to original order step 2
        crowding_matrix[:,col]=sorted_crowding # record crowding distances
    crowding_distances=np.sum(crowding_matrix,axis=1) # Sum crowding distances of all scores
return crowding_distances
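# Illustrative sketch (added for clarity, not part of the original script): a tiny check of
# calculate_crowding on made-up scores. End points of each sorted score column get the maximum
# crowding of 1; interior points get the normalised gap between their two neighbours.
def _demo_calculate_crowding():
    scores = np.array([[0.0, 3.0],
                       [1.0, 2.0],
                       [2.0, 1.0],
                       [3.0, 0.0]], dtype=float)
    d = calculate_crowding(scores)
    # rows 0 and 3 are end points for both scores, so their summed crowding is 2.0;
    # the interior rows sum two normalised neighbour gaps (2/3 + 2/3) instead
    return d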
def crowding_selection(population,scores,number_to_select):
    # This function selects a number of solutions based on a tournament of crowding distances
    # Two members of the population are picked at random
    # The one with the higher crowding distance is always picked
crowding_distances=calculate_crowding(scores) # crowding distances for each member of the population
picked_population=np.zeros((number_to_select,len(population[0,:]))) # array of picked solutions (actual solution not ID)
picked_scores=np.zeros((number_to_select,len(scores[0,:]))) # array of scores for picked solutions
for i in range(number_to_select):
population_size=len(population[:,0])
fighter1ID=rn.randint(0,population_size-1) # 1st random ID
fighter2ID=rn.randint(0,population_size-1) # 2nd random ID
if crowding_distances[fighter1ID]>=crowding_distances[fighter2ID]: # 1st solution picked
picked_population[i,:]=population[fighter1ID,:] # add solution to picked solutions array
picked_scores[i,:]=scores[fighter1ID,:] # add score to picked solutions array
# remove selected solution from available solutions
population=np.delete(population,(fighter1ID), axis=0) # remove picked solution - cannot be chosen again
scores=np.delete(scores,(fighter1ID), axis=0) # remove picked score (as line above)
            crowding_distances=np.delete(crowding_distances,(fighter1ID), axis=0) # remove crowding score (as line above)
else: # solution 2 is better. Code as above for 1st solution winning
picked_population[i,:]=population[fighter2ID,:]
picked_scores[i,:]=scores[fighter2ID,:]
population=np.delete(population,(fighter2ID), axis=0)
scores=np.delete(scores,(fighter2ID), axis=0)
crowding_distances=np.delete(crowding_distances,(fighter2ID), axis=0)
return (picked_population,picked_scores)
def generate_random_population(rows,cols):
population=np.zeros((rows,cols)) # create array of zeros
for i in range(rows):
x=rn.randint(1,cols) # Number of 1s to add
        population[i,0:x]=1 # Add required 1s
np.random.shuffle(population[i]) # Shuffle the 1s randomly
return population
def pareto(scores):
# In this method the array 'scores' is passed to the function.
# Scores have been normalised so that higher values dominate lower values.
# The function returns a Boolean array identifying which rows of the array 'scores' are non-dominated (the Pareto front)
# Method based on assuming everything starts on Pareto front and then records dominated points
pop_size=len(scores[:,0])
pareto_front=np.ones(pop_size,dtype=bool)
for i in range(pop_size):
for j in range(pop_size):
if all (scores[j]>=scores[i]) and any (scores[j]>scores[i]):
# j dominates i
pareto_front[i]=0
break
return pareto_front
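# Illustrative sketch (added, not part of the original script): identify the Pareto front of a
# handful of made-up, "higher is better" score pairs using the function above.
def _demo_pareto_front():
    scores = np.array([[1.0, 5.0],
                       [2.0, 4.0],
                       [1.5, 4.5],
                       [0.5, 0.5]], dtype=float)
    front = pareto(scores)
    # [0.5, 0.5] is dominated by every other row, so it is the only point excluded from the front
    return scores[front]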
def normalise_score(score_matrix,norm_matrix):
# normalise 'score matrix' with reference to 'norm matrix' which gives scores that produce zero or one
    norm_score=np.zeros(np.shape(score_matrix)) # create normalised score matrix with same dimensions as original scores
    number_of_scores=len(score_matrix[0,:]) # number of different scores
    for col in range(number_of_scores): # normalise each score in turn
score_zero=norm_matrix[col,0]
score_one=norm_matrix[col,1]
score_range=score_one-score_zero
norm_score[:,col]=(score_matrix[:,col]-score_zero)/score_range
return norm_score
def score(population,TARGET_ADMISSIONS,TRAVEL_MATRIX,NODE_ADMISSIONS,TOTAL_ADMISSIONS,pareto_include,CALC_ALL,nscore_parameters):
#Only calculate the score that is needed by the pareto front, as determined by the array: pareto_include
    #Unless CALC_ALL=True (set for the last generation), in which case all the parameter values are calculated and printed out
CALC_ALL=True # MA reporting all
# Score_matrix:
# 0: Number of hospitals
# 1: Average distance
# 2: Maximum distance
    # 3: Maximum admissions to any one hospital
    # 4: Minimum admissions to any one hospital
# 5: Max/Min Admissions ratio
# 6: Proportion patients within target distance 1
# 7: Proportion patients within target distance 2
# 8: Proportion patients within target distance 3
# 9: Proportion patients attending unit with target admission numbers
# 10: Proportion of patients meeting distance 1 (~30 min) and admissions target
# 11: Proportion of patients meeting distance 2 (~45 min) and admissions target
# 12: Proportion of patients meeting distance 3 (~60 min) and admissions target
# 13: Clinical benefit, additional benefit per 100 treatable patients
TARGET_DISTANCE_1=30 # straight line km, equivalent to 30 min
TARGET_DISTANCE_2=45 # straight line km, equivalent to 45 min
TARGET_DISTANCE_3=60 # straight line km, equivalent to 60 min
pop_size=len(population[:,0]) # Count number of solutions to evaluate
score_matrix=np.zeros((pop_size,nscore_parameters)) # Create an empty score matrix
    hospital_admissions_matrix=np.zeros((pop_size,len(TRAVEL_MATRIX[0,:]))) #store the hospital admissions, col = hospital, row = population
for i in range(pop_size): # Loop through population of solutions
node_results=np.zeros((len(NODE_ADMISSIONS),10))
# Node results stores results by patient node. These are used in the calculation of results
        # Node results may be of use to export at a later date (e.g. for detailed analysis of one scenario)
# Col 0: Distance to closest hospital
# Col 1: Patients within target distance 1 (boolean)
# Col 2: Patients within target distance 2 (boolean)
# Col 3: Patients within target distance 3 (boolean)
# Col 4: Hospital ID
        # Col 5: Number of admissions to hospital ID
# Col 6: Does hospital meet admissions target (boolean)
# Col 7: Admissions and target distance 1 both met (boolean)
# Col 8: Admissions and target distance 2 both met (boolean)
# Col 9: Admissions and target distance 3 both met (boolean)
# Count hospitals in each solution
if 0 in pareto_include or CALC_ALL:
score_matrix[i,0]=np.sum(population[i])
# Calculate average distance
mask=np.array(population[i],dtype=bool)
# hospital_list=np.where(mask) # list of hospitals in selection. Not currently used
masked_distances=TRAVEL_MATRIX[:,mask]
# Calculate results for each patient node
node_results[:,0]=np.amin(masked_distances,axis=1) # distance to closest hospital
node_results[:,1]=node_results[:,0]<=TARGET_DISTANCE_1 # =1 if target distance 1 met
node_results[:,2]=node_results[:,0]<=TARGET_DISTANCE_2 # =1 if target distance 2 met
node_results[:,3]=node_results[:,0]<=TARGET_DISTANCE_3 # =1 if target distance 3 met
closest_hospital_ID=np.argmin(masked_distances,axis=1) # index of closest hospital.
node_results[:,4]=closest_hospital_ID # stores hospital ID in case table needs to be exported later, but bincount below doesn't work when stored in NumPy array (which defaults to floating decimal)
# Create matrix of number of admissions to each hospital
hospital_admissions=np.bincount(closest_hospital_ID,weights=NODE_ADMISSIONS) # np.bincount with weights sums
hospital_admissions_matrix[i,mask]=hospital_admissions#putting the hospital admissions into a matrix with column per hospital, row per solution. Used to output to sheet
# record closest hospital (unused)
node_results[:,5]=np.take(hospital_admissions,closest_hospital_ID) # Lookup admissions to hospital used
node_results[:,6]=node_results[:,5]>TARGET_ADMISSIONS # =1 if admissions target met
# Calculate average distance by multiplying node distance * admission numbers and divide by total admissions
if 1 in pareto_include or CALC_ALL:
weighted_distances=np.multiply(node_results[:,0],NODE_ADMISSIONS)
average_distance=np.sum(weighted_distances)/TOTAL_ADMISSIONS
score_matrix[i,1]=average_distance
# Max distance for any one patient
if 2 in pareto_include or CALC_ALL:
score_matrix[i,2]=np.max(node_results[:,0])
# Max, min and max/min number of admissions to each hospital
if 3 in pareto_include or CALC_ALL:
score_matrix[i,3]=np.max(hospital_admissions)
if 4 in pareto_include or CALC_ALL:
score_matrix[i,4]=np.min(hospital_admissions)
if 5 in pareto_include or CALC_ALL:
score_matrix[i,5]=score_matrix[i,3]/score_matrix[i,4]
# Calculate proportion patients within target distance/time
if 6 in pareto_include or CALC_ALL:
score_matrix[i,6]=np.sum(NODE_ADMISSIONS[node_results[:,0]<=TARGET_DISTANCE_1])/TOTAL_ADMISSIONS
if 7 in pareto_include or CALC_ALL:
score_matrix[i,7]=np.sum(NODE_ADMISSIONS[node_results[:,0]<=TARGET_DISTANCE_2])/TOTAL_ADMISSIONS
if 8 in pareto_include or CALC_ALL:
score_matrix[i,8]=np.sum(NODE_ADMISSIONS[node_results[:,0]<=TARGET_DISTANCE_3])/TOTAL_ADMISSIONS
# Calculate proportion patients attending hospital with target admissions
if 9 in pareto_include or CALC_ALL:
score_matrix[i,9]=np.sum(hospital_admissions[hospital_admissions>=TARGET_ADMISSIONS])/TOTAL_ADMISSIONS
if 10 in pareto_include or CALC_ALL:
            # Sum patients who meet distance targets
node_results[:,7]=(node_results[:,1]+node_results[:,6])==2 # true if admissions and target distance 1 both met
sum_patients_addmissions_distance1_met=np.sum(NODE_ADMISSIONS[node_results[:,7]==1])
score_matrix[i,10]=sum_patients_addmissions_distance1_met/TOTAL_ADMISSIONS
if 11 in pareto_include or CALC_ALL:
            # Sum patients who meet distance targets
node_results[:,8]=(node_results[:,2]+node_results[:,6])==2 # true if admissions and target distance 2 both met
sum_patients_addmissions_distance2_met=np.sum(NODE_ADMISSIONS[node_results[:,8]==1])
score_matrix[i,11]=sum_patients_addmissions_distance2_met/TOTAL_ADMISSIONS
if 12 in pareto_include or CALC_ALL:
            # Sum patients who meet distance targets
node_results[:,9]=(node_results[:,3]+node_results[:,6])==2 # true if admissions and target distance 3 both met
sum_patients_addmissions_distance3_met=np.sum(NODE_ADMISSIONS[node_results[:,9]==1])
score_matrix[i,12]=sum_patients_addmissions_distance3_met/TOTAL_ADMISSIONS
#Calculate clinical benefit: Emberson and Lee
        #Use 115 mins for the onset until travelling in ambulance (30 mins onset to call + 40 mins call to travel + 45 mins door to needle) + ? travel time (as determined by the combination of hospitals open)
# if 13 in pareto_include or CALC_ALL:
# onset_to_treatment_time = distancekm_to_timemin(node_results[:,0])+115
# #constant to be used in the equation
# factor=(0.2948/(1 - 0.2948))
# #Calculate the adjusted odds ratio
# clinical_benefit=np.array(factor*np.power(10, (0.326956 + (-0.00086211 * onset_to_treatment_time))))
# # Patients that exceed the licensed onset to treatment time, set to a zero clinical benefit
# clinical_benefit[onset_to_treatment_time>270]=0
# #Probabilty of good outcome per node
# clinical_benefit = (clinical_benefit / (1 + clinical_benefit)) - 0.2948
# #Number of patients with a good outcome per node
# clinical_benefit = clinical_benefit*NODE_ADMISSIONS
# score_matrix[i,13]=np.sum(clinical_benefit)/TOTAL_ADMISSIONS *100
#hospital_admissions_matrix[i,:]=np.transpose(hospital_admissions)#putting the column into a row in the matrix
#np.savetxt('output/admissions_test.csv',hospital_admissions_matrix[i,:],delimiter=',',newline='\n')
return (score_matrix,hospital_admissions_matrix)
def unique_rows(a): # stolen off the interwebs
a = np.ascontiguousarray(a)
unique_a = np.unique(a.view([('', a.dtype)]*a.shape[1]))
return unique_a.view(a.dtype).reshape((unique_a.shape[0], a.shape[1]))
def fix_hospital_status(l_population,l_HOSPITAL_STATUS):
#Takes the 5th column from the hospital.csv file and if "1" then open, "-1" then closed
HOSPITAL_STATUS_POPULATION=np.repeat(l_HOSPITAL_STATUS,len(l_population[:,0]),axis=0)#repeat the row "len(child_population[:,0])" number of times, so have 1 per solution row (matching the size of the child_population matrix)
l_population[HOSPITAL_STATUS_POPULATION==1]=1 # Fixes the open hospitals to have a value 1
l_population[HOSPITAL_STATUS_POPULATION==-1]=0 # Fixes the closed hospitals to have a value 0
return l_population
def f_location_crossover(l_parent, l_MAXCROSSOVERPOINTS,l_CHROMOSOMELENGTH):
number_crossover_points=rn.randint(1,l_MAXCROSSOVERPOINTS) # random, up to max
crossover_points=rn.sample(range(1,l_CHROMOSOMELENGTH), number_crossover_points) # pick random crossover points in gene, avoid first position (zero position)
    crossover_points=np.append([0],np.sort(crossover_points)) # zero appended at front for calculation of interval to first crossover
    intervals=crossover_points[1:]-crossover_points[:-1] # this gives the number of elements in each section between crossover points
intervals=np.append([intervals],[l_CHROMOSOMELENGTH-np.amax(crossover_points)]) # adds in last interval of last cross-over to end of gene
# Build boolean arrays for cross-overs
current_bool=True # sub sections will be made up of repeats of boolean true or false, start with true
# empty list required for append
selection1=[]
    for interval in intervals: # interval is the interval between crossovers (stored in 'intervals')
new_section=np.repeat(current_bool,interval) # create subsection of true or false
current_bool=not current_bool # swap true to false and vice versa
selection1=np.append([selection1],[new_section]) # add the new section to the existing array
selection1=
|
np.array([selection1],dtype=bool)
|
numpy.array
|
import datetime
import itertools
from typing import Sequence, Any, Union, Optional, Tuple, Callable
from warnings import warn
import numpy as np
import torch
from torch import Tensor
from torch.utils.data import TensorDataset, DataLoader, ConcatDataset, Dataset
from torchcast.internals.utils import ragged_cat, true1d_idx
class TimeSeriesDataset(TensorDataset):
"""
:class:`.TimeSeriesDataset` includes additional information about each of the Tensors' dimensions: the name for
each group in the first dimension, the start (date)time (and optionally datetime-unit) for the second dimension,
and the name of the measures for the third dimension.
Note that unlike :class:`torch.utils.data.TensorDataset`, indexing a :class:`.TimeSeriesDataset` returns another
:class:`.TimeSeriesDataset`, not a tuple of tensors. So when using :class:`.TimeSeriesDataset`, use
:class:`.TimeSeriesDataLoader` (equivalent to ``DataLoader(collate_fn=TimeSeriesDataset.collate)``).
"""
_repr_attrs = ('sizes', 'measures')
def __init__(self,
*tensors: Tensor,
group_names: Sequence[Any],
start_times: Union[np.ndarray, Sequence],
measures: Sequence[Sequence[str]],
dt_unit: Optional[str]):
if not isinstance(group_names, np.ndarray):
group_names = np.array(group_names)
assert len(group_names) == len(set(group_names))
assert len(group_names) == len(start_times)
assert len(tensors) == len(measures)
for i, (tensor, tensor_measures) in enumerate(zip(tensors, measures)):
if len(tensor.shape) < 3:
raise ValueError(f"Tensor {i} has < 3 dimensions")
if tensor.shape[0] != len(group_names):
raise ValueError(f"Tensor {i}'s first dimension has length != {len(group_names)}.")
if tensor.shape[2] != len(tensor_measures):
raise ValueError(f"Tensor {i}'s 3rd dimension has length != len({tensor_measures}).")
self.measures = tuple(tuple(m) for m in measures)
self.all_measures = tuple(itertools.chain.from_iterable(self.measures))
self.group_names = group_names
self.dt_unit = None
if dt_unit:
if not isinstance(dt_unit, np.timedelta64):
dt_unit = np.timedelta64(1, dt_unit)
self.dt_unit = dt_unit
start_times = np.asanyarray(start_times)
if self.dt_unit:
assert len(start_times.shape) == 1
if isinstance(start_times[0], (np.datetime64, datetime.datetime)):
start_times = np.array(start_times, dtype='datetime64')
else:
raise ValueError("`dt_unit` is not None but `start_times` is not an array of datetimes")
else:
if not isinstance(start_times[0], int) and not float(start_times[0]).is_integer():
raise ValueError(
f"`dt_unit` is None but `start_times` does not appear to be integers "
f"(e.g. start_times[0] is {start_times[0]})."
)
self.start_times = start_times
super().__init__(*tensors)
def to(self, *args, **kwargs) -> 'TimeSeriesDataset':
new_tensors = [x.to(*args, **kwargs) for x in self.tensors]
return self.with_new_tensors(*new_tensors)
def __repr__(self) -> str:
kwargs = []
for k in self._repr_attrs:
v = getattr(self, k)
if isinstance(v, Tensor):
v = v.size()
kwargs.append("{}={!r}".format(k, v))
return "{}({})".format(type(self).__name__, ", ".join(kwargs))
@property
def sizes(self) -> Sequence:
return [t.size() for t in self.tensors]
# Subsetting ------------------------:
@torch.no_grad()
def train_val_split(self,
train_frac: float = None,
dt: Union[np.datetime64, dict] = None) -> Tuple['TimeSeriesDataset', 'TimeSeriesDataset']:
"""
:param train_frac: The proportion of the data to keep for training. This is calculated on a per-group basis, by
        taking the last observation for each group (i.e., the last observation that has a non-nan value on any measure). If
neither `train_frac` nor `dt` are passed, ``train_frac=.75`` is used.
:param dt: A datetime to use in dividing train/validation (first datetime for validation), or a dictionary of
group-names : date-times.
:return: Two ``TimeSeriesDatasets``, one with data before the split, the other with >= the split.
"""
# get split times:
if dt is None:
if train_frac is None:
train_frac = .75
assert 0 < train_frac < 1
# for each group, find the last non-nan, take `frac` of that to find the train/val split point:
split_idx = np.array([int(idx * train_frac) for idx in self._last_measured_idx()], dtype='int')
_times = self.times(0)
split_times = np.array([_times[i, t] for i, t in enumerate(split_idx)])
else:
if train_frac is not None:
raise TypeError("Can pass only one of `train_frac`, `dt`.")
if isinstance(dt, dict):
split_times = np.array([dt[group_name] for group_name in self.group_names],
dtype='datetime64[ns]' if self.dt_unit else 'int')
else:
if self.dt_unit:
if hasattr(dt, 'to_datetime64'):
dt = dt.to_datetime64()
if not isinstance(dt, np.datetime64):
dt = np.datetime64(dt, self.dt_unit)
split_times = np.full(shape=len(self.group_names), fill_value=dt)
# val:
val_dataset = self.with_new_start_times(split_times)
# train:
train_tensors = []
for i, tens in enumerate(self.tensors):
train = tens.clone()
train[np.where(self.times(i) >= split_times[:, None])] = float('nan')
if i == 0:
not_all_nan = (~torch.isnan(train)).sum((0, 2))
last_good_idx = true1d_idx(not_all_nan).max()
train = train[:, :(last_good_idx + 1), :]
train_tensors.append(train)
# TODO: replace padding nans for all but first tensor?
# TODO: reduce width of 0> tensors based on width of 0 tensor?
train_dataset = self.with_new_tensors(*train_tensors)
return train_dataset, val_dataset
def with_new_start_times(self, start_times: Union[np.ndarray, Sequence]) -> 'TimeSeriesDataset':
"""
Subset a :class:`.TimeSeriesDataset` so that some/all of the groups have later start times.
:param start_times: An array/list of new datetimes.
:return: A new :class:`.TimeSeriesDataset`.
"""
new_tensors = []
for i, tens in enumerate(self.tensors):
times = self.times(i)
new_tens = []
for g, (new_time, old_times) in enumerate(zip(start_times, times)):
if (old_times <= new_time).all():
warn(f"{new_time} is later than all the times for group {self.group_names[g]}")
new_tens.append(tens[[g], 0:0])
continue
elif (old_times > new_time).all():
warn(f"{new_time} is earlier than all the times for group {self.group_names[g]}")
new_tens.append(tens[[g], 0:0])
continue
# drop if before new_time:
g_tens = tens[g, true1d_idx(old_times >= new_time)]
# drop if after last nan:
all_nan, _ = torch.min(torch.isnan(g_tens), 1)
if all_nan.all():
warn(f"Group '{self.group_names[g]}' (tensor {i}) has only `nans` after {new_time}")
end_idx = 0
else:
end_idx = true1d_idx(~all_nan).max() + 1
new_tens.append(g_tens[:end_idx].unsqueeze(0))
new_tens = ragged_cat(new_tens, ragged_dim=1, cat_dim=0)
new_tensors.append(new_tens)
return type(self)(
*new_tensors,
group_names=self.group_names,
start_times=start_times,
measures=self.measures,
dt_unit=self.dt_unit
)
def get_groups(self, groups: Sequence[Any]) -> 'TimeSeriesDataset':
"""
Get the subset of the batch corresponding to groups. Note that the ordering in the output will match the
        original ordering (not that of `groups`), and that duplicates will be dropped.
"""
group_idx = true1d_idx(np.isin(self.group_names, groups))
return self[group_idx]
def split_measures(self, *measure_groups, which: Optional[int] = None) -> 'TimeSeriesDataset':
"""
Take a dataset with one tensor, split it into a dataset with multiple tensors.
        :param measure_groups: Each argument should be a list of measure-names, or an indexer (i.e. list of ints or
a slice).
:param which: If there are already multiple measure groups, the split will occur within one of them; must
specify which.
:return: A :class:`.TimeSeriesDataset`, now with multiple tensors for the measure-groups.
"""
if which is None:
if len(self.measures) > 1:
raise RuntimeError(f"Must pass `which` if there's more than one groups:\n{self.measures}")
which = 0
self_tensor = self.tensors[which]
self_measures = self.measures[which]
idxs = []
for measure_group in measure_groups:
if isinstance(measure_group, slice) or isinstance(measure_group[0], int):
idxs.append(measure_group)
else:
idxs.append([self_measures.index(m) for m in measure_group])
self_measures = np.array(self_measures)
return type(self)(
*(self_tensor[:, :, idx].clone() for idx in idxs),
start_times=self.start_times,
group_names=self.group_names,
measures=[tuple(self_measures[idx]) for idx in idxs],
dt_unit=self.dt_unit
)
def __getitem__(self, item: Union[int, Sequence, slice]) -> 'TimeSeriesDataset':
if isinstance(item, int):
item = [item]
return type(self)(
*super(TimeSeriesDataset, self).__getitem__(item),
group_names=self.group_names[item],
start_times=self.start_times[item],
measures=self.measures,
dt_unit=self.dt_unit
)
# Creation/Transformation ------------------------:
@classmethod
def make_collate_fn(cls, pad_X: Optional[float] = 0.) -> Callable:
def collate_fn(batch: Sequence['TimeSeriesDataset']) -> 'TimeSeriesDataset':
to_concat = {
'tensors': [batch[0].tensors],
'group_names': [batch[0].group_names],
'start_times': [batch[0].start_times]
}
fixed = {'dt_unit': batch[0].dt_unit, 'measures': batch[0].measures}
for i, ts_dataset in enumerate(batch[1:], 1):
for attr, appendlist in to_concat.items():
to_concat[attr].append(getattr(ts_dataset, attr))
for attr, required_val in fixed.items():
new_val = getattr(ts_dataset, attr)
if new_val != required_val:
raise ValueError(
f"Element {i} has `{attr}` = {new_val}, but for element 0 it's {required_val}."
)
tensors = []
for i, t in enumerate(zip(*to_concat['tensors'])):
tensors.append(ragged_cat(t, ragged_dim=1, padding=None if i == 0 else pad_X))
return cls(
*tensors,
group_names=np.concatenate(to_concat['group_names']),
start_times=
|
np.concatenate(to_concat['start_times'])
|
numpy.concatenate
|
def run_mean(S, radius):
"""
Use:
run_mean(S, radius)
Computes the running average, using a method from
https://stackoverflow.com/questions/14313510/how-to-calculate-moving-average-using-numpy
Input:
S: Signal to be averaged. ............ (N,) np array
radius: Window size is 2*radius+1. ... int
Output:
"""
import numpy as np
window = 2*radius+1
rm =
|
np.cumsum(S, dtype=float)
|
numpy.cumsum
|
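# Hedged sketch (added): the run_mean snippet above is truncated, but the cumulative-sum trick it
# references (the linked StackOverflow method) typically completes along the lines below. The
# function name and variable names here are illustrative and may not match the original code.
def run_mean_sketch(S, radius):
    """Running mean of S with window 2*radius+1, via the cumulative-sum method."""
    import numpy as np
    window = 2 * radius + 1
    rm = np.cumsum(S, dtype=float)
    rm[window:] = rm[window:] - rm[:-window]   # windowed sums
    return rm[window - 1:] / window            # one mean per full window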
# -*- coding: utf-8 -*-
# transformations.py
# Copyright (c) 2006, <NAME>
# Copyright (c) 2006-2009, The Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Homogeneous Transformation Matrices and Quaternions.
A library for calculating 4x4 matrices for translating, rotating, reflecting,
scaling, shearing, projecting, orthogonalizing, and superimposing arrays of
3D homogeneous coordinates as well as for converting between rotation matrices,
Euler angles, and quaternions. Also includes an Arcball control object and
functions to decompose transformation matrices.
:Authors:
`<NAME> <http://www.lfd.uci.edu/~gohlke/>`__,
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 20090418
Requirements
------------
* `Python 2.6 <http://www.python.org>`__
* `Numpy 1.3 <http://numpy.scipy.org>`__
* `transformations.c 20090418 <http://www.lfd.uci.edu/~gohlke/>`__
(optional implementation of some functions in C)
Notes
-----
Matrices (M) can be inverted using numpy.linalg.inv(M), concatenated using
numpy.dot(M0, M1), or used to transform homogeneous coordinates (v) using
numpy.dot(M, v) for shape (4, \*) "point of arrays", respectively
numpy.dot(v, M.T) for shape (\*, 4) "array of points".
Calculations are carried out with numpy.float64 precision.
This Python implementation is not optimized for speed.
Vector, point, quaternion, and matrix function arguments are expected to be
"array like", i.e. tuple, list, or numpy arrays.
Return types are numpy arrays unless specified otherwise.
Angles are in radians unless specified otherwise.
Quaternions ix+jy+kz+w are represented as [x, y, z, w].
Use the transpose of transformation matrices for OpenGL glMultMatrixd().
A triple of Euler angles can be applied/interpreted in 24 ways, which can
be specified using a 4 character string or encoded 4-tuple:
*Axes 4-string*: e.g. 'sxyz' or 'ryxy'
- first character : rotations are applied to 's'tatic or 'r'otating frame
- remaining characters : successive rotation axis 'x', 'y', or 'z'
*Axes 4-tuple*: e.g. (0, 0, 0, 0) or (1, 1, 1, 1)
- inner axis: code of axis ('x':0, 'y':1, 'z':2) of rightmost matrix.
- parity : even (0) if inner axis 'x' is followed by 'y', 'y' is followed
by 'z', or 'z' is followed by 'x'. Otherwise odd (1).
- repetition : first and last axis are same (1) or different (0).
- frame : rotations are applied to static (0) or rotating (1) frame.
References
----------
(1) Matrices and transformations. <NAME>.
In "Graphics Gems I", pp 472-475. <NAME>, 1990.
(2) More matrices and transformations: shear and pseudo-perspective.
<NAME>. In "Graphics Gems II", pp 320-323. <NAME>, 1991.
(3) Decomposing a matrix into simple transformations. <NAME>.
In "Graphics Gems II", pp 320-323. <NAME>, 1991.
(4) Recovering the data from the transformation matrix. <NAME>.
In "Graphics Gems II", pp 324-331. <NAME>, 1991.
(5) Euler angle conversion. <NAME>.
In "Graphics Gems IV", pp 222-229. <NAME>, 1994.
(6) Arcball rotation control. <NAME>.
In "Graphics Gems IV", pp 175-192. <NAME>, 1994.
(7) Representing attitude: Euler angles, unit quaternions, and rotation
vectors. <NAME>. 2006.
(8) A discussion of the solution for the best rotation to relate two sets
of vectors. <NAME>. Acta Cryst. 1978. A34, 827-828.
(9) Closed-form solution of absolute orientation using unit quaternions.
BKP Horn. J Opt Soc Am A. 1987. 4(4), 629-642.
(10) Quaternions. <NAME>ake.
http://www.sfu.ca/~jwa3/cmpt461/files/quatut.pdf
(11) From quaternion to matrix and back. <NAME>. 2005.
http://www.intel.com/cd/ids/developer/asmo-na/eng/293748.htm
(12) Uniform random rotations. <NAME>.
In "Graphics Gems III", pp 124-132. <NAME>, 1992.
Examples
--------
>>> alpha, beta, gamma = 0.123, -1.234, 2.345
>>> origin, xaxis, yaxis, zaxis = (0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)
>>> I = identity_matrix()
>>> Rx = rotation_matrix(alpha, xaxis)
>>> Ry = rotation_matrix(beta, yaxis)
>>> Rz = rotation_matrix(gamma, zaxis)
>>> R = concatenate_matrices(Rx, Ry, Rz)
>>> euler = euler_from_matrix(R, 'rxyz')
>>> numpy.allclose([alpha, beta, gamma], euler)
True
>>> Re = euler_matrix(alpha, beta, gamma, 'rxyz')
>>> is_same_transform(R, Re)
True
>>> al, be, ga = euler_from_matrix(Re, 'rxyz')
>>> is_same_transform(Re, euler_matrix(al, be, ga, 'rxyz'))
True
>>> qx = quaternion_about_axis(alpha, xaxis)
>>> qy = quaternion_about_axis(beta, yaxis)
>>> qz = quaternion_about_axis(gamma, zaxis)
>>> q = quaternion_multiply(qx, qy)
>>> q = quaternion_multiply(q, qz)
>>> Rq = quaternion_matrix(q)
>>> is_same_transform(R, Rq)
True
>>> S = scale_matrix(1.23, origin)
>>> T = translation_matrix((1, 2, 3))
>>> Z = shear_matrix(beta, xaxis, origin, zaxis)
>>> R = random_rotation_matrix(numpy.random.rand(3))
>>> M = concatenate_matrices(T, R, Z, S)
>>> scale, shear, angles, trans, persp = decompose_matrix(M)
>>> numpy.allclose(scale, 1.23)
True
>>> numpy.allclose(trans, (1, 2, 3))
True
>>> numpy.allclose(shear, (0, math.tan(beta), 0))
True
>>> is_same_transform(R, euler_matrix(axes='sxyz', *angles))
True
>>> M1 = compose_matrix(scale, shear, angles, trans, persp)
>>> is_same_transform(M, M1)
True
"""
from __future__ import division
import warnings
import math
import numpy
# Documentation in HTML format can be generated with Epydoc
__docformat__ = "restructuredtext en"
def skew(v):
"""Returns the skew-symmetric matrix of a vector
cfo, 2015/08/13
"""
return numpy.array([[0, -v[2], v[1]],
[v[2], 0, -v[0]],
[-v[1], v[0], 0]], dtype=numpy.float64)
def unskew(R):
"""Returns the coordinates of a skew-symmetric matrix
cfo, 2015/08/13
"""
return numpy.array([R[2,1], R[0,2], R[1,0]], dtype=numpy.float64)
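# Illustrative check (added, not part of the original module): skew(v) applied to a vector w
# reproduces the cross product v x w, and unskew inverts skew. The values below are arbitrary.
def _demo_skew_cross():
    v = numpy.array([0.1, -0.2, 0.3])
    w = numpy.array([1.0, 2.0, 3.0])
    assert numpy.allclose(numpy.dot(skew(v), w), numpy.cross(v, w))
    assert numpy.allclose(unskew(skew(v)), v)
    return True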
def first_order_rotation(rotvec):
"""First order approximation of a rotation: I + skew(rotvec)
cfo, 2015/08/13
"""
R = numpy.zeros((3,3), dtype=numpy.float64)
R[0,0] = 1.0
R[1,0] = rotvec[2]
R[2,0] = -rotvec[1]
R[0,1] = -rotvec[2]
R[1,1] = 1.0
R[2,1] = rotvec[0]
R[0,2] = rotvec[1]
R[1,2] = -rotvec[0]
R[2,2] = 1.0
return R
def axis_angle(axis, theta):
"""Compute a rotation matrix from an axis and an angle.
Returns 3x3 Matrix.
Is the same as transformations.rotation_matrix(theta, axis).
cfo, 2015/08/13
"""
if theta*theta > _EPS:
wx = axis[0]; wy = axis[1]; wz = axis[2]
costheta = numpy.cos(theta); sintheta = numpy.sin(theta)
c_1 = 1.0 - costheta
wx_sintheta = wx * sintheta
wy_sintheta = wy * sintheta
wz_sintheta = wz * sintheta
C00 = c_1 * wx * wx
C01 = c_1 * wx * wy
C02 = c_1 * wx * wz
C11 = c_1 * wy * wy
C12 = c_1 * wy * wz
C22 = c_1 * wz * wz
R = numpy.zeros((3,3), dtype=numpy.float64)
R[0,0] = costheta + C00;
R[1,0] = wz_sintheta + C01;
R[2,0] = -wy_sintheta + C02;
R[0,1] = -wz_sintheta + C01;
R[1,1] = costheta + C11;
R[2,1] = wx_sintheta + C12;
R[0,2] = wy_sintheta + C02;
R[1,2] = -wx_sintheta + C12;
R[2,2] = costheta + C22;
return R
else:
return first_order_rotation(axis*theta)
def expmap_so3(rotvec):
"""Exponential map at identity.
Create a rotation from canonical coordinates using Rodrigues' formula.
cfo, 2015/08/13
"""
theta = numpy.linalg.norm(rotvec)
axis = rotvec/theta
return axis_angle(axis, theta)
def logmap_so3(R):
"""Logmap at the identity.
Returns canonical coordinates of rotation.
cfo, 2015/08/13
"""
R11 = R[0, 0]; R12 = R[0, 1]; R13 = R[0, 2]
R21 = R[1, 0]; R22 = R[1, 1]; R23 = R[1, 2]
R31 = R[2, 0]; R32 = R[2, 1]; R33 = R[2, 2]
tr = numpy.trace(R)
omega = numpy.empty((3,), dtype=numpy.float64)
# when trace == -1, i.e., when theta = +-pi, +-3pi, +-5pi, we do something
# special
if(numpy.abs(tr + 1.0) < 1e-10):
if(numpy.abs(R33 + 1.0) > 1e-10):
omega = (numpy.pi / numpy.sqrt(2.0 + 2.0 * R33)) * numpy.array([R13, R23, 1.0+R33])
elif(numpy.abs(R22 + 1.0) > 1e-10):
omega = (numpy.pi / numpy.sqrt(2.0 + 2.0 * R22)) * numpy.array([R12, 1.0+R22, R32])
else:
omega = (numpy.pi / numpy.sqrt(2.0 + 2.0 * R11)) * numpy.array([1.0+R11, R21, R31])
else:
magnitude = 1.0
tr_3 = tr - 3.0
if tr_3 < -1e-7:
theta = numpy.arccos((tr - 1.0) / 2.0)
magnitude = theta / (2.0 * numpy.sin(theta))
else:
# when theta near 0, +-2pi, +-4pi, etc. (trace near 3.0)
# use Taylor expansion: theta \approx 1/2-(t-3)/12 + O((t-3)^2)
magnitude = 0.5 - tr_3 * tr_3 / 12.0;
omega = magnitude * numpy.array([R32 - R23, R13 - R31, R21 - R12])
return omega
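# Illustrative check (added, not part of the original module): expmap_so3 and logmap_so3 are
# mutual inverses for rotation vectors with norm below pi. The value below is arbitrary.
def _demo_expmap_logmap_roundtrip():
    rotvec = numpy.array([0.1, 0.2, -0.3])
    return numpy.allclose(logmap_so3(expmap_so3(rotvec)), rotvec)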
def right_jacobian_so3(rotvec):
"""Right Jacobian for Exponential map in SO(3)
Equation (10.86) and following equations in <NAME>, "Stochastic
Models, Information Theory, and Lie Groups", Volume 2, 2008.
> expmap_so3(thetahat + omega) \approx expmap_so3(thetahat) * expmap_so3(Jr * omega)
where Jr = right_jacobian_so3(thetahat);
This maps a perturbation in the tangent space (omega) to a perturbation
on the manifold (expmap_so3(Jr * omega))
cfo, 2015/08/13
"""
theta2 = numpy.dot(rotvec, rotvec)
if theta2 <= _EPS:
return numpy.identity(3, dtype=numpy.float64)
else:
theta = numpy.sqrt(theta2)
Y = skew(rotvec) / theta
I_3x3 = numpy.identity(3, dtype=numpy.float64)
J_r = I_3x3 - ((1.0 - numpy.cos(theta)) / theta) * Y + (1.0 - numpy.sin(theta) / theta) * numpy.dot(Y, Y)
return J_r
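# Illustrative check (added, not part of the original module): numerically verify the
# right-Jacobian relation quoted in the docstring above,
#   expmap_so3(theta + omega)  ~  expmap_so3(theta) . expmap_so3(Jr(theta) . omega),
# for a small perturbation. Values are arbitrary; the match is only first-order accurate in |omega|.
def _demo_right_jacobian_so3():
    theta = numpy.array([0.3, -0.2, 0.5])
    omega = numpy.array([1e-5, 2e-5, -1e-5])
    lhs = expmap_so3(theta + omega)
    rhs = numpy.dot(expmap_so3(theta), expmap_so3(numpy.dot(right_jacobian_so3(theta), omega)))
    return numpy.allclose(lhs, rhs, atol=1e-8)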
def S_inv_eulerZYX_body(euler_coordinates):
""" Relates angular rates w to changes in eulerZYX coordinates.
dot(euler) = S^-1(euler_coordinates) * omega
Also called: rotation-rate matrix. (E in Lupton paper)
cfo, 2015/08/13
"""
y = euler_coordinates[1]
z = euler_coordinates[2]
E = numpy.zeros((3,3))
E[0,1] = numpy.sin(z)/numpy.cos(y)
E[0,2] = numpy.cos(z)/numpy.cos(y)
E[1,1] = numpy.cos(z)
E[1,2] = -numpy.sin(z)
E[2,0] = 1.0
E[2,1] = numpy.sin(z)*numpy.sin(y)/numpy.cos(y)
E[2,2] = numpy.cos(z)*numpy.sin(y)/numpy.cos(y)
return E
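# Minimal sanity sketch (illustrative, not part of the original module): with all
# Euler coordinates zero the rate matrix reduces to a pure permutation, so the
# Euler-angle rates are just the body rates reordered.
def _example_S_inv_at_zero_angles():
    E = S_inv_eulerZYX_body(numpy.zeros(3))
    assert numpy.allclose(E, numpy.array([[0.0, 0.0, 1.0],
                                          [0.0, 1.0, 0.0],
                                          [1.0, 0.0, 0.0]]))
    return E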
def S_inv_eulerZYX_body_deriv(euler_coordinates, omega):
""" Compute dE(euler_coordinates)*omega/deuler_coordinates
cfo, 2015/08/13
"""
y = euler_coordinates[1]
z = euler_coordinates[2]
"""
w1 = omega[0]; w2 = omega[1]; w3 = omega[2]
J = numpy.zeros((3,3))
J[0,0] = 0
J[0,1] = math.tan(y) / math.cos(y) * (math.sin(z) * w2 + math.cos(z) * w3)
J[0,2] = w2/math.cos(y)*math.cos(z) - w3/math.cos(y)*math.sin(z)
J[1,0] = 0
J[1,1] = 0
J[1,2] = -w2*math.sin(z) - w3*math.cos(z)
J[2,0] = w1
J[2,1] = 1.0/math.cos(y)**2 * (w2 * math.sin(z) + w3 * math.cos(z))
J[2,2] = w2*math.tan(y)*math.cos(z) - w3*math.tan(y)*math.sin(z)
"""
#second version, x = psi, y = theta, z = phi
# J_x = numpy.zeros((3,3))
J_y = numpy.zeros((3,3))
J_z = numpy.zeros((3,3))
# dE^-1/dtheta
J_y[0,1] = math.tan(y)/math.cos(y)*math.sin(z)
J_y[0,2] = math.tan(y)/math.cos(y)*math.cos(z)
J_y[2,1] = math.sin(z)/(math.cos(y))**2
J_y[2,2] = math.cos(z)/(math.cos(y))**2
# dE^-1/dphi
J_z[0,1] = math.cos(z)/math.cos(y)
J_z[0,2] = -math.sin(z)/math.cos(y)
J_z[1,1] = -math.sin(z)
J_z[1,2] = -math.cos(z)
J_z[2,1] = math.cos(z)*math.tan(y)
J_z[2,2] = -math.sin(z)*math.tan(y)
J = numpy.zeros((3,3))
J[:,1] = numpy.dot(J_y, omega)
J[:,2] = numpy.dot(J_z, omega)
return J
def identity_matrix():
"""Return 4x4 identity/unit matrix.
>>> I = identity_matrix()
>>> numpy.allclose(I, numpy.dot(I, I))
True
>>> numpy.sum(I), numpy.trace(I)
(4.0, 4.0)
>>> numpy.allclose(I, numpy.identity(4, dtype=numpy.float64))
True
"""
return numpy.identity(4, dtype=numpy.float64)
def translation_matrix(direction):
"""Return matrix to translate by direction vector.
>>> v = numpy.random.random(3) - 0.5
>>> numpy.allclose(v, translation_matrix(v)[:3, 3])
True
"""
M = numpy.identity(4)
M[:3, 3] = direction[:3]
return M
def translation_from_matrix(matrix):
"""Return translation vector from translation matrix.
>>> v0 = numpy.random.random(3) - 0.5
>>> v1 = translation_from_matrix(translation_matrix(v0))
>>> numpy.allclose(v0, v1)
True
"""
return numpy.array(matrix, copy=False)[:3, 3].copy()
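# convert_3x3_to_4x4: embed a 3x3 rotation block into a homogeneous 4x4 transform
# with zero translation (the inverse of taking M[:3, :3]).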
def convert_3x3_to_4x4(matrix_3x3):
M = numpy.identity(4)
M[:3,:3] = matrix_3x3
return M
def reflection_matrix(point, normal):
"""Return matrix to mirror at plane defined by point and normal vector.
>>> v0 = numpy.random.random(4) - 0.5
>>> v0[3] = 1.0
>>> v1 = numpy.random.random(3) - 0.5
>>> R = reflection_matrix(v0, v1)
>>> numpy.allclose(2., numpy.trace(R))
True
>>> numpy.allclose(v0, numpy.dot(R, v0))
True
>>> v2 = v0.copy()
>>> v2[:3] += v1
>>> v3 = v0.copy()
    >>> v3[:3] -= v1
>>> numpy.allclose(v2, numpy.dot(R, v3))
True
"""
normal = unit_vector(normal[:3])
M = numpy.identity(4)
M[:3, :3] -= 2.0 * numpy.outer(normal, normal)
M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal
return M
def reflection_from_matrix(matrix):
"""Return mirror plane point and normal vector from reflection matrix.
>>> v0 = numpy.random.random(3) - 0.5
>>> v1 = numpy.random.random(3) - 0.5
>>> M0 = reflection_matrix(v0, v1)
>>> point, normal = reflection_from_matrix(M0)
>>> M1 = reflection_matrix(point, normal)
>>> is_same_transform(M0, M1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
# normal: unit eigenvector corresponding to eigenvalue -1
l, V = numpy.linalg.eig(M[:3, :3])
i = numpy.where(abs(numpy.real(l) + 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue -1")
normal = numpy.real(V[:, i[0]]).squeeze()
# point: any unit eigenvector corresponding to eigenvalue 1
l, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
return point, normal
def rotation_matrix(angle, direction, point=None):
"""Return matrix to rotate about axis defined by point and direction.
>>> angle = (random.random() - 0.5) * (2*math.pi)
>>> direc = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> R0 = rotation_matrix(angle, direc, point)
>>> R1 = rotation_matrix(angle-2*math.pi, direc, point)
>>> is_same_transform(R0, R1)
True
>>> R0 = rotation_matrix(angle, direc, point)
>>> R1 = rotation_matrix(-angle, -direc, point)
>>> is_same_transform(R0, R1)
True
>>> I = numpy.identity(4, numpy.float64)
>>> numpy.allclose(I, rotation_matrix(math.pi*2, direc))
True
>>> numpy.allclose(2., numpy.trace(rotation_matrix(math.pi/2,
... direc, point)))
True
"""
sina = math.sin(angle)
cosa = math.cos(angle)
direction = unit_vector(direction[:3])
# rotation matrix around unit vector
R = numpy.array(((cosa, 0.0, 0.0),
(0.0, cosa, 0.0),
(0.0, 0.0, cosa)), dtype=numpy.float64)
R += numpy.outer(direction, direction) * (1.0 - cosa)
direction *= sina
R += numpy.array((( 0.0, -direction[2], direction[1]),
( direction[2], 0.0, -direction[0]),
(-direction[1], direction[0], 0.0)),
dtype=numpy.float64)
M = numpy.identity(4)
M[:3, :3] = R
if point is not None:
# rotation not around origin
point = numpy.array(point[:3], dtype=numpy.float64, copy=False)
M[:3, 3] = point - numpy.dot(R, point)
return M
def rotation_from_matrix(matrix):
"""Return rotation angle and axis from rotation matrix.
>>> angle = (random.random() - 0.5) * (2*math.pi)
>>> direc = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> R0 = rotation_matrix(angle, direc, point)
>>> angle, direc, point = rotation_from_matrix(R0)
>>> R1 = rotation_matrix(angle, direc, point)
>>> is_same_transform(R0, R1)
True
"""
R = numpy.array(matrix, dtype=numpy.float64, copy=False)
R33 = R[:3, :3]
# direction: unit eigenvector of R33 corresponding to eigenvalue of 1
l, W = numpy.linalg.eig(R33.T)
i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
direction = numpy.real(W[:, i[-1]]).squeeze()
    # point: unit eigenvector of R corresponding to eigenvalue of 1
l, Q = numpy.linalg.eig(R)
i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
point = numpy.real(Q[:, i[-1]]).squeeze()
point /= point[3]
# rotation angle depending on direction
cosa = (numpy.trace(R33) - 1.0) / 2.0
if abs(direction[2]) > 1e-8:
sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2]
elif abs(direction[1]) > 1e-8:
sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1]
else:
sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0]
angle = math.atan2(sina, cosa)
return angle, direction, point
def scale_matrix(factor, origin=None, direction=None):
"""Return matrix to scale by factor around origin in direction.
Use factor -1 for point symmetry.
>>> v = (numpy.random.rand(4, 5) - 0.5) * 20.0
>>> v[3] = 1.0
>>> S = scale_matrix(-1.234)
>>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3])
True
>>> factor = random.random() * 10 - 5
>>> origin = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> S = scale_matrix(factor, origin)
>>> S = scale_matrix(factor, origin, direct)
"""
if direction is None:
# uniform scaling
M = numpy.array(((factor, 0.0, 0.0, 0.0),
(0.0, factor, 0.0, 0.0),
(0.0, 0.0, factor, 0.0),
(0.0, 0.0, 0.0, 1.0)), dtype=numpy.float64)
if origin is not None:
M[:3, 3] = origin[:3]
M[:3, 3] *= 1.0 - factor
else:
# nonuniform scaling
direction = unit_vector(direction[:3])
factor = 1.0 - factor
M = numpy.identity(4)
M[:3, :3] -= factor * numpy.outer(direction, direction)
if origin is not None:
M[:3, 3] = (factor * numpy.dot(origin[:3], direction)) * direction
return M
def scale_from_matrix(matrix):
"""Return scaling factor, origin and direction from scaling matrix.
>>> factor = random.random() * 10 - 5
>>> origin = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> S0 = scale_matrix(factor, origin)
>>> factor, origin, direction = scale_from_matrix(S0)
>>> S1 = scale_matrix(factor, origin, direction)
>>> is_same_transform(S0, S1)
True
>>> S0 = scale_matrix(factor, origin, direct)
>>> factor, origin, direction = scale_from_matrix(S0)
>>> S1 = scale_matrix(factor, origin, direction)
>>> is_same_transform(S0, S1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
M33 = M[:3, :3]
factor = numpy.trace(M33) - 2.0
try:
# direction: unit eigenvector corresponding to eigenvalue factor
l, V = numpy.linalg.eig(M33)
i = numpy.where(abs(numpy.real(l) - factor) < 1e-8)[0][0]
direction = numpy.real(V[:, i]).squeeze()
direction /= vector_norm(direction)
except IndexError:
# uniform scaling
factor = (factor + 2.0) / 3.0
direction = None
# origin: any eigenvector corresponding to eigenvalue 1
l, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no eigenvector corresponding to eigenvalue 1")
origin = numpy.real(V[:, i[-1]]).squeeze()
origin /= origin[3]
return factor, origin, direction
def projection_matrix(point, normal, direction=None,
perspective=None, pseudo=False):
"""Return matrix to project onto plane defined by point and normal.
Using either perspective point, projection direction, or none of both.
If pseudo is True, perspective projections will preserve relative depth
such that Perspective = dot(Orthogonal, PseudoPerspective).
>>> P = projection_matrix((0, 0, 0), (1, 0, 0))
>>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:])
True
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> persp = numpy.random.random(3) - 0.5
>>> P0 = projection_matrix(point, normal)
>>> P1 = projection_matrix(point, normal, direction=direct)
>>> P2 = projection_matrix(point, normal, perspective=persp)
>>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True)
>>> is_same_transform(P2, numpy.dot(P0, P3))
True
>>> P = projection_matrix((3, 0, 0), (1, 1, 0), (1, 0, 0))
>>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20.0
>>> v0[3] = 1.0
>>> v1 = numpy.dot(P, v0)
>>> numpy.allclose(v1[1], v0[1])
True
>>> numpy.allclose(v1[0], 3.0-v1[1])
True
"""
M = numpy.identity(4)
point = numpy.array(point[:3], dtype=numpy.float64, copy=False)
normal = unit_vector(normal[:3])
if perspective is not None:
# perspective projection
perspective = numpy.array(perspective[:3], dtype=numpy.float64,
copy=False)
M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective-point, normal)
M[:3, :3] -= numpy.outer(perspective, normal)
if pseudo:
# preserve relative depth
M[:3, :3] -= numpy.outer(normal, normal)
M[:3, 3] = numpy.dot(point, normal) * (perspective+normal)
else:
M[:3, 3] = numpy.dot(point, normal) * perspective
M[3, :3] = -normal
M[3, 3] = numpy.dot(perspective, normal)
elif direction is not None:
# parallel projection
direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False)
scale = numpy.dot(direction, normal)
M[:3, :3] -= numpy.outer(direction, normal) / scale
M[:3, 3] = direction * (numpy.dot(point, normal) / scale)
else:
# orthogonal projection
M[:3, :3] -= numpy.outer(normal, normal)
M[:3, 3] = numpy.dot(point, normal) * normal
return M
def projection_from_matrix(matrix, pseudo=False):
"""Return projection plane and perspective point from projection matrix.
Return values are same as arguments for projection_matrix function:
point, normal, direction, perspective, and pseudo.
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> persp = numpy.random.random(3) - 0.5
>>> P0 = projection_matrix(point, normal)
>>> result = projection_from_matrix(P0)
>>> P1 = projection_matrix(*result)
>>> is_same_transform(P0, P1)
True
>>> P0 = projection_matrix(point, normal, direct)
>>> result = projection_from_matrix(P0)
>>> P1 = projection_matrix(*result)
>>> is_same_transform(P0, P1)
True
>>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False)
>>> result = projection_from_matrix(P0, pseudo=False)
>>> P1 = projection_matrix(*result)
>>> is_same_transform(P0, P1)
True
>>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True)
>>> result = projection_from_matrix(P0, pseudo=True)
>>> P1 = projection_matrix(*result)
>>> is_same_transform(P0, P1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
M33 = M[:3, :3]
l, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
if not pseudo and len(i):
# point: any eigenvector corresponding to eigenvalue 1
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
# direction: unit eigenvector corresponding to eigenvalue 0
l, V = numpy.linalg.eig(M33)
i = numpy.where(abs(numpy.real(l)) < 1e-8)[0]
if not len(i):
raise ValueError("no eigenvector corresponding to eigenvalue 0")
direction = numpy.real(V[:, i[0]]).squeeze()
direction /= vector_norm(direction)
# normal: unit eigenvector of M33.T corresponding to eigenvalue 0
l, V = numpy.linalg.eig(M33.T)
i = numpy.where(abs(numpy.real(l)) < 1e-8)[0]
if len(i):
# parallel projection
normal = numpy.real(V[:, i[0]]).squeeze()
normal /= vector_norm(normal)
return point, normal, direction, None, False
else:
# orthogonal projection, where normal equals direction vector
return point, direction, None, None, False
else:
# perspective projection
i = numpy.where(abs(numpy.real(l)) > 1e-8)[0]
if not len(i):
raise ValueError(
"no eigenvector not corresponding to eigenvalue 0")
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
normal = - M[3, :3]
perspective = M[:3, 3] / numpy.dot(point[:3], normal)
if pseudo:
perspective -= normal
return point, normal, None, perspective, pseudo
def clip_matrix(left, right, bottom, top, near, far, perspective=False):
"""Return matrix to obtain normalized device coordinates from frustrum.
The frustrum bounds are axis-aligned along x (left, right),
y (bottom, top) and z (near, far).
Normalized device coordinates are in range [-1, 1] if coordinates are
inside the frustrum.
If perspective is True the frustrum is a truncated pyramid with the
perspective point at origin and direction along z axis, otherwise an
orthographic canonical view volume (a box).
Homogeneous coordinates transformed by the perspective clip matrix
    need to be dehomogenized (divided by w coordinate).
>>> frustrum = numpy.random.rand(6)
>>> frustrum[1] += frustrum[0]
>>> frustrum[3] += frustrum[2]
>>> frustrum[5] += frustrum[4]
>>> M = clip_matrix(*frustrum, perspective=False)
>>> numpy.dot(M, [frustrum[0], frustrum[2], frustrum[4], 1.0])
array([-1., -1., -1., 1.])
>>> numpy.dot(M, [frustrum[1], frustrum[3], frustrum[5], 1.0])
array([ 1., 1., 1., 1.])
>>> M = clip_matrix(*frustrum, perspective=True)
>>> v = numpy.dot(M, [frustrum[0], frustrum[2], frustrum[4], 1.0])
>>> v / v[3]
array([-1., -1., -1., 1.])
>>> v = numpy.dot(M, [frustrum[1], frustrum[3], frustrum[4], 1.0])
>>> v / v[3]
array([ 1., 1., -1., 1.])
"""
if left >= right or bottom >= top or near >= far:
raise ValueError("invalid frustrum")
if perspective:
if near <= _EPS:
raise ValueError("invalid frustrum: near <= 0")
t = 2.0 * near
M = ((-t/(right-left), 0.0, (right+left)/(right-left), 0.0),
(0.0, -t/(top-bottom), (top+bottom)/(top-bottom), 0.0),
(0.0, 0.0, -(far+near)/(far-near), t*far/(far-near)),
(0.0, 0.0, -1.0, 0.0))
else:
M = ((2.0/(right-left), 0.0, 0.0, (right+left)/(left-right)),
(0.0, 2.0/(top-bottom), 0.0, (top+bottom)/(bottom-top)),
(0.0, 0.0, 2.0/(far-near), (far+near)/(near-far)),
(0.0, 0.0, 0.0, 1.0))
return numpy.array(M, dtype=numpy.float64)
def shear_matrix(angle, direction, point, normal):
"""Return matrix to shear by angle along direction vector on shear plane.
The shear plane is defined by a point and normal vector. The direction
vector must be orthogonal to the plane's normal vector.
A point P is transformed by the shear matrix into P" such that
the vector P-P" is parallel to the direction vector and its extent is
given by the angle of P-P'-P", where P' is the orthogonal projection
of P onto the shear plane.
>>> angle = (random.random() - 0.5) * 4*math.pi
>>> direct = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.cross(direct, numpy.random.random(3))
>>> S = shear_matrix(angle, direct, point, normal)
>>> numpy.allclose(1.0, numpy.linalg.det(S))
True
"""
normal = unit_vector(normal[:3])
direction = unit_vector(direction[:3])
if abs(numpy.dot(normal, direction)) > 1e-6:
raise ValueError("direction and normal vectors are not orthogonal")
angle = math.tan(angle)
M = numpy.identity(4)
M[:3, :3] += angle * numpy.outer(direction, normal)
M[:3, 3] = -angle * numpy.dot(point[:3], normal) * direction
return M
def shear_from_matrix(matrix):
"""Return shear angle, direction and plane from shear matrix.
>>> angle = (random.random() - 0.5) * 4*math.pi
>>> direct = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.cross(direct, numpy.random.random(3))
>>> S0 = shear_matrix(angle, direct, point, normal)
>>> angle, direct, point, normal = shear_from_matrix(S0)
>>> S1 = shear_matrix(angle, direct, point, normal)
>>> is_same_transform(S0, S1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
M33 = M[:3, :3]
# normal: cross independent eigenvectors corresponding to the eigenvalue 1
l, V = numpy.linalg.eig(M33)
i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-4)[0]
if len(i) < 2:
raise ValueError("No two linear independent eigenvectors found %s" % l)
V = numpy.real(V[:, i]).squeeze().T
lenorm = -1.0
for i0, i1 in ((0, 1), (0, 2), (1, 2)):
n = numpy.cross(V[i0], V[i1])
l = vector_norm(n)
if l > lenorm:
lenorm = l
normal = n
normal /= lenorm
# direction and angle
direction = numpy.dot(M33 - numpy.identity(3), normal)
angle = vector_norm(direction)
direction /= angle
angle = math.atan(angle)
# point: eigenvector corresponding to eigenvalue 1
l, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no eigenvector corresponding to eigenvalue 1")
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
return angle, direction, point, normal
def decompose_matrix(matrix):
"""Return sequence of transformations from transformation matrix.
matrix : array_like
Non-degenerative homogeneous transformation matrix
Return tuple of:
scale : vector of 3 scaling factors
shear : list of shear factors for x-y, x-z, y-z axes
angles : list of Euler angles about static x, y, z axes
translate : translation vector along x, y, z axes
perspective : perspective partition of matrix
Raise ValueError if matrix is of wrong type or degenerative.
>>> T0 = translation_matrix((1, 2, 3))
>>> scale, shear, angles, trans, persp = decompose_matrix(T0)
>>> T1 = translation_matrix(trans)
>>> numpy.allclose(T0, T1)
True
>>> S = scale_matrix(0.123)
>>> scale, shear, angles, trans, persp = decompose_matrix(S)
>>> scale[0]
0.123
>>> R0 = euler_matrix(1, 2, 3)
>>> scale, shear, angles, trans, persp = decompose_matrix(R0)
>>> R1 = euler_matrix(*angles)
>>> numpy.allclose(R0, R1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=True).T
if abs(M[3, 3]) < _EPS:
raise ValueError("M[3, 3] is zero")
M /= M[3, 3]
P = M.copy()
P[:, 3] = 0, 0, 0, 1
if not numpy.linalg.det(P):
raise ValueError("Matrix is singular")
scale = numpy.zeros((3, ), dtype=numpy.float64)
shear = [0, 0, 0]
angles = [0, 0, 0]
if any(abs(M[:3, 3]) > _EPS):
perspective = numpy.dot(M[:, 3], numpy.linalg.inv(P.T))
M[:, 3] = 0, 0, 0, 1
else:
perspective = numpy.array((0, 0, 0, 1), dtype=numpy.float64)
translate = M[3, :3].copy()
M[3, :3] = 0
row = M[:3, :3].copy()
scale[0] = vector_norm(row[0])
row[0] /= scale[0]
shear[0] = numpy.dot(row[0], row[1])
row[1] -= row[0] * shear[0]
scale[1] = vector_norm(row[1])
row[1] /= scale[1]
shear[0] /= scale[1]
shear[1] = numpy.dot(row[0], row[2])
row[2] -= row[0] * shear[1]
shear[2] = numpy.dot(row[1], row[2])
row[2] -= row[1] * shear[2]
scale[2] = vector_norm(row[2])
row[2] /= scale[2]
    shear[1] /= scale[2]
    shear[2] /= scale[2]
if numpy.dot(row[0], numpy.cross(row[1], row[2])) < 0:
scale *= -1
row *= -1
angles[1] = math.asin(-row[0, 2])
if math.cos(angles[1]):
angles[0] = math.atan2(row[1, 2], row[2, 2])
angles[2] = math.atan2(row[0, 1], row[0, 0])
else:
#angles[0] = math.atan2(row[1, 0], row[1, 1])
angles[0] = math.atan2(-row[2, 1], row[1, 1])
angles[2] = 0.0
return scale, shear, angles, translate, perspective
def compose_matrix(scale=None, shear=None, angles=None, translate=None,
perspective=None):
"""Return transformation matrix from sequence of transformations.
This is the inverse of the decompose_matrix function.
Sequence of transformations:
scale : vector of 3 scaling factors
shear : list of shear factors for x-y, x-z, y-z axes
angles : list of Euler angles about static x, y, z axes
translate : translation vector along x, y, z axes
perspective : perspective partition of matrix
>>> scale = numpy.random.random(3) - 0.5
>>> shear = numpy.random.random(3) - 0.5
>>> angles = (numpy.random.random(3) - 0.5) * (2*math.pi)
>>> trans = numpy.random.random(3) - 0.5
>>> persp = numpy.random.random(4) - 0.5
>>> M0 = compose_matrix(scale, shear, angles, trans, persp)
>>> result = decompose_matrix(M0)
>>> M1 = compose_matrix(*result)
>>> is_same_transform(M0, M1)
True
"""
M = numpy.identity(4)
if perspective is not None:
P = numpy.identity(4)
P[3, :] = perspective[:4]
M = numpy.dot(M, P)
if translate is not None:
T = numpy.identity(4)
T[:3, 3] = translate[:3]
M = numpy.dot(M, T)
if angles is not None:
R = euler_matrix(angles[0], angles[1], angles[2], 'sxyz')
M = numpy.dot(M, R)
if shear is not None:
Z = numpy.identity(4)
Z[1, 2] = shear[2]
Z[0, 2] = shear[1]
Z[0, 1] = shear[0]
M = numpy.dot(M, Z)
if scale is not None:
S = numpy.identity(4)
S[0, 0] = scale[0]
S[1, 1] = scale[1]
S[2, 2] = scale[2]
M = numpy.dot(M, S)
M /= M[3, 3]
return M
def orthogonalization_matrix(lengths, angles):
"""Return orthogonalization matrix for crystallographic cell coordinates.
Angles are expected in degrees.
The de-orthogonalization matrix is the inverse.
>>> O = orthogonalization_matrix((10., 10., 10.), (90., 90., 90.))
>>> numpy.allclose(O[:3, :3], numpy.identity(3, float) * 10)
True
>>> O = orthogonalization_matrix([9.8, 12.0, 15.5], [87.2, 80.7, 69.7])
>>> numpy.allclose(numpy.sum(O), 43.063229)
True
"""
a, b, c = lengths
angles = numpy.radians(angles)
sina, sinb, _ = numpy.sin(angles)
cosa, cosb, cosg = numpy.cos(angles)
co = (cosa * cosb - cosg) / (sina * sinb)
return numpy.array((
( a*sinb*math.sqrt(1.0-co*co), 0.0, 0.0, 0.0),
(-a*sinb*co, b*sina, 0.0, 0.0),
( a*cosb, b*cosa, c, 0.0),
( 0.0, 0.0, 0.0, 1.0)),
dtype=numpy.float64)
def superimposition_matrix(v0, v1, scaling=False, usesvd=True):
"""Return matrix to transform given vector set into second vector set.
v0 and v1 are shape (3, \*) or (4, \*) arrays of at least 3 vectors.
If usesvd is True, the weighted sum of squared deviations (RMSD) is
minimized according to the algorithm by <NAME> [8]. Otherwise the
quaternion based algorithm by <NAME> [9] is used (slower when using
this Python implementation).
The returned matrix performs rotation, translation and uniform scaling
(if specified).
>>> v0 = numpy.random.rand(3, 10)
>>> M = superimposition_matrix(v0, v0)
>>> numpy.allclose(M, numpy.identity(4))
True
>>> R = random_rotation_matrix(numpy.random.random(3))
>>> v0 = ((1,0,0), (0,1,0), (0,0,1), (1,1,1))
>>> v1 = numpy.dot(R, v0)
>>> M = superimposition_matrix(v0, v1)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20.0
>>> v0[3] = 1.0
>>> v1 = numpy.dot(R, v0)
>>> M = superimposition_matrix(v0, v1)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> S = scale_matrix(random.random())
>>> T = translation_matrix(numpy.random.random(3)-0.5)
>>> M = concatenate_matrices(T, R, S)
>>> v1 = numpy.dot(M, v0)
>>> v0[:3] += numpy.random.normal(0.0, 1e-9, 300).reshape(3, -1)
>>> M = superimposition_matrix(v0, v1, scaling=True)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> M = superimposition_matrix(v0, v1, scaling=True, usesvd=False)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> v = numpy.empty((4, 100, 3), dtype=numpy.float64)
>>> v[:, :, 0] = v0
>>> M = superimposition_matrix(v0, v1, scaling=True, usesvd=False)
>>> numpy.allclose(v1, numpy.dot(M, v[:, :, 0]))
True
"""
v0 = numpy.array(v0, dtype=numpy.float64, copy=False)[:3]
v1 = numpy.array(v1, dtype=numpy.float64, copy=False)[:3]
if v0.shape != v1.shape or v0.shape[1] < 3:
raise ValueError("Vector sets are of wrong shape or type.")
# move centroids to origin
t0 = numpy.mean(v0, axis=1)
t1 = numpy.mean(v1, axis=1)
v0 = v0 - t0.reshape(3, 1)
v1 = v1 - t1.reshape(3, 1)
if usesvd:
# Singular Value Decomposition of covariance matrix
u, s, vh = numpy.linalg.svd(numpy.dot(v1, v0.T))
# rotation matrix from SVD orthonormal bases
R = numpy.dot(u, vh)
if numpy.linalg.det(R) < 0.0:
# R does not constitute right handed system
R -= numpy.outer(u[:, 2], vh[2, :]*2.0)
s[-1] *= -1.0
# homogeneous transformation matrix
M = numpy.identity(4)
M[:3, :3] = R
else:
# compute symmetric matrix N
xx, yy, zz = numpy.sum(v0 * v1, axis=1)
xy, yz, zx = numpy.sum(v0 * numpy.roll(v1, -1, axis=0), axis=1)
xz, yx, zy = numpy.sum(v0 * numpy.roll(v1, -2, axis=0), axis=1)
N = ((xx+yy+zz, yz-zy, zx-xz, xy-yx),
(yz-zy, xx-yy-zz, xy+yx, zx+xz),
(zx-xz, xy+yx, -xx+yy-zz, yz+zy),
(xy-yx, zx+xz, yz+zy, -xx-yy+zz))
# quaternion: eigenvector corresponding to most positive eigenvalue
l, V = numpy.linalg.eig(N)
q = V[:, numpy.argmax(l)]
q /= vector_norm(q) # unit quaternion
q = numpy.roll(q, -1) # move w component to end
# homogeneous transformation matrix
M = quaternion_matrix(q)
# scale: ratio of rms deviations from centroid
if scaling:
v0 *= v0
v1 *= v1
M[:3, :3] *= math.sqrt(numpy.sum(v1) / numpy.sum(v0))
# translation
M[:3, 3] = t1
T = numpy.identity(4)
T[:3, 3] = -t0
M = numpy.dot(M, T)
return M
def euler_matrix(ai, aj, ak, axes='sxyz'):
"""Return homogeneous rotation matrix from Euler angles and axis sequence.
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
>>> R = euler_matrix(1, 2, 3, 'syxz')
>>> numpy.allclose(numpy.sum(R[0]), -1.34786452)
True
>>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1))
>>> numpy.allclose(numpy.sum(R[0]), -0.383436184)
True
>>> ai, aj, ak = (4.0*math.pi) * (numpy.random.random(3) - 0.5)
>>> for axes in _AXES2TUPLE.keys():
... R = euler_matrix(ai, aj, ak, axes)
>>> for axes in _TUPLE2AXES.keys():
... R = euler_matrix(ai, aj, ak, axes)
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes]
except (AttributeError, KeyError):
_ = _TUPLE2AXES[axes]
firstaxis, parity, repetition, frame = axes
i = firstaxis
j = _NEXT_AXIS[i+parity]
k = _NEXT_AXIS[i-parity+1]
if frame:
ai, ak = ak, ai
if parity:
ai, aj, ak = -ai, -aj, -ak
si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak)
ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak)
cc, cs = ci*ck, ci*sk
sc, ss = si*ck, si*sk
M = numpy.identity(4)
if repetition:
M[i, i] = cj
M[i, j] = sj*si
M[i, k] = sj*ci
M[j, i] = sj*sk
M[j, j] = -cj*ss+cc
M[j, k] = -cj*cs-sc
M[k, i] = -sj*ck
M[k, j] = cj*sc+cs
M[k, k] = cj*cc-ss
else:
M[i, i] = cj*ck
M[i, j] = sj*sc-cs
M[i, k] = sj*cc+ss
M[j, i] = cj*sk
M[j, j] = sj*ss+cc
M[j, k] = sj*cs-sc
M[k, i] = -sj
M[k, j] = cj*si
M[k, k] = cj*ci
return M
def euler_from_matrix(matrix, axes='sxyz'):
"""Return Euler angles from rotation matrix for specified axis sequence.
axes : One of 24 axis sequences as string or encoded tuple
Note that many Euler angle triplets can describe one matrix.
>>> R0 = euler_matrix(1, 2, 3, 'syxz')
>>> al, be, ga = euler_from_matrix(R0, 'syxz')
>>> R1 = euler_matrix(al, be, ga, 'syxz')
>>> numpy.allclose(R0, R1)
True
>>> angles = (4.0*math.pi) * (numpy.random.random(3) - 0.5)
>>> for axes in _AXES2TUPLE.keys():
... R0 = euler_matrix(axes=axes, *angles)
... R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes))
    ... if not numpy.allclose(R0, R1): print(axes, "failed")
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
except (AttributeError, KeyError):
_ = _TUPLE2AXES[axes]
firstaxis, parity, repetition, frame = axes
i = firstaxis
j = _NEXT_AXIS[i+parity]
k = _NEXT_AXIS[i-parity+1]
M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:3, :3]
if repetition:
sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k])
if sy > _EPS:
ax = math.atan2( M[i, j], M[i, k])
ay = math.atan2( sy, M[i, i])
az = math.atan2( M[j, i], -M[k, i])
else:
ax = math.atan2(-M[j, k], M[j, j])
ay = math.atan2( sy, M[i, i])
az = 0.0
else:
cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i])
if cy > _EPS:
ax = math.atan2( M[k, j], M[k, k])
ay = math.atan2(-M[k, i], cy)
az = math.atan2( M[j, i], M[i, i])
else:
ax = math.atan2(-M[j, k], M[j, j])
ay = math.atan2(-M[k, i], cy)
az = 0.0
if parity:
ax, ay, az = -ax, -ay, -az
if frame:
ax, az = az, ax
return ax, ay, az
def euler_from_quaternion(quaternion, axes='sxyz'):
"""Return Euler angles from quaternion for specified axis sequence.
>>> angles = euler_from_quaternion([0.06146124, 0, 0, 0.99810947])
>>> numpy.allclose(angles, [0.123, 0, 0])
True
"""
return euler_from_matrix(quaternion_matrix(quaternion), axes)
def quaternion_from_euler(ai, aj, ak, axes='sxyz'):
"""Return quaternion from Euler angles and axis sequence.
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
>>> q = quaternion_from_euler(1, 2, 3, 'ryxz')
>>> numpy.allclose(q, [0.310622, -0.718287, 0.444435, 0.435953])
True
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
except (AttributeError, KeyError):
_ = _TUPLE2AXES[axes]
firstaxis, parity, repetition, frame = axes
i = firstaxis
j = _NEXT_AXIS[i+parity]
k = _NEXT_AXIS[i-parity+1]
if frame:
ai, ak = ak, ai
if parity:
aj = -aj
ai /= 2.0
aj /= 2.0
ak /= 2.0
ci = math.cos(ai)
si = math.sin(ai)
cj = math.cos(aj)
sj = math.sin(aj)
ck = math.cos(ak)
sk = math.sin(ak)
cc = ci*ck
cs = ci*sk
sc = si*ck
ss = si*sk
quaternion = numpy.empty((4, ), dtype=numpy.float64)
if repetition:
quaternion[i] = cj*(cs + sc)
quaternion[j] = sj*(cc + ss)
quaternion[k] = sj*(cs - sc)
quaternion[3] = cj*(cc - ss)
else:
quaternion[i] = cj*sc - sj*cs
quaternion[j] = cj*ss + sj*cc
quaternion[k] = cj*cs - sj*sc
quaternion[3] = cj*cc + sj*ss
if parity:
quaternion[j] *= -1
return quaternion
def quaternion_about_axis(angle, axis):
"""Return quaternion for rotation about axis.
>>> q = quaternion_about_axis(0.123, (1, 0, 0))
>>> numpy.allclose(q, [0.06146124, 0, 0, 0.99810947])
True
"""
quaternion = numpy.zeros((4, ), dtype=numpy.float64)
quaternion[:3] = axis[:3]
qlen = vector_norm(quaternion)
if qlen > _EPS:
quaternion *= math.sin(angle/2.0) / qlen
quaternion[3] = math.cos(angle/2.0)
return quaternion
def matrix_from_quaternion(quaternion):
return quaternion_matrix(quaternion)
def quaternion_matrix(quaternion):
"""Return homogeneous rotation matrix from quaternion.
>>> R = quaternion_matrix([0.06146124, 0, 0, 0.99810947])
>>> numpy.allclose(R, rotation_matrix(0.123, (1, 0, 0)))
True
"""
q = numpy.array(quaternion[:4], dtype=numpy.float64, copy=True)
nq = numpy.dot(q, q)
if nq < _EPS:
return numpy.identity(4)
q *= math.sqrt(2.0 / nq)
q = numpy.outer(q, q)
return numpy.array((
(1.0-q[1, 1]-q[2, 2], q[0, 1]-q[2, 3], q[0, 2]+q[1, 3], 0.0),
( q[0, 1]+q[2, 3], 1.0-q[0, 0]-q[2, 2], q[1, 2]-q[0, 3], 0.0),
( q[0, 2]-q[1, 3], q[1, 2]+q[0, 3], 1.0-q[0, 0]-q[1, 1], 0.0),
( 0.0, 0.0, 0.0, 1.0)
), dtype=numpy.float64)
def quaternionJPL_matrix(quaternion):
"""Return homogeneous rotation matrix from quaternion in JPL notation.
quaternion = [x y z w]
"""
q0 = quaternion[0]
q1 = quaternion[1]
q2 = quaternion[2]
q3 = quaternion[3]
return numpy.array([
[ q0**2 - q1**2 - q2**2 + q3**2, 2.0*q0*q1 + 2.0*q2*q3, 2.0*q0*q2 - 2.0*q1*q3, 0],
[ 2.0*q0*q1 - 2.0*q2*q3, - q0**2 + q1**2 - q2**2 + q3**2, 2.0*q0*q3 + 2.0*q1*q2, 0],
[ 2.0*q0*q2 + 2.0*q1*q3, 2.0*q1*q2 - 2.0*q0*q3, - q0**2 - q1**2 + q2**2 + q3**2, 0],
[0, 0, 0, 1.0]], dtype=numpy.float64)
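# Illustrative sketch (not part of the original module): for the same [x, y, z, w]
# coefficients the JPL-convention matrix is the transpose of the Hamilton-convention
# rotation block returned by quaternion_matrix().
def _example_jpl_vs_hamilton():
    q = random_quaternion()
    R_jpl = quaternionJPL_matrix(q)
    R_ham = quaternion_matrix(q)
    assert numpy.allclose(R_jpl[:3, :3], R_ham[:3, :3].T)
    return R_jpl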
def quaternion_from_matrix(matrix):
"""Return quaternion from rotation matrix.
>>> R = rotation_matrix(0.123, (1, 2, 3))
>>> q = quaternion_from_matrix(R)
>>> numpy.allclose(q, [0.0164262, 0.0328524, 0.0492786, 0.9981095])
True
"""
q = numpy.empty((4, ), dtype=numpy.float64)
M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:4, :4]
t = numpy.trace(M)
if t > M[3, 3]:
q[3] = t
q[2] = M[1, 0] - M[0, 1]
q[1] = M[0, 2] - M[2, 0]
q[0] = M[2, 1] - M[1, 2]
else:
i, j, k = 0, 1, 2
if M[1, 1] > M[0, 0]:
i, j, k = 1, 2, 0
if M[2, 2] > M[i, i]:
i, j, k = 2, 0, 1
t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
q[i] = t
q[j] = M[i, j] + M[j, i]
q[k] = M[k, i] + M[i, k]
q[3] = M[k, j] - M[j, k]
q *= 0.5 / math.sqrt(t * M[3, 3])
return q
def quaternion_multiply(quaternion1, quaternion0):
"""Return multiplication of two quaternions.
>>> q = quaternion_multiply([1, -2, 3, 4], [-5, 6, 7, 8])
>>> numpy.allclose(q, [-44, -14, 48, 28])
True
"""
x0, y0, z0, w0 = quaternion0
x1, y1, z1, w1 = quaternion1
return numpy.array((
x1*w0 + y1*z0 - z1*y0 + w1*x0,
-x1*z0 + y1*w0 + z1*x0 + w1*y0,
x1*y0 - y1*x0 + z1*w0 + w1*z0,
-x1*x0 - y1*y0 - z1*z0 + w1*w0), dtype=numpy.float64)
def quaternion_conjugate(quaternion):
"""Return conjugate of quaternion.
>>> q0 = random_quaternion()
>>> q1 = quaternion_conjugate(q0)
>>> q1[3] == q0[3] and all(q1[:3] == -q0[:3])
True
"""
return numpy.array((-quaternion[0], -quaternion[1],
-quaternion[2], quaternion[3]), dtype=numpy.float64)
def quaternion_inverse(quaternion):
"""Return inverse of quaternion.
>>> q0 = random_quaternion()
>>> q1 = quaternion_inverse(q0)
>>> numpy.allclose(quaternion_multiply(q0, q1), [0, 0, 0, 1])
True
"""
return quaternion_conjugate(quaternion) / numpy.dot(quaternion, quaternion)
def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True):
"""Return spherical linear interpolation between two quaternions.
>>> q0 = random_quaternion()
>>> q1 = random_quaternion()
>>> q = quaternion_slerp(q0, q1, 0.0)
>>> numpy.allclose(q, q0)
True
>>> q = quaternion_slerp(q0, q1, 1.0, 1)
>>> numpy.allclose(q, q1)
True
>>> q = quaternion_slerp(q0, q1, 0.5)
>>> angle = math.acos(numpy.dot(q0, q))
>>> numpy.allclose(2.0, math.acos(numpy.dot(q0, q1)) / angle) or \
numpy.allclose(2.0, math.acos(-numpy.dot(q0, q1)) / angle)
True
"""
q0 = unit_vector(quat0[:4])
q1 = unit_vector(quat1[:4])
if fraction == 0.0:
return q0
elif fraction == 1.0:
return q1
d = numpy.dot(q0, q1)
if abs(abs(d) - 1.0) < _EPS:
return q0
if shortestpath and d < 0.0:
# invert rotation
d = -d
q1 *= -1.0
angle = math.acos(d) + spin * math.pi
if abs(angle) < _EPS:
return q0
isin = 1.0 / math.sin(angle)
q0 *= math.sin((1.0 - fraction) * angle) * isin
q1 *= math.sin(fraction * angle) * isin
q0 += q1
return q0
def random_quaternion(rand=None):
"""Return uniform random unit quaternion.
rand: array like or None
Three independent random variables that are uniformly distributed
between 0 and 1.
>>> q = random_quaternion()
>>> numpy.allclose(1.0, vector_norm(q))
True
>>> q = random_quaternion(numpy.random.random(3))
>>> q.shape
(4,)
"""
if rand is None:
rand = numpy.random.rand(3)
else:
assert len(rand) == 3
r1 = numpy.sqrt(1.0 - rand[0])
r2 = numpy.sqrt(rand[0])
pi2 = math.pi * 2.0
t1 = pi2 * rand[1]
t2 = pi2 * rand[2]
    return numpy.array((numpy.sin(t1)*r1,
                        numpy.cos(t1)*r1,
                        numpy.sin(t2)*r2,
                        numpy.cos(t2)*r2), dtype=numpy.float64)
import logging
import numpy as np
import xarray as xr
from datacube.model import Measurement
from datacube_stats.statistics import Statistic
from copy import copy
from .fast import smad, emad, bcmad, geomedian
LOG = logging.getLogger(__name__)
def sizefmt(num, suffix="B"):
for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, "Yi", suffix)
class CosineDistanceMAD(Statistic):
def __init__(self, num_threads=3):
super().__init__()
self.num_threads = num_threads
LOG.info("num_threads: %i", num_threads)
def compute(self, data: xr.Dataset) -> xr.Dataset:
squashed = data.to_array().transpose("y", "x", "time", "variable")
fdata = squashed.data.astype(np.float32) / 10000.
fdata[(squashed.data == -999)] = np.nan
fdata[(squashed.data == 0)] = np.nan
del squashed
LOG.info("Data array size: %s", sizefmt(fdata.nbytes))
mask = np.isnan(fdata).any(axis=2)
ndepth = np.count_nonzero(mask, axis=-1)
mindepth, mediandepth, maxdepth = np.min(ndepth), np.median(ndepth), np.max(ndepth)
LOG.info("data mindepth: %s maxdepth: %s mediandepth: %s", mindepth, maxdepth, mediandepth)
LOG.info("Computing geometric median mosaic")
gm = geomedian(fdata, num_threads=self.num_threads)
LOG.info("Computing spectral MAD mosaic")
dev = smad(fdata, gm, num_threads=self.num_threads)
da = xr.DataArray(dev, dims=("y", "x"), name="dev")
return xr.Dataset(data_vars={"dev": da})
def measurements(self, m):
mm = [Measurement(name="dev", dtype="float32", nodata=0, units="1")]
LOG.debug("Returning measurements: %s", mm)
return mm
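# Illustrative sketch (not part of the module): the same pipeline applied to a bare
# numpy array, bypassing xarray/datacube. Shapes follow compute() above:
# (y, x, time, band) float32 reflectance with NaN for nodata; assumes the .fast
# routines accept any array of that layout.
def _example_cosine_mad_on_array():
    fdata = np.random.rand(8, 8, 5, 4).astype(np.float32)
    gm = geomedian(fdata, num_threads=1)   # geometric median mosaic, per compute()
    dev = smad(fdata, gm, num_threads=1)   # (y, x) cosine-distance MAD, per compute()
    return dev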
class EuclideanDistanceMAD(Statistic):
def __init__(self, num_threads=3):
super().__init__()
self.num_threads = num_threads
LOG.info("num_threads: %i", num_threads)
def compute(self, data: xr.Dataset) -> xr.Dataset:
squashed = data.to_array().transpose("y", "x", "time", "variable")
fdata = squashed.data.astype(np.float32) / 10000.
fdata[(squashed.data == -999)] = np.nan
fdata[(squashed.data == 0)] = np.nan
del squashed
LOG.info("Data array size: %s", sizefmt(fdata.nbytes))
mask = np.isnan(fdata).any(axis=2)
ndepth = np.count_nonzero(mask, axis=-1)
        mindepth, mediandepth, maxdepth = np.min(ndepth), np.median(ndepth), np.max(ndepth)
import numpy as np
from funcs_thermo import latent_heat, pd_c, pv_c, sd_c, sv_c, cpm_c, theta_rho_c
from parameters import *
def buoyancy_flux(shf, lhf, T_b, qt_b, alpha0_0):
cp_ = cpm_c(qt_b)
lv = latent_heat(T_b)
return (g * alpha0_0 / cp_ / T_b * (shf + (eps_vi-1.0) * cp_ * T_b * lhf /lv))
def psi_m_unstable(zeta, zeta0):
x = (1.0 - gamma_m * zeta)**0.25
x0 = (1.0 - gamma_m * zeta0)**0.25
psi_m = (2.0 * np.log((1.0 + x)/(1.0 + x0)) + np.log((1.0 + x*x)/(1.0 + x0 * x0))
-2.0 * np.arctan(x) + 2.0 * np.arctan(x0))
return psi_m
def psi_h_unstable(zeta, zeta0):
y = np.sqrt(1.0 - gamma_h * zeta )
y0 = np.sqrt(1.0 - gamma_h * zeta0 )
psi_h = 2.0 * np.log((1.0 + y)/(1.0 + y0))
return psi_h
def psi_m_stable(zeta, zeta0):
psi_m = -beta_m * (zeta - zeta0)
return psi_m
def psi_h_stable(zeta, zeta0):
psi_h = -beta_h * (zeta - zeta0)
return psi_h
def entropy_flux(tflux,qtflux, p0_1, T_1, qt_1):
cp_1 = cpm_c(qt_1)
pd_1 = pd_c(p0_1, qt_1, qt_1)
pv_1 = pv_c(p0_1, qt_1, qt_1)
sd_1 = sd_c(pd_1, T_1)
sv_1 = sv_c(pv_1, T_1)
return cp_1*tflux/T_1 + qtflux*(sv_1-sd_1)
def compute_ustar(windspeed, buoyancy_flux, z0, z1) :
logz = np.log(z1 / z0)
#use neutral condition as first guess
ustar0 = windspeed * vkb / logz
ustar = ustar0
if (np.abs(buoyancy_flux) > 1.0e-20):
lmo = -ustar0 * ustar0 * ustar0 / (buoyancy_flux * vkb)
zeta = z1 / lmo
zeta0 = z0 / lmo
if (zeta >= 0.0):
f0 = windspeed - ustar0 / vkb * (logz - psi_m_stable(zeta, zeta0))
ustar1 = windspeed * vkb / (logz - psi_m_stable(zeta, zeta0))
lmo = -ustar1 * ustar1 * ustar1 / (buoyancy_flux * vkb)
zeta = z1 / lmo
zeta0 = z0 / lmo
f1 = windspeed - ustar1 / vkb * (logz - psi_m_stable(zeta, zeta0))
ustar = ustar1
delta_ustar = ustar1 -ustar0
while np.abs(delta_ustar) > 1e-3:
ustar_new = ustar1 - f1 * delta_ustar / (f1-f0)
f0 = f1
ustar0 = ustar1
ustar1 = ustar_new
lmo = -ustar1 * ustar1 * ustar1 / (buoyancy_flux * vkb)
zeta = z1 / lmo
zeta0 = z0 / lmo
f1 = windspeed - ustar1 / vkb * (logz - psi_m_stable(zeta, zeta0))
delta_ustar = ustar1 -ustar0
else: # b_flux nonzero, zeta is negative
f0 = windspeed - ustar0 / vkb * (logz - psi_m_unstable(zeta, zeta0))
ustar1 = windspeed * vkb / (logz - psi_m_unstable(zeta, zeta0))
lmo = -ustar1 * ustar1 * ustar1 / (buoyancy_flux * vkb)
zeta = z1 / lmo
zeta0 = z0 / lmo
f1 = windspeed - ustar1 / vkb * (logz - psi_m_unstable(zeta, zeta0))
ustar = ustar1
delta_ustar = ustar1 - ustar0
while np.abs(delta_ustar) > 1e-3:
ustar_new = ustar1 - f1 * delta_ustar / (f1 - f0)
f0 = f1
ustar0 = ustar1
ustar1 = ustar_new
lmo = -ustar1 * ustar1 * ustar1 / (buoyancy_flux * vkb)
zeta = z1 / lmo
zeta0 = z0 / lmo
f1 = windspeed - ustar1 / vkb * (logz - psi_m_unstable(zeta, zeta0))
                delta_ustar = ustar1 - ustar0
return ustar
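# Illustrative sketch (not part of the original module): with zero buoyancy flux
# compute_ustar falls through to the neutral log-law guess,
# ustar = vkb * windspeed / log(z1 / z0), with vkb taken from `parameters`.
def _example_neutral_ustar():
    return compute_ustar(5.0, 0.0, 0.01, 10.0)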
def exchange_coefficients_byun(Ri, zb, z0):
logz =
|
np.log(zb/z0)
|
numpy.log
|
"""
Test cases for the Comparisons class over the Chart elements
"""
from unittest import SkipTest, skipIf
import numpy as np
from holoviews.core import NdOverlay
from holoviews.core.options import Store
from holoviews.element import (
Area, BoxWhisker, Curve, Distribution, HSpan, Image, Points,
Rectangles, RGB, Scatter, Segments, Violin, VSpan, Path,
QuadMesh, Polygons
)
from holoviews.element.comparison import ComparisonTestCase
try:
import datashader as ds
except:
ds = None
try:
import spatialpandas as spd
except:
spd = None
try:
import shapely
except:
shapely = None
spd_available = skipIf(spd is None, "spatialpandas is not available")
shapelib_available = skipIf(shapely is None and spd is None,
'Neither shapely nor spatialpandas are available')
shapely_available = skipIf(shapely is None, 'shapely is not available')
ds_available = skipIf(ds is None, 'datashader not available')
class TestSelection1DExpr(ComparisonTestCase):
def setUp(self):
try:
import holoviews.plotting.bokeh # noqa
except:
raise SkipTest("Bokeh selection tests require bokeh.")
super().setUp()
self._backend = Store.current_backend
Store.set_current_backend('bokeh')
def tearDown(self):
Store.current_backend = self._backend
def test_area_selection_numeric(self):
area = Area([3, 2, 1, 3, 4])
expr, bbox, region = area._get_selection_expr_for_stream_value(bounds=(1, 0, 3, 2))
self.assertEqual(bbox, {'x': (1, 3)})
self.assertEqual(expr.apply(area), np.array([False, True, True, True, False]))
self.assertEqual(region, NdOverlay({0: VSpan(1, 3)}))
def test_area_selection_numeric_inverted(self):
area = Area([3, 2, 1, 3, 4]).opts(invert_axes=True)
expr, bbox, region = area._get_selection_expr_for_stream_value(bounds=(0, 1, 2, 3))
self.assertEqual(bbox, {'x': (1, 3)})
self.assertEqual(expr.apply(area), np.array([False, True, True, True, False]))
self.assertEqual(region, NdOverlay({0: HSpan(1, 3)}))
def test_area_selection_categorical(self):
area = Area((['B', 'A', 'C', 'D', 'E'], [3, 2, 1, 3, 4]))
expr, bbox, region = area._get_selection_expr_for_stream_value(
bounds=(0, 1, 2, 3), x_selection=['B', 'A', 'C']
)
self.assertEqual(bbox, {'x': ['B', 'A', 'C']})
self.assertEqual(expr.apply(area), np.array([True, True, True, False, False]))
self.assertEqual(region, NdOverlay({0: VSpan(0, 2)}))
def test_area_selection_numeric_index_cols(self):
area = Area([3, 2, 1, 3, 2])
expr, bbox, region = area._get_selection_expr_for_stream_value(
bounds=(1, 0, 3, 2), index_cols=['y']
)
self.assertEqual(bbox, {'x': (1, 3)})
self.assertEqual(expr.apply(area), np.array([False, True, True, False, True]))
self.assertEqual(region, None)
def test_curve_selection_numeric(self):
curve = Curve([3, 2, 1, 3, 4])
expr, bbox, region = curve._get_selection_expr_for_stream_value(bounds=(1, 0, 3, 2))
self.assertEqual(bbox, {'x': (1, 3)})
self.assertEqual(expr.apply(curve), np.array([False, True, True, True, False]))
self.assertEqual(region, NdOverlay({0: VSpan(1, 3)}))
def test_curve_selection_categorical(self):
curve = Curve((['B', 'A', 'C', 'D', 'E'], [3, 2, 1, 3, 4]))
expr, bbox, region = curve._get_selection_expr_for_stream_value(
bounds=(0, 1, 2, 3), x_selection=['B', 'A', 'C']
)
self.assertEqual(bbox, {'x': ['B', 'A', 'C']})
self.assertEqual(expr.apply(curve), np.array([True, True, True, False, False]))
self.assertEqual(region, NdOverlay({0: VSpan(0, 2)}))
def test_curve_selection_numeric_index_cols(self):
curve = Curve([3, 2, 1, 3, 2])
expr, bbox, region = curve._get_selection_expr_for_stream_value(
bounds=(1, 0, 3, 2), index_cols=['y']
)
self.assertEqual(bbox, {'x': (1, 3)})
self.assertEqual(expr.apply(curve), np.array([False, True, True, False, True]))
self.assertEqual(region, None)
def test_box_whisker_single(self):
box_whisker = BoxWhisker(list(range(10)))
expr, bbox, region = box_whisker._get_selection_expr_for_stream_value(
bounds=(0, 3, 1, 7)
)
self.assertEqual(bbox, {'y': (3, 7)})
self.assertEqual(expr.apply(box_whisker), np.array([
False, False, False, True, True, True, True, True, False, False
]))
self.assertEqual(region, NdOverlay({0: HSpan(3, 7)}))
def test_box_whisker_single_inverted(self):
box = BoxWhisker(list(range(10))).opts(invert_axes=True)
expr, bbox, region = box._get_selection_expr_for_stream_value(
bounds=(3, 0, 7, 1)
)
self.assertEqual(bbox, {'y': (3, 7)})
self.assertEqual(expr.apply(box), np.array([
False, False, False, True, True, True, True, True, False, False
]))
self.assertEqual(region, NdOverlay({0: VSpan(3, 7)}))
def test_box_whisker_cats(self):
box_whisker = BoxWhisker((['A', 'A', 'A', 'B', 'B', 'C', 'C', 'C', 'C', 'C'], list(range(10))), 'x', 'y')
expr, bbox, region = box_whisker._get_selection_expr_for_stream_value(
bounds=(0, 1, 2, 7), x_selection=['A', 'B']
)
self.assertEqual(bbox, {'y': (1, 7), 'x': ['A', 'B']})
self.assertEqual(expr.apply(box_whisker), np.array([
False, True, True, True, True, False, False, False, False, False
]))
self.assertEqual(region, NdOverlay({0: HSpan(1, 7)}))
def test_box_whisker_cats_index_cols(self):
box_whisker = BoxWhisker((['A', 'A', 'A', 'B', 'B', 'C', 'C', 'C', 'C', 'C'], list(range(10))), 'x', 'y')
expr, bbox, region = box_whisker._get_selection_expr_for_stream_value(
bounds=(0, 1, 2, 7), x_selection=['A', 'B'], index_cols=['x']
)
self.assertEqual(bbox, {'y': (1, 7), 'x': ['A', 'B']})
self.assertEqual(expr.apply(box_whisker), np.array([
True, True, True, True, True, False, False, False, False, False
]))
self.assertEqual(region, None)
def test_violin_single(self):
violin = Violin(list(range(10)))
expr, bbox, region = violin._get_selection_expr_for_stream_value(
bounds=(0, 3, 1, 7)
)
self.assertEqual(bbox, {'y': (3, 7)})
self.assertEqual(expr.apply(violin), np.array([
False, False, False, True, True, True, True, True, False, False
]))
self.assertEqual(region, NdOverlay({0: HSpan(3, 7)}))
def test_violin_single_inverted(self):
violin = Violin(list(range(10))).opts(invert_axes=True)
expr, bbox, region = violin._get_selection_expr_for_stream_value(
bounds=(3, 0, 7, 1)
)
self.assertEqual(bbox, {'y': (3, 7)})
self.assertEqual(expr.apply(violin), np.array([
False, False, False, True, True, True, True, True, False, False
]))
self.assertEqual(region, NdOverlay({0: VSpan(3, 7)}))
def test_violin_cats(self):
violin = Violin((['A', 'A', 'A', 'B', 'B', 'C', 'C', 'C', 'C', 'C'], list(range(10))), 'x', 'y')
expr, bbox, region = violin._get_selection_expr_for_stream_value(
bounds=(0, 1, 2, 7), x_selection=['A', 'B']
)
self.assertEqual(bbox, {'y': (1, 7), 'x': ['A', 'B']})
self.assertEqual(expr.apply(violin), np.array([
False, True, True, True, True, False, False, False, False, False
]))
self.assertEqual(region, NdOverlay({0: HSpan(1, 7)}))
def test_violin_cats_index_cols(self):
violin = Violin((['A', 'A', 'A', 'B', 'B', 'C', 'C', 'C', 'C', 'C'], list(range(10))), 'x', 'y')
expr, bbox, region = violin._get_selection_expr_for_stream_value(
bounds=(0, 1, 2, 7), x_selection=['A', 'B'], index_cols=['x']
)
self.assertEqual(bbox, {'y': (1, 7), 'x': ['A', 'B']})
self.assertEqual(expr.apply(violin), np.array([
True, True, True, True, True, False, False, False, False, False
]))
self.assertEqual(region, None)
def test_distribution_single(self):
dist = Distribution(list(range(10)))
expr, bbox, region = dist._get_selection_expr_for_stream_value(
bounds=(3, 0, 7, 1)
)
self.assertEqual(bbox, {'Value': (3, 7)})
self.assertEqual(expr.apply(dist), np.array([
False, False, False, True, True, True, True, True, False, False
]))
self.assertEqual(region, NdOverlay({0: VSpan(3, 7)}))
def test_distribution_single_inverted(self):
dist = Distribution(list(range(10))).opts(invert_axes=True)
expr, bbox, region = dist._get_selection_expr_for_stream_value(
bounds=(0, 3, 1, 7)
)
self.assertEqual(bbox, {'Value': (3, 7)})
self.assertEqual(expr.apply(dist), np.array([
False, False, False, True, True, True, True, True, False, False
]))
self.assertEqual(region, NdOverlay({0: HSpan(3, 7)}))
class TestSelection2DExpr(ComparisonTestCase):
def setUp(self):
try:
import holoviews.plotting.bokeh # noqa
except:
raise SkipTest("Bokeh selection tests require bokeh.")
super().setUp()
self._backend = Store.current_backend
Store.set_current_backend('bokeh')
def tearDown(self):
Store.current_backend = self._backend
def test_points_selection_numeric(self):
points = Points([3, 2, 1, 3, 4])
expr, bbox, region = points._get_selection_expr_for_stream_value(bounds=(1, 0, 3, 2))
self.assertEqual(bbox, {'x': (1, 3), 'y': (0, 2)})
self.assertEqual(expr.apply(points), np.array([False, True, True, False, False]))
self.assertEqual(region, Rectangles([(1, 0, 3, 2)]) * Path([]))
def test_points_selection_numeric_inverted(self):
points = Points([3, 2, 1, 3, 4]).opts(invert_axes=True)
expr, bbox, region = points._get_selection_expr_for_stream_value(bounds=(0, 1, 2, 3))
self.assertEqual(bbox, {'x': (1, 3), 'y': (0, 2)})
self.assertEqual(expr.apply(points), np.array([False, True, True, False, False]))
self.assertEqual(region, Rectangles([(0, 1, 2, 3)]) * Path([]))
@shapelib_available
def test_points_selection_geom(self):
points = Points([3, 2, 1, 3, 4])
geom = np.array([(-0.1, -0.1), (1.4, 0), (1.4, 2.2), (-0.1, 2.2)])
expr, bbox, region = points._get_selection_expr_for_stream_value(geometry=geom)
self.assertEqual(bbox, {'x': np.array([-0.1, 1.4, 1.4, -0.1]),
'y': np.array([-0.1, 0, 2.2, 2.2])})
self.assertEqual(expr.apply(points), np.array([False, True, False, False, False]))
self.assertEqual(region, Rectangles([]) * Path([list(geom)+[(-0.1, -0.1)]]))
@shapelib_available
def test_points_selection_geom_inverted(self):
points = Points([3, 2, 1, 3, 4]).opts(invert_axes=True)
geom = np.array([(-0.1, -0.1), (1.4, 0), (1.4, 2.2), (-0.1, 2.2)])
expr, bbox, region = points._get_selection_expr_for_stream_value(geometry=geom)
self.assertEqual(bbox, {'y': np.array([-0.1, 1.4, 1.4, -0.1]),
'x': np.array([-0.1, 0, 2.2, 2.2])})
self.assertEqual(expr.apply(points), np.array([False, False, True, False, False]))
self.assertEqual(region, Rectangles([]) * Path([list(geom)+[(-0.1, -0.1)]]))
def test_points_selection_categorical(self):
points = Points((['B', 'A', 'C', 'D', 'E'], [3, 2, 1, 3, 4]))
expr, bbox, region = points._get_selection_expr_for_stream_value(
bounds=(0, 1, 2, 3), x_selection=['B', 'A', 'C'], y_selection=None
)
self.assertEqual(bbox, {'x': ['B', 'A', 'C'], 'y': (1, 3)})
self.assertEqual(expr.apply(points), np.array([True, True, True, False, False]))
self.assertEqual(region, Rectangles([(0, 1, 2, 3)]) * Path([]))
def test_points_selection_numeric_index_cols(self):
points = Points([3, 2, 1, 3, 2])
expr, bbox, region = points._get_selection_expr_for_stream_value(
bounds=(1, 0, 3, 2), index_cols=['y']
)
self.assertEqual(bbox, {'x': (1, 3), 'y': (0, 2)})
self.assertEqual(expr.apply(points), np.array([False, False, True, False, False]))
self.assertEqual(region, None)
def test_scatter_selection_numeric(self):
scatter = Scatter([3, 2, 1, 3, 4])
expr, bbox, region = scatter._get_selection_expr_for_stream_value(bounds=(1, 0, 3, 2))
self.assertEqual(bbox, {'x': (1, 3), 'y': (0, 2)})
self.assertEqual(expr.apply(scatter), np.array([False, True, True, False, False]))
self.assertEqual(region, Rectangles([(1, 0, 3, 2)]) * Path([]))
def test_scatter_selection_numeric_inverted(self):
scatter = Scatter([3, 2, 1, 3, 4]).opts(invert_axes=True)
expr, bbox, region = scatter._get_selection_expr_for_stream_value(bounds=(0, 1, 2, 3))
self.assertEqual(bbox, {'x': (1, 3), 'y': (0, 2)})
self.assertEqual(expr.apply(scatter), np.array([False, True, True, False, False]))
self.assertEqual(region, Rectangles([(0, 1, 2, 3)]) * Path([]))
def test_scatter_selection_categorical(self):
scatter = Scatter((['B', 'A', 'C', 'D', 'E'], [3, 2, 1, 3, 4]))
expr, bbox, region = scatter._get_selection_expr_for_stream_value(
bounds=(0, 1, 2, 3), x_selection=['B', 'A', 'C'], y_selection=None
)
self.assertEqual(bbox, {'x': ['B', 'A', 'C'], 'y': (1, 3)})
self.assertEqual(expr.apply(scatter), np.array([True, True, True, False, False]))
self.assertEqual(region, Rectangles([(0, 1, 2, 3)]) * Path([]))
def test_scatter_selection_numeric_index_cols(self):
scatter = Scatter([3, 2, 1, 3, 2])
expr, bbox, region = scatter._get_selection_expr_for_stream_value(
bounds=(1, 0, 3, 2), index_cols=['y']
)
self.assertEqual(bbox, {'x': (1, 3), 'y': (0, 2)})
self.assertEqual(expr.apply(scatter), np.array([False, False, True, False, False]))
self.assertEqual(region, None)
def test_image_selection_numeric(self):
img = Image(([0, 1, 2], [0, 1, 2, 3], np.random.rand(4, 3)))
expr, bbox, region = img._get_selection_expr_for_stream_value(bounds=(0.5, 1.5, 2.1, 3.1))
self.assertEqual(bbox, {'x': (0.5, 2.1), 'y': (1.5, 3.1)})
self.assertEqual(expr.apply(img, expanded=True, flat=False), np.array([
[False, False, False],
[False, False, False],
[False, True, True],
[False, True, True]
]))
self.assertEqual(region, Rectangles([(0.5, 1.5, 2.1, 3.1)]) * Path([]))
def test_image_selection_numeric_inverted(self):
img = Image(([0, 1, 2], [0, 1, 2, 3], np.random.rand(4, 3))).opts(invert_axes=True)
expr, bbox, region = img._get_selection_expr_for_stream_value(bounds=(1.5, 0.5, 3.1, 2.1))
self.assertEqual(bbox, {'x': (0.5, 2.1), 'y': (1.5, 3.1)})
self.assertEqual(expr.apply(img, expanded=True, flat=False), np.array([
[False, False, False],
[False, False, False],
[False, True, True],
[False, True, True]
]))
self.assertEqual(region, Rectangles([(1.5, 0.5, 3.1, 2.1)]) * Path([]))
@ds_available
@spd_available
def test_img_selection_geom(self):
img = Image(([0, 1, 2], [0, 1, 2, 3], np.random.rand(4, 3)))
geom = np.array([(-0.4, -0.1), (0.6, -0.1), (0.4, 1.7), (-0.1, 1.7)])
expr, bbox, region = img._get_selection_expr_for_stream_value(geometry=geom)
self.assertEqual(bbox, {'x': np.array([-0.4, 0.6, 0.4, -0.1]),
'y': np.array([-0.1, -0.1, 1.7, 1.7])})
self.assertEqual(expr.apply(img, expanded=True, flat=False), np.array([
[ 1., np.nan, np.nan],
[ 1., np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]
]))
self.assertEqual(region, Rectangles([]) * Path([list(geom)+[(-0.4, -0.1)]]))
@ds_available
def test_img_selection_geom_inverted(self):
img = Image(([0, 1, 2], [0, 1, 2, 3], np.random.rand(4, 3))).opts(invert_axes=True)
geom = np.array([(-0.4, -0.1), (0.6, -0.1), (0.4, 1.7), (-0.1, 1.7)])
expr, bbox, region = img._get_selection_expr_for_stream_value(geometry=geom)
        self.assertEqual(bbox, {'y': np.array([-0.4, 0.6, 0.4, -0.1]),
                                'x': np.array([-0.1, -0.1, 1.7, 1.7])})
from django.contrib.auth.models import User
from django_q.tasks import async_task
from app.models import Document, Similarity
from binfile.distance_measurement import cosine_sim, cosine_similarity, jaccard_similarity, dice_similarity, mahalanobis_distance, \
euclidean_distance, minkowski_distance, manhattan_distance, weighted_euclidean_distance, weighted_euclidean_distances
from django.conf import settings
import glob
import re
import os
import json
from math import *
import numpy as np
from django.core.files.base import ContentFile
from django.core.files import File
import asyncio, time
from functools import wraps
from concurrent import futures
IGNORE = [
"env",
".local",
".git",
]
_DEFAULT_POOL = futures.ThreadPoolExecutor(max_workers=4)
def threadpool(f, executor=None):
@wraps(f)
def wrap(*args, **kwargs):
asyncio.set_event_loop(asyncio.new_event_loop())
return asyncio.wrap_future((executor or _DEFAULT_POOL).submit(f, *args, **kwargs))
return wrap
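# Illustrative sketch (hypothetical helper, not part of the original module):
# decorating a plain function pushes each call onto the shared thread pool and
# returns an asyncio future wrapping the concurrent.futures result.
@threadpool
def _example_background_sum(a, b):
    time.sleep(0.1)  # stand-in for slow work
    return a + b
# future = _example_background_sum(1, 2)  # awaitable from within a running event loop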
def process_time(id, type="PROCESS", start=None):
if start:
elapsed = time.time() - start
msg = "FINISHED, {}, {}, {}".format(type, id, elapsed)
with open(os.path.join(settings.MEDIA_ROOT, 'logging.csv'), 'a') as f:
f.write(msg+'\n')
print(msg)
return elapsed, msg
else:
start = time.time()
msg = "START, {}, {}, {}".format(type, id, start)
# with open(os.path.join(settings.MEDIA_ROOT, 'logging.csv'), 'a') as f:
# f.write(msg+'\n')
print(msg)
return start, msg
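# Usage sketch (mirrors the calls further below; doc_id is a hypothetical value):
# pair a start call with a finish call to log the elapsed time for a given id, e.g.
#     start, _ = process_time(doc_id, type="DEMO")
#     ...do the work...
#     elapsed, _ = process_time(doc_id, type="DEMO", start=start)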
# for datasets
def process_path(path, username='admin'):
if not path.endswith("/"):
path = path + "/"
if not path.endswith("**"):
path = path + "**"
import time
start = time.time()
for filename in glob.iglob(path, recursive=True):
print("Checking", filename)
will_ignore = False
for ignore in IGNORE:
if re.match(ignore, filename):
print("IGNORING", filename)
will_ignore = True
break
if not will_ignore:
print("PROCESSING", filename)
# async(process_file_by_path, filename)
process_file_by_path(filename, username)
print("=== Finished on ", time.time() - start)
return path
def projson():
from os import path
pathh = path.join(settings.MEDIA_ROOT, 'export')
if not pathh.endswith("/"):
pathh = pathh + "/"
if not pathh.endswith("**"):
pathh = pathh + "**"
import time
start = time.time()
arrai = []
for filename in glob.iglob(pathh, recursive=True):
print("Checking", filename)
will_ignore = False
for ignore in IGNORE:
if re.match(ignore, filename):
print("IGNORING", filename)
will_ignore = True
break
if path.isfile(filename) and not will_ignore and not filename.endswith('merged.json'):
print("PROCESSING", filename)
aa = []
with open(filename, 'r') as fa:
aa = fa.read()
aa = json.loads(aa)
arrai = arrai + aa
with open(path.join(settings.MEDIA_ROOT, 'export', 'merged.json'), 'w+') as ff:
ff.write(json.dumps(arrai))
print("=== Finished on ", time.time() - start)
return pathh
@threadpool
def process_file_by_path(path, username='admin'):
_, ext = os.path.splitext(path)
original_filename = os.path.basename(path)
if ext not in [".pdf", ".docx", ".doc"]:
return
try:
sf = Document.objects.get(content=path) # filename
except Document.DoesNotExist:
# If it does NOT have an entry, create one
sf = Document()
start, _ = process_time(sf.id.hex, type="DATASET FILE")
if not os.path.isfile(path):
print("NOT A FILE", path)
return
user = User.objects.get(username=username) # static
sf.user = user
with open(path, mode="rb") as file:
sf.content.save(name=os.path.basename(path), content=file)
sf.original_filename = original_filename
sf.is_dataset = True
sf.save()
finishing_dataset(sf.id)
process_time(sf.id.hex, type="DATASET FILE", start=start)
os.remove(path)
print("DELETE FILE ", path)
return sf.id
@threadpool
def finishing_dataset(id):
try:
sf = Document.objects.get(id=id)
except Document.DoesNotExist:
return
start, _ = process_time(sf.id.hex, type="DATASET FINISH")
sf.extract_content()
time.sleep(0.5)
sf.translate()
time.sleep(3)
sf.fingerprinting(save=True, debug=True)
sf.save()
process_time(sf.id.hex, type="DATASET FINISH", start=start)
return sf
# for user document
@threadpool
def process_doc(id):
try:
sf = Document.objects.get(id=id) # filename
except Document.DoesNotExist:
print("NOT EXIST", id)
return
start, _ = process_time(sf.id.hex, type="PROCESS DOC")
sf.extract_content()
sf.translate()
sf.fingerprinting(debug=True)
sf.save()
check_similarity(sf.id)
process_time(sf.id.hex, type="PROCESS DOC", start=start)
# should call from view
def extract_n_process(name, username='admin'):
from pyunpack import Archive
dest = os.path.join(settings.MEDIA_ROOT, 'extract')
src = os.path.join(settings.MEDIA_ROOT, name)
Archive(src).extractall(dest, auto_create_dir=True)
process_path(dest, username)
import shutil
try:
shutil.rmtree(dest)
os.unlink(src)
except:
pass
# os.remove(src)
print('Source Deleted')
def translate_and_finish():
untranslated = Document.objects.filter(is_dataset=True)
for unt in untranslated:
# unt.extract_content()
# unt.translate()
# print("Translating ", unt.id)
unt.fingerprinting(save=True, debug=True)
print("Fingerprinting ", unt.id)
# time.sleep(5)
@threadpool
def check_similarity(id):
from sklearn.preprocessing import normalize
from sklearn.metrics.pairwise import pairwise_distances, euclidean_distances, manhattan_distances
from scipy.spatial import distance
mode = 1
try:
sf = Document.objects.get(id=id)
except Document.DoesNotExist:
print("Document not Exist")
return
start, _ = process_time(sf.id.hex, type="SIMILARITY")
sf.status = Document.Statuses.PROCESS
sf.save()
datasets = Document.objects.filter(is_dataset=True, status="finished")
similarities = []
fingerprints = []
t_origin = sf.get_fingerprint()['fingerprint']
t_debug = sf.get_fingerprint()['debug']
fingerprints.append(t_origin)
minlen = len(fingerprints[0])
maxlen = len(fingerprints[0])
bag = []
for d in datasets:
reff = d.get_fingerprint()
t_referer = reff['fingerprint']
if len(t_origin) <= 1 or len(t_referer) <= 1:
print("Fingerprint not valid!", d.id)
continue
az = set.intersection(set(t_origin), set(t_referer))
asz = [str(d.id), d.filename] + [x[0] for x in reff['debug']['hashes'] if x[1] in az]
bag.append(asz)
text1, text2 = padd_to_max(t_origin, t_referer) if mode == 1 else trim_to_min(t_origin, t_referer)
# cosine = cosine_sim(text1, text2) * 100
# jaccard = jaccard_similarity(text1, text2) * 100
# dice = dice_similarity(text1, text2) * 100
minlen = min(minlen, len(t_referer))
maxlen = max(maxlen, len(t_referer))
fingerprints.append(t_referer) # append as set
similarities.append([
d
# jaccard,
# dice,
# cosine,
])
# print((cosine, jaccard, dice, euclidean, manhattan, minkowski, weighted, mahalanobis), "=================")
# fingerprints, _,_ = norm(fingerprints)
if mode == 0:
for i, m in enumerate(fingerprints): # trim length
if len(m) > minlen:
fingerprints[i] = m[:minlen]
elif mode == 1:
for i, m in enumerate(fingerprints): # padding
if len(m) < maxlen:
fingerprints[i] = m + [0.0 for a in range(0, maxlen - len(m))]
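# Illustrative note (values are made up): with mode == 1 the fingerprint vectors are
# zero-padded to the longest one, e.g. [[3, 1], [5, 2, 7]] becomes [[3, 1, 0.0], [5, 2, 7]];
# with mode == 0 they are trimmed to the shortest, e.g. [[3, 1], [5, 2]]. Either way the rows
# end up equal-length so they can be stacked and normalized below.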
matx = normalize(np.asarray(fingerprints, dtype=float))  # np.float was removed in NumPy 1.24; use the builtin float
with open(os.path.join(settings.MEDIA_ROOT, 'data', 'bag-'+sf.id.hex+'.json'), 'w') as fx:
# fx.write(json.dumps(matx))
fx.write(json.dumps(bag))
# tcov = np.array(matx).T
# # print(cov.shape)
# ccov = np.cov(tcov)
# iccov = np.linalg.inv(ccov)
for i, n in enumerate(matx):
if i == 0:
continue
cosine = round(distance.cosine(matx[0],matx[i]) , 8)
# print("cossine: ", distance.cosine(matx[0],matx[1]))
# jaccard = round(distance.jaccard(matx[0],matx[i]) , 8)
jaccard = distance.cdist([matx[0]],[matx[i]], 'jaccard')
# print("jaccard: ", distance.jaccard(matx[0],matx[1],5))
# dice = round(1-distance.dice(matx[0],matx[i]) , 8)
dice = distance.cdist([matx[0]],[matx[i]], 'dice')
# print("dice: ",i- distance.dice(matx[0],matx[i]))
# weighted = distance.sqeuclidean(matx[0], matx[i])
weighted = distance.cdist([matx[0]],[matx[i]], 'wminkowski', p=2., w=n)
# print("weig",weighted)cdist(XA, XB, 'euclidean')
# euclidean = distance.euclidean(matx[0], matx[i])
euclidean = distance.cdist([matx[0]], [matx[i]], 'euclidean')
# print("enclu", euclidean)
manhattan = manhattan_distances([matx[0]], [matx[i]], sum_over_features=False)
# print("manha",manhattan)
manhattan = np.max(manhattan)

# test.py
# main testing script
import os
import json
import argparse
import torch
from torch import nn
from torch.utils.data import DataLoader
from nuscenes.nuscenes import NuScenes
from data import nuScenesDataset, CollateFn
import matplotlib.pyplot as plt
import numpy as np
from skimage.draw import polygon
def make_data_loader(cfg, args):
if "train_on_all_sweeps" not in cfg:
train_on_all_sweeps = False
else:
train_on_all_sweeps = cfg["train_on_all_sweeps"]
dataset_kwargs = {
"n_input": cfg["n_input"],
"n_samples": args.n_samples,
"n_output": cfg["n_output"],
"train_on_all_sweeps": train_on_all_sweeps
}
data_loader_kwargs = {
"pin_memory": False, # NOTE
"shuffle": True,
"batch_size": args.batch_size,
"num_workers": args.num_workers
}
nusc = NuScenes(cfg["nusc_version"], cfg["nusc_root"])
data_loader = DataLoader(nuScenesDataset(nusc, args.test_split, dataset_kwargs),
collate_fn=CollateFn, **data_loader_kwargs)
return data_loader
def mkdir_if_not_exists(d):
if not os.path.exists(d):
print(f"creating directory {d}")
os.makedirs(d)
def evaluate_box_coll(obj_boxes, trajectory, pc_range):
xmin, ymin, _, xmax, ymax, _ = pc_range
T, H, W = obj_boxes.shape
collisions = np.full(T, False)
for t in range(T):
x, y, theta = trajectory[t]
corners = np.array([
(-0.8, -1.5, 1), # back left corner
(0.8, -1.5, 1), # back right corner
(0.8, 2.5, 1), # front right corner
(-0.8, 2.5, 1), # front left corner
])
tf = np.array([
[np.cos(theta), -np.sin(theta), x],
[np.sin(theta), np.cos(theta), y],
[0, 0, 1],
])
xx, yy = tf.dot(corners.T)[:2]
yi = np.round((yy - ymin) / (ymax - ymin) * H).astype(int)
xi = np.round((xx - xmin) / (xmax - xmin) * W).astype(int)
rr, cc = polygon(yi, xi)
I = np.logical_and(
np.logical_and(rr >= 0, rr < H),
np.logical_and(cc >= 0, cc < W),
)
collisions[t] = np.any(obj_boxes[t, rr[I], cc[I]])
return collisions
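# Worked example for the footprint transform in evaluate_box_coll (illustrative numbers only):
# the corners are homogeneous (x, y, 1) points in the ego frame spanning a 1.6 m wide,
# -1.5..2.5 m long box, and tf applies a rotation by theta plus a translation by (x, y).
# With theta = 0, x = 10, y = 5 the back-left corner (-0.8, -1.5, 1) maps to (9.2, 3.5)
# before being rasterized onto the H x W occupancy grid.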
def voxelize_point_cloud(points):
valid = (points[:, -1] == 0)
x, y, z, t = points[valid].T
x = ((x + 40.0) / 0.2).astype(int)
y = ((y + 70.4) / 0.2).astype(int)
mask = np.logical_and(
np.logical_and(0 <= x, x < 400),
np.logical_and(0 <= y, y < 704)
)
voxel_map = np.zeros((704, 400), dtype=bool)
voxel_map[y[mask], x[mask]] = True
return voxel_map
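# Grid geometry implied by voxelize_point_cloud (derived from the constants above, not from a
# config file): x in [-40.0, 40.0) m at 0.2 m resolution gives 400 columns and y in
# [-70.4, 70.4) m gives 704 rows, matching the (704, 400) voxel_map shape. For example, the
# origin (0.0, 0.0) lands in column (0 + 40.0) / 0.2 = 200 and row (0 + 70.4) / 0.2 = 352.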
def make_cost_fig(cost_maps):
cost_imgs = np.ones_like(cost_maps)
T = len(cost_maps)
for t in range(T):
cost_map = cost_maps[t]
cost_min, cost_max = cost_map.min(), cost_map.max()
cost_img = (cost_map - cost_min) / (cost_max - cost_min)
cost_imgs[t] = cost_img
return cost_imgs
def test(args):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device_count = torch.cuda.device_count()
if args.batch_size % device_count != 0:
raise RuntimeError(f"Batch size ({args.batch_size}) cannot be divided by device count ({device_count})")
model_dir = args.model_dir
with open(f"{model_dir}/config.json", 'r') as f:
cfg = json.load(f)
# dataset
data_loader = make_data_loader(cfg, args)
# instantiate a model and a renderer
_n_input, _n_output = cfg["n_input"], cfg["n_output"]
_pc_range, _voxel_size = cfg["pc_range"], cfg["voxel_size"]
model_type = cfg["model_type"]
if model_type == "vanilla":
from model import VanillaNeuralMotionPlanner
model = VanillaNeuralMotionPlanner(_n_input, _n_output, _pc_range, _voxel_size)
elif model_type == "vf_guided":
from model import VFGuidedNeuralMotionPlanner
model = VFGuidedNeuralMotionPlanner(_n_input, _n_output, _pc_range, _voxel_size)
elif model_type == "obj_guided":
from model import ObjGuidedNeuralMotionPlanner
model = ObjGuidedNeuralMotionPlanner(_n_input, _n_output, _pc_range, _voxel_size)
elif model_type == "obj_shadow_guided":
from model import ObjShadowGuidedNeuralMotionPlanner
model = ObjShadowGuidedNeuralMotionPlanner(_n_input, _n_output, _pc_range, _voxel_size)
else:
raise NotImplementedError(f"{model_type} not implemented yet.")
model = model.to(device)
# resume
ckpt_path = f"{args.model_dir}/ckpts/model_epoch_{args.test_epoch}.pth"
checkpoint = torch.load(ckpt_path, map_location=device)
# NOTE: ignore renderer's parameters
model.load_state_dict(checkpoint["model_state_dict"], strict=False)
# data parallel
model = nn.DataParallel(model)
model.eval()
# output
vis_dir = os.path.join(model_dir, "visuals", f"{args.test_split}_epoch_{args.test_epoch}")
mkdir_if_not_exists(vis_dir)
#
counts = np.zeros(cfg["n_output"], dtype=int)
l2_dist_sum = np.zeros(cfg["n_output"], dtype=float)
obj_coll_sum = np.zeros(cfg["n_output"], dtype=int)
obj_box_coll_sum = np.zeros(cfg["n_output"], dtype=int)
#
obj_box_dir = f"{cfg['nusc_root']}/obj_boxes/{cfg['nusc_version']}"
#
np.set_printoptions(suppress=True, precision=2)
num_batch = len(data_loader)
for i, batch in enumerate(data_loader):
sample_data_tokens = batch["sample_data_tokens"]
bs = len(sample_data_tokens)
if bs < device_count:
print(f"Dropping the last batch of size {bs}")
continue
with torch.set_grad_enabled(False):
results = model(batch, "test")
best_plans = results["best_plans"].detach().cpu().numpy()
sampled_plans = batch["sampled_trajectories"].detach().cpu().numpy()
gt_plans = batch["gt_trajectories"].detach().cpu().numpy()
plot_on = args.plot_on and (i % args.plot_every == 0)
cache_on = args.cache_on and (i % args.cache_every == 0)
if (cache_on or plot_on) and "cost" in results:
costs = results["cost"].detach().cpu().numpy()
else:
costs = None
for j, sample_data_token in enumerate(sample_data_tokens):
# visualization:
# - highlight the low cost regions (sub-zero)
# - distinguish cost maps from different timestamps
if plot_on:
# tt = [2, 4, 6]
tt = list(range(_n_output))
if costs is not None:
cost = np.concatenate(costs[j, tt], axis=-1)

import numpy as np
from modules.active_learning import Pool, DataPool, UnlabeledPool, LabeledPool
class TestPool:
def test_has_unlabeled(self):
test_inputs = np.random.randn(50, 28, 28, 1)
new_pool = Pool(test_inputs)
assert new_pool.has_unlabeled() and not new_pool.has_labeled()
def test_has_labeled(self):
test_inputs = np.random.randn(50, 28, 28, 1)
new_pool = Pool(test_inputs)
new_pool.annotate([0], 1)
inputs, targets = new_pool.get_labeled_data()
assert new_pool.has_labeled() and targets == np.array([1])
def test_annotate(self):
test_inputs = np.random.randn(50, 28, 28, 1)
new_pool = Pool(test_inputs)
indices = [0, 2, 5, 12]
new_pool.annotate(indices, [1, 0, 1, 1])
inputs, targets = new_pool.get_labeled_data()
assert len(inputs) == len(indices)
def test_annotate_pseudo(self):
test_inputs = np.random.randn(50)
test_targets = np.random.choice([0, 1, 2], 50)
new_pool = Pool(test_inputs,test_targets)
indices = np.array([0, 2, 5, 12])
new_pool.annotate(indices)
inputs, targets = new_pool.get_labeled_data()
assert np.all(test_targets[indices] == targets)
def test_annotate_shortcut(self):
test_inputs = np.random.randn(50)
new_pool = Pool(test_inputs)
indices = [0, 2, 5, 12]
targets = [2, 5, 1, 0]
new_pool[indices] = targets
inputs, targets = new_pool.get_labeled_data()
assert len(inputs) == len(indices)
def test_get_by(self):
test_inputs = np.array([0, 2, 5, 12])
new_pool = Pool(test_inputs)
indices = [0, 1]
values = new_pool.get_inputs_by(indices)
true_values = test_inputs[np.array(indices)]
assert np.all(values == true_values)
def test_get_length_labeled(self):
test_inputs = np.random.randn(50)
new_pool = Pool(test_inputs)
assert new_pool.get_length_labeled() == 0
new_pool[1] = 0
assert new_pool.get_length_labeled() == 1
new_pool[[2, 5]] = [1, 0]
assert new_pool.get_length_labeled() == 3
def test_get_length_unlabeled(self):
test_inputs = np.random.randn(50)
new_pool = Pool(test_inputs)
assert new_pool.get_length_unlabeled() == len(test_inputs)
new_pool[0] = 1
assert new_pool.get_length_unlabeled() != len(test_inputs)
def test_get_labeled_data(self):
test_inputs = np.random.randn(50)