import ctypes
import numpy as np
import pytest
from psyneulink.core import llvm as pnlvm
from llvmlite import ir
ITERATIONS=100
DIM_X=1000
matrix = np.random.rand(DIM_X, DIM_X)
vector = np.random.rand(DIM_X)
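# NOTE: these module-level fixtures appear to feed an LLVM-compiled matrix-vector
# benchmark (a DIM_X x DIM_X random matrix applied to a DIM_X vector, presumably
# repeated ITERATIONS times).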
"""
Functionality for reading ICEYE complex data into a SICD model.
"""
__classification__ = "UNCLASSIFIED"
__author__ = "<NAME>"
import logging
import os
import numpy
from numpy.polynomial import polynomial
from scipy.constants import speed_of_light
from sarpy.io.complex.nisar import _stringify
from sarpy.compliance import string_types, int_func
from sarpy.io.complex.base import SICDTypeReader, h5py, is_hdf5
from sarpy.io.complex.sicd_elements.blocks import Poly2DType, Poly1DType
from sarpy.io.complex.sicd_elements.SICD import SICDType
from sarpy.io.complex.sicd_elements.CollectionInfo import CollectionInfoType, \
RadarModeType
from sarpy.io.complex.sicd_elements.ImageCreation import ImageCreationType
from sarpy.io.complex.sicd_elements.RadarCollection import RadarCollectionType, \
ChanParametersType, WaveformParametersType
from sarpy.io.complex.sicd_elements.ImageData import ImageDataType
from sarpy.io.complex.sicd_elements.GeoData import GeoDataType, SCPType
from sarpy.io.complex.sicd_elements.Position import PositionType, XYZPolyType
from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType
from sarpy.io.complex.sicd_elements.Timeline import TimelineType, IPPSetType
from sarpy.io.complex.sicd_elements.ImageFormation import ImageFormationType, \
RcvChanProcType
from sarpy.io.complex.sicd_elements.RMA import RMAType, INCAType
from sarpy.io.complex.sicd_elements.Radiometric import RadiometricType
from sarpy.io.general.base import BaseReader, BaseChipper, SarpyIOError
from sarpy.io.general.utils import get_seconds, parse_timestring, is_file_like
from sarpy.io.complex.utils import fit_position_xvalidation, two_dim_poly_fit
logger = logging.getLogger(__name__)
########
# base expected functionality for a module with an implemented Reader
def is_a(file_name):
"""
Tests whether a given file_name corresponds to an ICEYE file. Returns a reader instance, if so.
Parameters
----------
file_name : str|BinaryIO
the file_name to check
Returns
-------
ICEYEReader|None
`ICEYEReader` instance if the file is an ICEYE file, `None` otherwise
"""
if is_file_like(file_name):
return None
if not is_hdf5(file_name):
return None
if h5py is None:
return None
try:
iceye_details = ICEYEDetails(file_name)
logger.info('File {} is determined to be an ICEYE file.'.format(file_name))
return ICEYEReader(iceye_details)
except SarpyIOError:
return None
def _parse_time(input):
"""
Parse the timestring.
Parameters
----------
input : bytes|str
Returns
-------
numpy.datetime64
"""
return parse_timestring(_stringify(input), precision='us')
class ICEYEDetails(object):
"""
Parses and converts the ICEYE metadata.
"""
__slots__ = ('_file_name', )
def __init__(self, file_name):
"""
Parameters
----------
file_name : str
"""
if h5py is None:
raise ImportError("Can't read ICEYE files, because the h5py dependency is missing.")
if not os.path.isfile(file_name):
raise SarpyIOError('Path {} is not a file'.format(file_name))
with h5py.File(file_name, 'r') as hf:
if 's_q' not in hf or 's_i' not in hf:
raise SarpyIOError(
'The hdf file does not have the real (s_i) or imaginary (s_q) dataset.')
if 'satellite_name' not in hf:
raise SarpyIOError('The hdf file does not have the satellite_name dataset.')
if 'product_name' not in hf:
raise SarpyIOError('The hdf file does not have the product_name dataset.')
self._file_name = file_name
@property
def file_name(self):
"""
str: the file name
"""
return self._file_name
def get_sicd(self):
"""
Gets the SICD structure.
Returns
-------
Tuple[SICDType, tuple, tuple]
The sicd structure, the data size argument, and the symmetry argument.
"""
def get_collection_info():
# type: () -> CollectionInfoType
return CollectionInfoType(
CollectorName=_stringify(hf['satellite_name'][()]),
CoreName=_stringify(hf['product_name'][()]),
CollectType='MONOSTATIC',
Classification='UNCLASSIFIED',
RadarMode=RadarModeType(
ModeType=_stringify(hf['acquisition_mode'][()]).upper(),
ModeID=_stringify(hf['product_type'][()])))
def get_image_creation():
# type: () -> ImageCreationType
from sarpy.__about__ import __version__
return ImageCreationType(
Application='ICEYE_P_{}'.format(hf['processor_version'][()]),
DateTime=_parse_time(hf['processing_time'][()]),
Site='Unknown',
Profile='sarpy {}'.format(__version__))
def get_image_data():
# type: () -> ImageDataType
samp_prec = _stringify(hf['sample_precision'][()])
if samp_prec.upper() == 'INT16':
pixel_type = 'RE16I_IM16I'
elif samp_prec.upper() == 'FLOAT32':
pixel_type = 'RE32F_IM32F'
else:
raise ValueError('Got unhandled sample precision {}'.format(samp_prec))
num_rows = int_func(number_of_range_samples)
num_cols = int_func(number_of_azimuth_samples)
scp_row = int_func(coord_center[0]) - 1
scp_col = int_func(coord_center[1]) - 1
if 0 < scp_col < num_cols-1:
if look_side == 'left':
scp_col = num_cols - scp_col - 1
else:
# early ICEYE processing bug led to nonsensical SCP
scp_col = int_func(num_cols/2.0)
return ImageDataType(
PixelType=pixel_type,
NumRows=num_rows,
NumCols=num_cols,
FirstRow=0,
FirstCol=0,
FullImage=(num_rows, num_cols),
SCPPixel=(scp_row, scp_col))
def get_geo_data():
# type: () -> GeoDataType
# NB: the remainder will be derived.
return GeoDataType(
SCP=SCPType(
LLH=[coord_center[2], coord_center[3], avg_scene_height]))
def get_timeline():
# type: () -> TimelineType
acq_prf = hf['acquisition_prf'][()]
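# a single IPP set spans the whole collect: IPPPoly = [0, acq_prf] maps
# collect time t (seconds) to pulse index acq_prf*t, so IPPEnd = round(acq_prf*duration)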
return TimelineType(
CollectStart=start_time,
CollectDuration=duration,
IPP=[IPPSetType(index=0, TStart=0, TEnd=duration,
IPPStart=0, IPPEnd=int_func(round(acq_prf*duration)),
IPPPoly=[0, acq_prf]), ])
def get_position():
# type: () -> PositionType
# fetch the state information
times_str = hf['state_vector_time_utc'][:, 0]
times = numpy.zeros((times_str.shape[0], ), dtype='float64')
positions = numpy.zeros((times.size, 3), dtype='float64')
velocities = numpy.zeros((times.size, 3), dtype='float64')
for i, entry in enumerate(times_str):
times[i] = get_seconds(_parse_time(entry), start_time, precision='us')
positions[:, 0], positions[:, 1], positions[:, 2] = hf['posX'][:], hf['posY'][:], hf['posZ'][:]
velocities[:, 0], velocities[:, 1], velocities[:, 2] = hf['velX'][:], hf['velY'][:], hf['velZ'][:]
# fit the position polynomial using cross validation
P_x, P_y, P_z = fit_position_xvalidation(times, positions, velocities, max_degree=8)
return PositionType(ARPPoly=XYZPolyType(X=P_x, Y=P_y, Z=P_z))
def get_radar_collection():
# type: () -> RadarCollectionType
return RadarCollectionType(
TxPolarization=tx_pol,
TxFrequency=(min_freq, max_freq),
Waveform=[WaveformParametersType(TxFreqStart=min_freq,
TxRFBandwidth=tx_bandwidth,
TxPulseLength=hf['chirp_duration'][()],
ADCSampleRate=hf['range_sampling_rate'][()],
RcvDemodType='CHIRP',
RcvFMRate=0,
index=1)],
RcvChannels=[ChanParametersType(TxRcvPolarization=polarization,
index=1)])
def get_image_formation():
# type: () -> ImageFormationType
return ImageFormationType(
TxRcvPolarizationProc=polarization,
ImageFormAlgo='RMA',
TStartProc=0,
TEndProc=duration,
TxFrequencyProc=(min_freq, max_freq),
STBeamComp='NO',
ImageBeamComp='SV',
AzAutofocus='NO',
RgAutofocus='NO',
RcvChanProc=RcvChanProcType(NumChanProc=1, PRFScaleFactor=1, ChanIndices=[1, ]),)
def get_radiometric():
# type: () -> RadiometricType
return RadiometricType(BetaZeroSFPoly=[[float(hf['calibration_factor'][()]), ],])
def calculate_drate_sf_poly():
r_ca_coeffs = numpy.array([r_ca_scp, 1], dtype='float64')
dop_rate_coeffs = hf['doppler_rate_coeffs'][:]
# Prior to ICEYE 1.14 processor, absolute value of Doppler rate was
# provided, not true Doppler rate. Doppler rate should always be negative
if dop_rate_coeffs[0] > 0:
dop_rate_coeffs *= -1
dop_rate_poly = Poly1DType(Coefs=dop_rate_coeffs)
# now shift so the Doppler rate polynomial is a function of range (m) relative to the SCP
t_drate_ca_poly = dop_rate_poly.shift(
t_0=zd_ref_time - rg_time_scp,
alpha=2/speed_of_light, return_poly=False)
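# the second return value scales the shifted Doppler rate polynomial by
# R_CA * c / (2 * center_freq * |v_CA|^2), presumably feeding INCA.DRateSFPoly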
return t_drate_ca_poly, -polynomial.polymul(t_drate_ca_poly, r_ca_coeffs)*speed_of_light/(2*center_freq*vm_ca_sq)
def calculate_doppler_polys():
# extract doppler centroid coefficients
dc_estimate_coeffs = hf['dc_estimate_coeffs'][:]
dc_time_str = hf['dc_estimate_time_utc'][:, 0]
dc_zd_times = numpy.zeros((dc_time_str.shape[0], ), dtype='float64')
for i, entry in enumerate(dc_time_str):
dc_zd_times[i] = get_seconds(_parse_time(entry), start_time, precision='us')
# create a sampled doppler centroid
samples = 49 # copied from corresponding matlab, we just need enough for appropriate refitting
# create doppler time samples
diff_time_rg = first_pixel_time - zd_ref_time + \
numpy.linspace(0, number_of_range_samples/range_sampling_rate, samples)
#!/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import os
import time
import tqdm
from datetime import datetime
from .mpile import get_par
def mcmc(
self,
p0=None,
nsteps=3000,
nwalks=None,
tune=None,
moves=None,
temp=False,
seed=None,
backend=True,
suffix=None,
linear=None,
resume=False,
append=False,
update_freq=None,
lprob_seed=None,
report=None,
maintenance_interval=10,
verbose=False,
debug=False,
**samplerargs
):
"""Run the emcee ensemble MCMC sampler.
Parameters:
----------
p0 : ndarray of initial states of the walkers in the parameter space
moves : emcee.moves object
lprob_seed : one of ('vec', 'rand', 'set')
"""
import emcee
from grgrlib.multiprocessing import serializer
if not hasattr(self, "ndim"):
# if it seems to be missing, let's do it,
# but without guarantees...
self.prep_estim(load_R=True)
if seed is None:
seed = self.fdict["seed"]
self.tune = tune
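# if no burn-in length is given, default to tuning over the first fifth of the chain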
if tune is None:
self.tune = int(nsteps * 1 / 5.0)
if update_freq is None:
update_freq = int(nsteps / 5.0)
if linear is None:
linear = self.filter.name == "KalmanFilter"
if "description" in self.fdict.keys():
self.description = self.fdict["description"]
if hasattr(self, "pool"):
from .estimation import create_pool
create_pool(self)
lprob_global = serializer(self.lprob)
if isinstance(temp, bool) and not temp:
temp = 1
def lprob(par):
return lprob_global(
par,
linear=linear,
verbose=verbose,
temp=temp,
lprob_seed=lprob_seed or "set",
)
bnd = np.array(self.fdict["prior_bounds"])
if self.pool:
self.pool.clear()
if p0 is None and not resume:
if temp < 1:
p0 = get_par(
self,
"prior_mean",
asdict=False,
full=False,
nsample=nwalks,
verbose=verbose,
)
else:
p0 = get_par(
self, "best", asdict=False, full=False, nsample=nwalks, verbose=verbose
)
elif not resume:
nwalks = p0.shape[0]
if backend:
if isinstance(backend, str):
# backend_file will only be loaded later if explicitly defined before
self.fdict["backend_file"] = backend
try:
backend = self.fdict["backend_file"]
except KeyError:
# this is the default case
suffix = str(suffix) if suffix else "_sampler.h5"
backend = os.path.join(self.path, self.name + suffix)
if os.path.exists(backend) and not (resume or append):
print(
"[mcmc:]".ljust(15, " ")
+ " HDF backend at %s already exists. Deleting..." % backend
)
os.remove(backend)
backend = emcee.backends.HDFBackend(backend)
if not (resume or append):
if not nwalks:
raise TypeError(
"If neither `resume`, `append` or `p0` is given I need to know the number of walkers (`nwalks`)."
)
try:
backend.reset(nwalks, self.ndim)
except KeyError as e:
raise KeyError(str(e) + ". Your `*.h5` file is likely to be damaged...")
else:
backend = None
if resume:
nwalks = backend.get_chain().shape[1]
if debug:
sampler = emcee.EnsembleSampler(nwalks, self.ndim, lprob)
else:
sampler = emcee.EnsembleSampler(
nwalks, self.ndim, lprob, moves=moves, pool=self.pool, backend=backend
)
if resume and not p0:
p0 = sampler.get_last_sample()
self.sampler = sampler
self.temp = temp
if not verbose:
np.warnings.filterwarnings("ignore")
import numpy as np
from experiments.GMM import GMM
from scipy.stats import multivariate_normal as normal_pdf
import os
file_path = os.path.dirname(os.path.realpath(__file__))
data_path = os.path.abspath(os.path.join(file_path, os.pardir, os.pardir, os.pardir)) + "/data/"
### Gaussian Mixture Model experiment
def build_GMM_lnpdf(num_dimensions, num_true_components, prior_variance=1e3):
prior = normal_pdf(np.zeros(num_dimensions), prior_variance * np.eye(num_dimensions))
prior_chol = np.sqrt(prior_variance) * np.eye(num_dimensions)
target_mixture = GMM(num_dimensions)
for i in range(0, num_true_components):
this_cov = 0.1 * np.random.normal(0, num_dimensions, (num_dimensions * num_dimensions)).reshape(
(num_dimensions, num_dimensions))
this_cov = this_cov.transpose().dot(this_cov)
this_cov += 1 * np.eye(num_dimensions)
this_mean = 100 * (np.random.random(num_dimensions) - 0.5)
target_mixture.add_component(this_mean, this_cov)
target_mixture.set_weights(np.ones(num_true_components) / num_true_components)
def target_lnpdf(theta, without_prior=False):
target_lnpdf.counter += 1
if without_prior:
return np.squeeze(target_mixture.evaluate(theta, return_log=True) - prior.logpdf(theta))
else:
return np.squeeze(target_mixture.evaluate(theta, return_log=True))
target_lnpdf.counter = 0
return [target_lnpdf, prior, prior_chol, target_mixture]
def build_GMM_lnpdf_autograd(num_dimensions, num_true_components):
import autograd.scipy.stats.multivariate_normal as normal_auto
from autograd.scipy.misc import logsumexp
import autograd.numpy as np
means = np.empty((num_true_components, num_dimensions))
covs = np.empty((num_true_components, num_dimensions, num_dimensions))
for i in range(0, num_true_components):
covs[i] = 0.1 * np.random.normal(0, num_dimensions, (num_dimensions * num_dimensions)).reshape(
(num_dimensions, num_dimensions))
covs[i] = covs[i].transpose().dot(covs[i])
covs[i] += 1 * np.eye(num_dimensions)
means[i] = 100 * (np.random.random(num_dimensions) - 0.5)
def target_lnpdf(theta):
theta = np.atleast_2d(theta)
target_lnpdf.counter += len(theta)
cluster_lls = []
for i in range(0, num_true_components):
cluster_lls.append(np.log(1./num_true_components) + normal_auto.logpdf(theta, means[i], covs[i]))
return np.squeeze(logsumexp(np.vstack(cluster_lls), axis=0))
target_lnpdf.counter = 0
return [target_lnpdf, means, covs]
### Planar-N-Link experiment
def build_target_likelihood_planar_n_link(num_dimensions, prior_variance, likelihood_variance):
prior = normal_pdf(np.zeros(num_dimensions), prior_variance * np.eye(num_dimensions))
prior_chol = np.sqrt(prior_variance) * np.eye(num_dimensions)
likelihood = normal_pdf([0.7 * num_dimensions, 0], likelihood_variance * np.eye(2))
l = np.ones(num_dimensions)
def target_lnpdf(theta, without_prior=False):
theta = np.atleast_2d(theta)
target_lnpdf.counter += len(theta)
y = np.zeros((len(theta)))
x = np.zeros((len(theta)))
for i in range(0, num_dimensions):
y += l[i] * np.sin(np.sum(theta[:,:i+1],1))
x += l[i] * np.cos(np.sum(theta[:,:i+1],1))
if without_prior:
return np.squeeze(likelihood.logpdf(np.vstack((x,y)).transpose()))
else:
return np.squeeze(prior.logpdf(theta) + likelihood.logpdf(np.vstack((x,y)).transpose()))
target_lnpdf.counter = 0
return [target_lnpdf, prior, prior_chol]
def build_target_likelihood_planar_autograd(num_dimensions):
from autograd.scipy.stats import multivariate_normal as normal_auto
import autograd.numpy as np
conf_likelihood_var = 4e-2 * np.ones(num_dimensions)
conf_likelihood_var[0] = 1
cart_likelihood_var = np.array([1e-4, 1e-4])
prior_mean = np.zeros(num_dimensions)
prior_cov = conf_likelihood_var * np.eye(num_dimensions)
likelihood_mean = [0.7 * num_dimensions, 0]
likelihood_cov = cart_likelihood_var * np.eye(2)
l = np.ones(num_dimensions)
def target_lnpdf(theta):
theta = np.atleast_2d(theta)
target_lnpdf.counter += len(theta)
y = np.zeros((len(theta)))
x = np.zeros((len(theta)))
for i in range(0, num_dimensions):
y += l[i] * np.sin(np.sum(theta[:,:i + 1],1))
x += l[i] * np.cos(np.sum(theta[:,:i + 1],1))
return normal_auto.logpdf(theta, prior_mean, prior_cov) + normal_auto.logpdf(np.vstack([x, y]).transpose(),
likelihood_mean, likelihood_cov)
target_lnpdf.counter = 0
return [target_lnpdf, num_dimensions, None]
### Logistic regression experiments
def build_logist_regression_autograd(X, y, prior_variance):
import autograd.numpy as np
import autograd.scipy.stats.multivariate_normal as normal_auto
num_dimensions = X.shape[1]
prior_mean = np.zeros(num_dimensions)
prior_cov = prior_variance * np.eye(num_dimensions)
def target_lnpdf(theta, without_prior=False):
theta = np.atleast_2d(theta)
target_lnpdf.counter += len(theta)
weighted_sum = np.dot(theta, X.transpose())
offset = np.maximum(weighted_sum, np.zeros(weighted_sum.shape))
denominator = offset + np.log(np.exp(weighted_sum - offset) + np.exp(-offset))
log_prediction = -denominator
swapped_y = -(y - 1)
log_prediction = log_prediction + swapped_y[np.newaxis, :] * (weighted_sum)
#log_prediction[np.where(np.isinf(log_prediction))] = 0
if (np.any(np.isnan(log_prediction)) or np.any(np.isinf(log_prediction))):
print('nan')
loglikelihood = np.sum(log_prediction,1)
if without_prior:
return np.squeeze(loglikelihood)
else:
return np.squeeze(normal_auto.logpdf(theta, prior_mean, prior_cov) + loglikelihood)
target_lnpdf.counter = 0
return target_lnpdf
def build_logist_regression(X, y, prior_variance):
import numpy as anp
num_dimensions = X.shape[1]
prior = normal_pdf(anp.zeros(num_dimensions), prior_variance * anp.eye(num_dimensions))
prior_chol = anp.sqrt(prior_variance) * anp.eye(num_dimensions)
def target_lnpdf(theta, without_prior=False):
theta = anp.atleast_2d(theta)
import numpy as np
from largescale.src.support.common import CommonConfig
from largescale.src.neuron import V1DirectNeuronGroup, T_EXC, T_INH
from largescale.src.support.geometry import gen_coordinates
from largescale.src.support.geometry.gabor import make_gabor
import largescale.src.support.cl_support as clspt
class PinwheelNetwork:
def __init__(self, config = CommonConfig()):
hypercolumn_size = config.fetch("hypercolumn_size", 48)
if not isinstance(hypercolumn_size, tuple): hypercolumn_size = (hypercolumn_size, hypercolumn_size)
hypercolumn_narrow_size = min(hypercolumn_size)
grid_size = config.fetch("grid_size", (6,3))
cluster_num = config.fetch("cluster_num", 12)
exc_ratio = config.fetch("exc_ratio", 0.75)
connectivity_ratio = config.fetch("connectivity_ratio", 0.1)
connectivity_num = config.fetch("connectivity_num", 6)
connectivity_size = config.fetch("connectivity_size", (hypercolumn_size[0]*2, hypercolumn_size[1]*2))
connectivity_scale = config.fetch("connectivity_scale", hypercolumn_narrow_size)
connectivity_peak = config.fetch("connectivity_peak", 1)
connectivity_ratio_lr = config.fetch("connectivity_ratio_lr", 0.1)
connectivity_num_lr = config.fetch("connectivity_num_lr", 6)
connectivity_size_lr = config.fetch("connectivity_size_lr", (hypercolumn_size[0]*2, hypercolumn_size[1]*2))
connectivity_scale_lr = config.fetch("connectivity_scale_lr", hypercolumn_narrow_size)
connectivity_peak_lr = config.fetch("connectivity_peak_lr", 1)
gabor_size = config.fetch("gabor_size", (hypercolumn_size[0]*2, hypercolumn_size[1]*2))
gabor_scale = config.fetch("gabor_scale", hypercolumn_narrow_size * 0.2)
gabor_period = config.fetch("gabor_period", hypercolumn_narrow_size * 0.2)
gabor_peak = config.fetch("gabor_peak", 1.0)
delta_orientation = 360.0 / cluster_num
orientations = [i*delta_orientation for i in xrange(cluster_num)]
gabor_kernels = [make_gabor(size=gabor_size, orientation=o, scale=gabor_scale, period=gabor_period, peak=gabor_peak) for o in orientations]
(coory, coorx) = gen_coordinates(hypercolumn_size, center_zero=True)
prefer_cluster_lt = np.arctan(coory/coorx).astype(np.float32) / np.pi * 180.0 + 90.0
prefer_cluster_lt[coorx<0] += 180.0
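# orientation preference appears to follow the polar angle of each pixel about the
# hypercolumn center (a pinwheel-like map); the right half is a mirrored copy of the left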
prefer_cluster_rt = np.fliplr(prefer_cluster_lt)
from sklearn.metrics import mean_squared_error, log_loss
from keras.models import Model
from keras.models import load_model
from keras.layers import Input, Dense
from keras.layers.recurrent import SimpleRNN
from keras.layers.merge import multiply, concatenate, add
from keras import backend as K
from keras import initializers
from keras.callbacks import EarlyStopping
from keras.layers.wrappers import TimeDistributed
from keras.callbacks import Callback
from keras import optimizers
import pandas as pd
import numpy as np
from keras.constraints import max_norm, non_neg, unit_norm
np.random.seed(42)
from math import sqrt
import os
import sys
from collections import defaultdict
class DeepAFM:
def __init__(self):
pass
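# custom_bce below is a padding-aware binary cross-entropy: timesteps labelled -1 in
# y_true are treated as padding, and each sample's mean BCE is scaled by its fraction
# of valid (non-padded) timesteps via the mask `b`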
def custom_bce(self, y_true, y_pred):
b = K.not_equal(y_true, -K.ones_like(y_true))
b = K.cast(b, dtype='float32')
ans = K.mean(K.binary_crossentropy(y_true, y_pred), axis=-1) * K.mean(b, axis=-1)
ans = K.cast(ans, dtype='float32')
return K.sum(ans)
def custom_activation(self, x):
if self.activation.split('-')[0] == "custom":
a = float(self.activation.split('-')[1])
return 1.0 / ( 1 + K.exp(-a*x) )
elif self.activation.split('-')[0] == "rounded":
return K.minimum(K.maximum(K.round(K.sigmoid(x)), 0), 1)
def custom_init(self, shape, dtype=None):
return K.cast_to_floatx(self.Q_jk_initialize)
def custom_random(self, shape, dtype=None):
if self.random_init == "normal":
return K.random_normal(shape, 0.5, 0.05, dtype=dtype, seed=22)
else:
return K.random_uniform(shape, 0, 1, dtype=dtype, seed=22)
def f(self, x):
def custom_init(shape, dtype=None):
return K.cast_to_floatx(np.reshape(x, shape))
return custom_init
def build(self, dafm_type="dafm-afm", optimizer="rmsprop", learning_rate=0.01, activation="linear", Q_jk_initialize=0, section="", section_count=0, model1="", stateful=False, theta_student="False", student_count=0, binary="False"):
skills = np.shape(Q_jk_initialize)[1]
steps = np.shape(Q_jk_initialize)[0]
self.activation = activation
if '-' in self.activation:
activation = self.custom_activation
if dafm_type.split("_")[-1] == "different":
skills = int( float(dafm_type.split("_")[-2])*skills )
dafm_type = dafm_type.split('_')[0]
if dafm_type.split("_")[0] == "round-fine-tuned":
try:
self.round_threshold = float(dafm_type.split("_")[-1])
dafm_type = dafm_type.split("_")[0]
except:
pass
q_jk_size = skills
if '^' in dafm_type:
q_jk_size = skills
skills = int (float(dafm_type.split('^')[-1]) * skills)
dafm_type = dafm_type.split('^')[0]
self.dafm_type = dafm_type
if dafm_type == "random-uniform" or dafm_type == "random-normal":
qtrainable, finetuning, randomize = True, False, True
self.random_init = dafm_type.split('-')[-1]
elif dafm_type == "dafm-afm":
qtrainable, finetuning, randomize = False, False, False
elif dafm_type == "fine-tuned":
qtrainable, finetuning, randomize = True, True, False
elif dafm_type == "kcinitialize":
qtrainable, finetuning, randomize = True, False, False
elif dafm_type== "round-fine-tuned":
# if not self.round_threshold == -1:
# rounded_Qjk = np.abs(Q_jk1 - Q_jk_initialize)
# Q_jk1[rounded_Qjk <= self.round_threshold] = Q_jk_initialize[rounded_Qjk <= self.round_threshold]
# Q_jk1[rounded_Qjk > self.round_threshold] = np.ones(np.shape(Q_jk_initialize[rounded_Qjk > self.round_threshold])) - Q_jk_initialize[rounded_Qjk > self.round_threshold]
# else:
Q_jk1 = model1.get_layer("Q_jk").get_weights()[0]
Q_jk1 = np.minimum(np.ones(np.shape(Q_jk1)), np.maximum(np.round(Q_jk1), np.zeros(np.shape(Q_jk1))))
model1.get_layer("Q_jk").set_weights([Q_jk1])
return model1
elif dafm_type == "qjk-dense":
qtrainable, finetuning, randomize = False, False, False
activation_dense = activation
elif dafm_type == "random-qjk-dense-normal" or dafm_type == "random-qjk-dense-uniform":
qtrainable, finetuning, randomize = False, False, True
self.random_init = dafm_type.split('-')[-1]
activation_dense = activation
else:
print ("No Valid Model Found")
sys.exit()
if section == "onehot":
section_input = Input(batch_shape=(None, None, section_count), name='section_input')
if not theta_student=="False":
student_input = Input(batch_shape=(None, None, student_count), name='student_input')
virtual_input1 = Input(batch_shape=(None, None, 1), name='virtual_input1')
if finetuning:
B_k = TimeDistributed(Dense(skills, activation='linear', kernel_initializer=self.f(model1.get_layer("B_k").get_weights()[0]), use_bias=False), name="B_k")(virtual_input1)
T_k = TimeDistributed(Dense(skills, activation='linear', kernel_initializer=self.f(model1.get_layer("T_k").get_weights()[0]), use_bias=False), name="T_k")(virtual_input1)
bias_layer = TimeDistributed(Dense(1, activation='linear', use_bias=False, kernel_initializer=self.f(model1.get_layer("bias").get_weights()[0]), trainable=True), name="bias")(virtual_input1)
else:
B_k = TimeDistributed(Dense(skills, activation='linear', use_bias=False, trainable=True), name="B_k")(virtual_input1)
T_k = TimeDistributed(Dense(skills, activation='linear', use_bias=False, trainable=True), name="T_k")(virtual_input1)
bias_layer = TimeDistributed(Dense(1, activation='linear', use_bias=False, kernel_initializer=initializers.Zeros(), trainable=True), name="bias")(virtual_input1)
step_input = Input(batch_shape=(None, None, steps), name='step_input')
if randomize:
if binary=="False":
Q_jk = TimeDistributed(Dense(q_jk_size, use_bias=False, activation=activation, kernel_initializer=self.custom_random), trainable=qtrainable ,name="Q_jk")(step_input)
else:
Q_jk = TimeDistributed(BinaryDense(q_jk_size, use_bias=False, activation=activation, kernel_initializer=self.custom_random),trainable=qtrainable, name="Q_jk")(step_input)
else:
if binary=="False":
Q_jk = TimeDistributed(Dense(skills, activation=activation, kernel_initializer=self.f(Q_jk_initialize), use_bias=False,trainable=qtrainable), trainable=qtrainable, name="Q_jk")(step_input)
else:
Q_jk = TimeDistributed(BinaryDense(skills, activation=activation, kernel_initializer=self.f(Q_jk_initialize),trainable=qtrainable,
use_bias=False), name="Q_jk", trainable=qtrainable)(step_input)
if dafm_type == "random-qjk-dense-normal" or dafm_type == "random-qjk-dense-uniform":
if binary =="False":
Q_jk = TimeDistributed(Dense(skills, activation=activation_dense, use_bias=False, kernel_initializer=self.custom_random, trainable=True), name="Q_jk_dense")(Q_jk)
else:
Q_jk = TimeDistributed(BinaryDense(skills, activation=activation_dense, use_bias=False, kernel_initializer=self.custom_random, trainable=True), name="Q_jk_dense")(Q_jk)
elif dafm_type == "qjk-dense":
if binary =='False':
Q_jk = TimeDistributed(Dense(skills, activation=activation_dense, use_bias=False, kernel_initializer=initializers.Identity(), trainable=True), name="Q_jk_dense")(Q_jk)
else:
Q_jk = TimeDistributed(BinaryDense(skills, activation=activation_dense, use_bias=False, kernel_initializer=initializers.Identity(), trainable=True), name="Q_jk_dense")(Q_jk)
else:
pass
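# AFM-style logit assembled below: intercept (bias) + sum_k Q_jk * B_k (skill easiness)
# + sum_k Q_jk * T_k * P_k, where P_k (a SimpleRNN with identity kernel/recurrent weights
# over Q_jk) accumulates practice opportunities per skill across timesteps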
Qjk_mul_Bk = multiply([Q_jk, B_k])
sum_Qjk_Bk = TimeDistributed(Dense(1, activation='linear', trainable=False, kernel_initializer=initializers.Ones(), use_bias=False), trainable=False,name="sum_Qjk_Bk")(Qjk_mul_Bk)
P_k = SimpleRNN(skills, kernel_initializer=initializers.Identity(), recurrent_initializer=initializers.Identity() , use_bias=False, trainable=False, activation='linear', return_sequences=True, name="P_k")(Q_jk)
Qjk_mul_Pk_mul_Tk = multiply([Q_jk, P_k, T_k])
sum_Qjk_Pk_Tk = TimeDistributed(Dense(1, activation='linear', trainable=False, kernel_initializer=initializers.Ones(), use_bias=False),trainable=False, name="sum_Qjk_Pk_Tk")(Qjk_mul_Pk_mul_Tk)
Concatenate = concatenate([bias_layer, sum_Qjk_Bk, sum_Qjk_Pk_Tk])
if not (theta_student=="False"):
if finetuning:
theta = TimeDistributed(Dense(1, activation="linear", use_bias=False, kernel_initializer=self.f(model1.get_layer("theta").get_weights()[0])), name='theta')(student_input)
else:
theta = TimeDistributed(Dense(1, activation="linear", use_bias=False), name='theta')(student_input)
Concatenate = concatenate([Concatenate, theta])
if section == "onehot":
if finetuning:
S_k = TimeDistributed(Dense(1, activation="linear", use_bias=False, kernel_initializer=self.f(model1.get_layer("S_k").get_weights()[0])), name='S_k')(section_input)
else:
S_k = TimeDistributed(Dense(1, activation="linear", use_bias=False), name='S_k')(section_input)
Concatenate = concatenate([Concatenate, S_k])
output = TimeDistributed(Dense(1, activation="sigmoid", trainable=False, kernel_initializer=initializers.Ones(), use_bias=False), trainable=False, name="output")(Concatenate)
if section == "onehot" and not (theta_student=="False"):
model = Model(inputs=[virtual_input1, step_input, section_input, student_input], outputs=output)
elif section == "onehot" and theta_student=="False":
model = Model(inputs=[virtual_input1, step_input, section_input], outputs=output)
elif not (section == "onehot") and not (theta_student=="False"):
model = Model(inputs=[virtual_input1, step_input, student_input], outputs=output)
else:
model = Model(inputs=[virtual_input1, step_input], outputs=output)
d_optimizer = {"rmsprop":optimizers.RMSprop(lr=learning_rate), "adam":optimizers.Adam(lr=learning_rate), "adagrad":optimizers.Adagrad(lr=learning_rate) }
model.compile( optimizer = d_optimizer[optimizer],
loss = self.custom_bce)
return model
def fit(self, x_train, y_train, x_train_section, x_train_student, x_test, y_test, x_test_section, x_test_student, model, epochs=5, batch_size=32, loaded=False, validation=True):
loss_epoch = {"epoch":[], "loss":[], "val_loss":[], 'patience':[]}
print ("Max Epochs", epochs)
if self.dafm_type == "round-fine-tuned" or loaded:
best_model = model
patience, epoch = 0 , 1
prev_best_val_loss = np.inf
counter = 0
virtual_input1 = np.ones([np.shape(x_train)[0], np.shape(x_train)[1], 1])
virtual_input1_test = np.ones([np.shape(x_test)[0], np.shape(x_test)[1], 1])
if not validation:
earlyStopping = EarlyStopping(monitor='loss', patience=2)
if len(x_train_student) == 0:
if len(x_train_section) == 0:
history_callback = model.fit([virtual_input1, x_train], y_train, batch_size=batch_size, epochs=epochs, callbacks=[earlyStopping], verbose=1, shuffle=True)
else:
history_callback = model.fit([virtual_input1, x_train, x_train_section], y_train, batch_size=batch_size, epochs=epochs, callbacks=[earlyStopping], verbose=1, shuffle=True)
else:
if len(x_train_section) == 0:
history_callback = model.fit([virtual_input1, x_train, x_train_student], y_train, batch_size=batch_size, epochs=epochs , callbacks=[earlyStopping], verbose=1, shuffle=True)
else:
history_callback = model.fit([virtual_input1, x_train, x_train_section, x_train_student], y_train, batch_size=batch_size, epochs=epochs, callbacks=[earlyStopping], verbose=1, shuffle=True)
# print ("Epoch Number:", counter, "Patience:", 0, "val loss:", current_val_loss)
loss_epoch["loss"].extend(history_callback.history["loss"])
loss_epoch["val_loss"].extend(history_callback.history["loss"])
loss_epoch["epoch"].extend(list(range(epochs)))
loss_epoch["patience"].extend(list(range(epochs)))
best_model = model
epoch = epochs
else:
while (patience <=5 and epoch <= epochs and (not self.dafm_type == "round-fine-tuned") and (loaded == False)):
permutation = np.random.permutation(x_train.shape[0])
x_train = x_train[permutation]
y_train = y_train[permutation]
counter += 1
if len(x_train_student) == 0:
if len(x_train_section) == 0:
history_callback = model.fit([virtual_input1, x_train], y_train, batch_size=batch_size, epochs=1, validation_data=([virtual_input1_test, x_test], y_test), verbose=0, shuffle=True)
else:
x_train_section = x_train_section[permutation]
history_callback = model.fit([virtual_input1, x_train, x_train_section], y_train, batch_size=batch_size, epochs=1, validation_data=([virtual_input1_test, x_test, x_test_section], y_test), verbose=0, shuffle=True)
else:
x_train_student = x_train_student[permutation]
if len(x_train_section) == 0:
history_callback = model.fit([virtual_input1, x_train, x_train_student], y_train, batch_size=batch_size, epochs=1, validation_data=([virtual_input1_test, x_test, x_test_student], y_test), verbose=0, shuffle=True)
else:
x_train_section = x_train_section[permutation]
history_callback = model.fit([virtual_input1, x_train, x_train_section, x_train_student], y_train, batch_size=batch_size, epochs=1, validation_data=([virtual_input1_test, x_test, x_test_section, x_test_student], y_test), verbose=0, shuffle=True)
current_val_loss = history_callback.history["val_loss"][0]
print ("Epoch Number:", counter, "Patience:", patience, "val loss:", current_val_loss)
loss_epoch["val_loss"].append(history_callback.history["val_loss"][0])
loss_epoch["loss"].append(history_callback.history["loss"][0])
loss_epoch["epoch"].append(counter)
loss_epoch["patience"].append(patience)
if (prev_best_val_loss - current_val_loss) > 0:
best_model = model
epoch += patience + 1
patience = 0
prev_best_val_loss = current_val_loss
else:
patience += 1
if len(x_train_student)==0:
if len(x_train_section)==0:
x = self.bce_loss(y_train, best_model.predict([virtual_input1, x_train]), x_train)
else:
x = self.bce_loss(y_train, best_model.predict([virtual_input1, x_train, x_train_section]), x_train)
else:
if len(x_train_section)==0:
x = self.bce_loss(y_train, best_model.predict([virtual_input1, x_train, x_train_student]), x_train)
else:
x = self.bce_loss(y_train, best_model.predict([virtual_input1, x_train, x_train_section, x_train_student]), x_train)
L, N = -np.sum(x), len(x)
model_param = best_model.count_params()
print ("PARAM", model_param)
AIC = 2 * model_param - 2 * L
BIC = model_param * np.log(N) - 2 * L
B_k = best_model.get_layer("B_k").get_weights()[0]
T_k = best_model.get_layer("T_k").get_weights()[0]
return best_model, AIC, BIC, epoch, loss_epoch
def fit_batches(self, dafmdata_obj, model, max_epochs=30, earlyStop="val_loss", loaded=False):
print ("Max Epochs", max_epochs)
loss_epoch = {"epoch":[], "loss":[], earlyStop:[], 'patience':[]}
patience, epoch = 0, 1
prev_best_val_loss = np.inf
counter = 0
if self.dafm_type == "round-fine-tuned" or loaded:
best_model = model
while (patience <= 2 and epoch <= max_epochs and loaded==False and (not self.dafm_type == "round-fine-tuned")):
counter += 1
current_val_loss = 0
total_loss, total_train_samples = 0, 0
train = dafmdata_obj.data_generator1("train")
test = dafmdata_obj.data_generator1("test")
bc = 0
for x_train, y_train, x_train_section, x_train_student, batch_size in train:
permutation = np.random.permutation(x_train.shape[0])
x_train = x_train[permutation]
y_train = y_train[permutation]
virtual_input1 = np.ones([np.shape(x_train)[0], np.shape(x_train)[1], 1])
print ("Batch Number:", bc, np.shape(x_train))
if len(x_train_student)==0:
if len(x_train_section) == 0:
history_callback = model.fit([virtual_input1, x_train], y_train, batch_size=batch_size, epochs=1, verbose=0)
else:
x_train_section = x_train_section[permutation]
history_callback = model.fit([virtual_input1, x_train, x_train_section], y_train, batch_size=batch_size, epochs=1, verbose=1)
else:
x_train_student = x_train_student[permutation]
if len(x_train_section) == 0:
history_callback = model.fit([virtual_input1, x_train, x_train_student], y_train, batch_size=batch_size, epochs=1, verbose=0)
else:
x_train_section = x_train_section[permutation]
history_callback = model.fit([virtual_input1, x_train, x_train_section, x_train_student], y_train, batch_size=batch_size, epochs=1, verbose=1)
total_loss += history_callback.history["loss"][0] * len(x_train)
total_train_samples += len(x_train)
bc += 1
if earlyStop == "rmse":
current_avg_rmse = self.predict_batches(dafmdata_obj, model)
loss_epoch["rmse"].append(current_avg_rmse)
else:
current_avg_rmse = np.mean(self.bce_loss_batches(dafmdata_obj, model, utype="test"))
loss_epoch["val_loss"].append(current_avg_rmse)
loss_epoch["loss"].append(float(total_loss)/float(total_train_samples))
loss_epoch["epoch"].append(counter)
loss_epoch["patience"].append(patience)
print ("Epoch Number:", counter, "Patience:", patience, earlyStop, current_avg_rmse, "Loss:", loss_epoch["loss"][-1])
if (prev_best_val_loss - current_avg_rmse) > 0:
best_model = model
epoch += patience + 1
patience = 0
prev_best_val_loss = current_avg_rmse
else:
patience += 1
x = self.bce_loss_batches(dafmdata_obj, best_model, utype="train")
L, N = -np.sum(x), len(x)
# Copyright 2021 The Bellman Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow as tf
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import policy_step
from tf_agents.trajectories import time_step as ts
from tf_agents.trajectories import trajectory
from tf_agents.trajectories.trajectory import Trajectory
from tf_agents.utils.tensor_normalizer import StreamingTensorNormalizer
from bellman.agents.trpo.trpo_agent import compute_return_and_advantage
from tests.tools.bellman.agents.trpo.trpo_agent import (
create_trpo_agent_factory,
dummy_trajectory_batch,
)
@pytest.fixture(name="trajectory_batch")
def _trajectory(batch_size=2, n_steps=5, obs_dim=2):
return dummy_trajectory_batch(batch_size, n_steps, obs_dim)
@pytest.fixture(name="time_step_batch")
def _time_step_batch(trajectory_batch):
return trajectory.to_transition(trajectory_batch)[-1]
@pytest.fixture(name="create_trpo_agent")
def _create_trpo_agent_fixture():
return create_trpo_agent_factory()
def _compute_gae(rewards, values, gamma, lambda_):
""" generalised advantage computation"""
deltas = rewards + gamma * values[:, 1:] - values[:, :-1]
coeff = lambda_ * gamma
result = np.zeros_like(rewards)
accumulator = 0
for delta_idx in reversed(range(deltas.shape[-1])):
accumulator = deltas[:, delta_idx] + coeff * accumulator
result[:, delta_idx] = accumulator
return result
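# hand-checked example of the recursion above, assuming gamma=0.9, lambda_=0.95:
#   rewards = [[1., 1.]], values = [[0., 0., 0.]]  ->  deltas = [[1., 1.]]
#   advantage[t=1] = 1.0; advantage[t=0] = 1.0 + 0.9*0.95*1.0 = 1.855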
def _normalise(array, eps=1e-8):
"""mean / std normalisation"""
return (array - array.mean(keepdims=True)) / (array.std(keepdims=True) + eps)
def test_policy(create_trpo_agent):
""" Test policy returns correct action shapes"""
trpo_agent = create_trpo_agent()
observations = tf.constant([[1, 2]], dtype=tf.float32)
time_steps = ts.restart(observations, batch_size=1)
action_step = trpo_agent.policy.action(time_steps)
actions = action_step.action
assert tuple(actions.shape.as_list()) == (1, 1)
def test_value_estimation_loss(create_trpo_agent):
""" Test computation of value estimation loss"""
trpo_agent = create_trpo_agent()
observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
time_steps = ts.restart(observations, batch_size=2)
returns = tf.constant([1.9, 1.0], dtype=tf.float32)
weights = tf.ones_like(returns)
expected_loss = 123.205
loss = trpo_agent.value_estimation_loss(time_steps, returns, weights).numpy()
np.testing.assert_allclose(loss, expected_loss)
def test_compute_return_and_advantage(create_trpo_agent, time_step_batch):
""" Test computation of normalised returns and advantages """
trpo_agent = create_trpo_agent()
values = np.ones(
(time_step_batch.reward.shape[0], time_step_batch.reward.shape[1] + 1),
dtype=np.float32,
)
# manually computed values
expected_gaes = np.array([[1.72262475, 1.48005, 0.99, 0.0]] * 2)
ref_return = np.array([[2.72262475, 2.48005, 1.99, 1.0]] * 2)
ref_gaes = _normalise(expected_gaes)
discount = trpo_agent._discount_factor
lambda_ = trpo_agent._lambda
ret, gae = compute_return_and_advantage(
discount, lambda_, time_step_batch.reward, time_step_batch, values
)
np.testing.assert_array_almost_equal(gae.numpy(), ref_gaes)
np.testing.assert_array_almost_equal(ret.numpy(), ref_return)
def test_policy_gradient_loss(create_trpo_agent):
""" Test computation of value policy gradient loss"""
trpo_agent = create_trpo_agent()
observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
time_steps = ts.restart(observations, batch_size=2)
actions = tf.constant([[0], [1]], dtype=tf.float32)
sample_action_log_probs = tf.constant([0.9, 0.3], dtype=tf.float32)
advantages = tf.constant([1.9, 1.0], dtype=tf.float32)
weights = tf.ones_like(advantages)
current_policy_distribution, unused_network_state = trpo_agent._actor_net(
time_steps.observation, time_steps.step_type, ()
)
expected_loss = -0.01646461
loss = trpo_agent.policy_gradient_loss(
time_steps,
actions,
sample_action_log_probs,
advantages,
current_policy_distribution,
weights,
).numpy()
np.testing.assert_allclose(loss, expected_loss, rtol=1e-06)
def test_policy_gradient(create_trpo_agent):
""" Test computation of policy gradient"""
trpo_agent = create_trpo_agent()
observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
time_steps = ts.restart(observations, batch_size=2)
actions = tf.constant([[0], [1]], dtype=tf.float32)
advantages = tf.constant([1.9, 1.0], dtype=tf.float32)
weights = tf.ones_like(advantages)
policy_info = {
"dist_params": {
"loc": tf.constant([[0.0], [0.0]], dtype=tf.float32),
"scale": tf.constant([[1.0], [1.0]], dtype=tf.float32),
}
}
policy_steps = policy_step.PolicyStep(action=actions, state=(), info=policy_info)
# manually computed values for dummy policy
expected_loss = -0.09785123
expected_grads = [
np.array([[0.01901411, -0.00523423], [0.03126473, -0.008375]], dtype=np.float32)
import numpy as np
# Import the necessary modules
# Import the necessary modules
from pySOT import SymmetricLatinHypercube, RBFInterpolant, check_opt_prob, CubicKernel, LinearTail, \
CandidateDYCORS, SyncStrategyNoConstraints
# import GridCal modules
from GridCal.Engine.Replacements.poap_controller import SerialController, ThreadController, BasicWorkerThread
from GridCal.Engine.io_structures import OptimalPowerFlowResults
from GridCal.Engine.calculation_engine import MultiCircuit
class AcOPFBlackBox:
def __init__(self, multi_circuit: MultiCircuit, verbose=False):
################################################################################################################
# Compilation
################################################################################################################
self.verbose = verbose
self.multi_circuit = multi_circuit
self.numerical_circuit = self.multi_circuit.compile_snapshot()
self.islands = self.numerical_circuit.compute()
# indices of generators that contribute to the static power vector 'S'
self.gen_s_idx = np.where((np.logical_not(self.numerical_circuit.controlled_gen_dispatchable)
* self.numerical_circuit.controlled_gen_enabled) == True)[0]
self.bat_s_idx = np.where((np.logical_not(self.numerical_circuit.battery_dispatchable)
* self.numerical_circuit.battery_enabled) == True)[0]
# indices of generators that are to be optimized via the solution vector 'x'
self.gen_x_idx = np.where((self.numerical_circuit.controlled_gen_dispatchable
* self.numerical_circuit.controlled_gen_enabled) == True)[0]
self.bat_x_idx = np.where((self.numerical_circuit.battery_dispatchable
* self.numerical_circuit.battery_enabled) == True)[0]
# compute the problem dimension
dim = len(self.gen_x_idx) + len(self.bat_x_idx)
# get the limits of the devices to control
gens = np.array(multi_circuit.get_generators())
bats = np.array(multi_circuit.get_batteries())
gen_x_up = np.array([elm.Pmax for elm in gens[self.gen_x_idx]])
gen_x_low = np.array([elm.Pmin for elm in gens[self.gen_x_idx]])
bat_x_up = np.array([elm.Pmax for elm in bats[self.bat_x_idx]])
bat_x_low = np.array([elm.Pmin for elm in bats[self.bat_x_idx]])
# form S static ################################################################################################
# all the loads apply
self.Sfix = self.numerical_circuit.C_load_bus.T * (
- self.numerical_circuit.load_power / self.numerical_circuit.Sbase * self.numerical_circuit.load_enabled)
# static generators (all apply)
self.Sfix += self.numerical_circuit.C_sta_gen_bus.T * (
self.numerical_circuit.static_gen_power / self.numerical_circuit.Sbase * self.numerical_circuit.static_gen_enabled)
# controlled generators
self.Sfix += (self.numerical_circuit.C_ctrl_gen_bus[self.gen_s_idx, :]).T * (
self.numerical_circuit.controlled_gen_power[self.gen_s_idx] / self.numerical_circuit.Sbase)
# batteries
self.Sfix += (self.numerical_circuit.C_batt_bus[self.bat_s_idx, :]).T * (
self.numerical_circuit.battery_power[self.bat_s_idx] / self.numerical_circuit.Sbase)
# build A_sys per island #######################################################################################
for island in self.islands:
island.build_linear_ac_sys_mat() # builds the A matrix factorization and stores it internally
################################################################################################################
# internal variables for PySOT
################################################################################################################
self.xlow = np.r_[gen_x_low, bat_x_low] / self.multi_circuit.Sbase
self.xup = np.r_[gen_x_up, bat_x_up] / self.multi_circuit.Sbase
self.dim = dim
self.info = str(dim) + "-dimensional OPF problem"
self.min = 0
self.integer = []
self.continuous = np.arange(0, dim)
check_opt_prob(self)
################################################################
def build_solvers(self):
# just present to be compatible
pass
def set_state(self, load_power, static_gen_power, controlled_gen_power,
Emin=None, Emax=None, E=None, dt=0,
force_batteries_to_charge=False, bat_idx=None, battery_loading_pu=0.01):
# all the loads apply
self.Sfix = self.numerical_circuit.C_load_bus.T * (
- load_power / self.numerical_circuit.Sbase * self.numerical_circuit.load_enabled)
# static generators (all apply)
self.Sfix += self.numerical_circuit.C_sta_gen_bus.T * (
static_gen_power / self.numerical_circuit.Sbase * self.numerical_circuit.static_gen_enabled)
# controlled generators
self.Sfix += (self.numerical_circuit.C_ctrl_gen_bus[self.gen_s_idx, :]).T * (
controlled_gen_power / self.numerical_circuit.Sbase)
# batteries
# self.Sfix += (self.numerical_circuit.C_batt_bus[self.bat_s_idx, :]).T * (
# self.numerical_circuit.battery_power_profile[t_idx, self.bat_s_idx] / self.numerical_circuit.Sbase)
def set_default_state(self):
"""
Set the default loading state
"""
self.set_state(load_power=self.numerical_circuit.load_power,
static_gen_power=self.numerical_circuit.static_gen_power,
controlled_gen_power=self.numerical_circuit.controlled_gen_power)
def set_state_at(self, t, force_batteries_to_charge=False, bat_idx=None, battery_loading_pu=0.01,
Emin=None, Emax=None, E=None, dt=0):
"""
Set the problem state at at time index
:param t: time index
"""
self.set_state(load_power=self.numerical_circuit.load_power_profile[t, :],
static_gen_power=self.numerical_circuit.static_gen_power_profile[t, :],
controlled_gen_power=self.numerical_circuit.controlled_gen_power_profile[t, self.gen_s_idx],
Emin=Emin, Emax=Emax, E=E, dt=dt,
force_batteries_to_charge=force_batteries_to_charge,
bat_idx=bat_idx,
battery_loading_pu=battery_loading_pu)
def objfunction(self, x):
"""
Evaluate the Ackley function at x
:param x: Data point
:type x: numpy.array
:return: Value at x
:rtype: float
"""
if len(x) != self.dim:
raise ValueError('Dimension mismatch')
# modify S
S = self.Sfix.copy()
ngen = len(self.gen_x_idx)
S += (self.numerical_circuit.C_ctrl_gen_bus[self.gen_x_idx, :]).T * x[0:ngen] # controlled generators
S += (self.numerical_circuit.C_batt_bus[self.bat_x_idx, :]).T * x[ngen:] # batteries
# evaluate
f = 0
for island in self.islands:
npv = len(island.pv)
npq = len(island.pq)
# build the right-hand-side vector
rhs = np.r_[S.real[island.pqpv], S.imag[island.pq]]
# solve the linear system
inc_v = island.Asys(rhs)
# compose the results vector
V = island.Vbus.copy()
# set the PV voltages
va_pv = inc_v[0:npv]
vm_pv = np.abs(island.Vbus[island.pv])
V[island.pv] = vm_pv * np.exp(1j * va_pv)
# set the PQ voltages
va_pq = inc_v[npv:npv + npq]
vm_pq = np.ones(npq)
#!/usr/bin/env python
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import numpy as np
import scipy.io
import glob
import os
import csv
import random
import tensorflow as tf
import transition_model_common as tm
# import sys
# sys.path.append('./tensorflow_hmm')
# import tensorflow_hmm.hmm as hmm
def train_model():
dl = tm.DataLoader()
n_examples = dl.num_examples
n_input = dl.feature_len
n_classes = dl.num_labels
# Parameters
learning_rate = 0.01
training_epochs = 5000
batch_size = 100
display_step = 50
tmm = tm.create_model(n_input, n_classes, train=True)
# Define loss and optimizer
# residual_pre = tf.reduce_mean(tf.squared_difference(x_pre, ae_pre_out))
residual_post = tf.reduce_mean(tf.squared_difference(tmm.x_post, tmm.ae_post_out))
# cost_current = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred_current, labels=y_current))
cost_next = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=tmm.pred_next, labels=tmm.y_next))
regularizer = tf.nn.l2_loss(tmm.pred_weights[0])
for i in range(1, len(tmm.pred_weights)):
regularizer += tf.nn.l2_loss(tmm.pred_weights[i])
# total_loss = 0.01 * (residual_pre + residual_post) + cost_current + cost_next
total_loss = 0.01 * (residual_post) + cost_next + 0.001 * regularizer
# total_loss = cost_next + cost_current
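# chosen objective: down-weighted post-state reconstruction error + next-action
# cross-entropy + L2 penalty on the prediction-layer weights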
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(total_loss)
# Initializing the variables
init = tf.global_variables_initializer()
# Calculate accuracy
# correct_pred_current = tf.equal(tf.argmax(pred_current, 1), tf.argmax(y_current, 1))
correct_pred_next = tf.equal(tf.argmax(tmm.pred_next, 1), tf.argmax(tmm.y_next, 1))
# accuracy_current = tf.reduce_mean(tf.cast(correct_pred_current, 'float'))
accuracy_next = tf.reduce_mean(tf.cast(correct_pred_next, 'float'))
# Launch the graph
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(init)
writer = tf.summary.FileWriter("tensorboard/train", sess.graph)
# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(n_examples/batch_size)
# Loop over all batches
for i in range(total_batch):
x_pre_batch, x_post_batch, y_current_batch, y_next_batch = dl.next_training_batch(batch_size)
# Run optimization op (backprop) and cost op (to get loss value)
# feed = {x_pre: x_pre_batch, x_post: x_post_batch, y_current: y_current_batch, y_next: y_next_batch }
feed = {tmm.x_post: x_post_batch,
tmm.y_current: y_current_batch,
tmm.y_next: y_next_batch,
tmm.keep_prob: 0.7}
_, c = sess.run([optimizer, total_loss], feed_dict=feed)
# Compute average loss
avg_cost += c / total_batch
# Display logs per epoch step
if epoch % display_step == 0:
print('Epoch: {:04d} cost: {:.9f}'.format(epoch, avg_cost))
# print(' train accuracy (next): {:.9f}'.format(accuracy_next.eval({tmm.x_post: dl.training_post_data, y_next: dl.training_next_action})))
# print(' test accuracy (next): {:.9f}'.format(accuracy_next.eval({tmm.x_post: dl.testing_post_data, y_next: dl.testing_next_action})))
print(' train accuracy (next): {:.9f}'.format(accuracy_next.eval({tmm.x_post: dl.training_post_data,
tmm.y_current: dl.training_current_action,
tmm.y_next: dl.training_next_action,
tmm.keep_prob: 1.0})))
print(' test accuracy (next): {:.9f}'.format(accuracy_next.eval({tmm.x_post: dl.testing_post_data,
tmm.y_current: dl.testing_current_action,
tmm.y_next: dl.testing_next_action,
tmm.keep_prob: 1.0})))
# print(' train accuracy (current): {:.9f}'.format(accuracy_current.eval({x_pre: dl.training_pre_data, tmm.x_post: dl.training_post_data, y_current: dl.training_current_action})))
# print(' test accuracy (current): {:.9f}'.format(accuracy_current.eval({x_pre: dl.testing_pre_data, tmm.x_post: dl.testing_post_data, y_current: dl.testing_current_action})))
test_action_accuracy(accuracy_next, tmm, dl, training=False)
print("Optimization Finished!")
if not os.path.exists('./models/transition'):
os.mkdir('./models/transition')
saver.save(sess, './models/transition/model.ckpt')
writer.close()
def train_mapping():
dl = tm.DataLoader()
n_input = dl.feature_len
n_classes = dl.num_labels
tmm = tm.create_model(n_input, n_classes, train=True)
# Launch the graph
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, './models/transition/model.ckpt')
rdl = tm.RobotDataLoader(dl, tmm.x_post, tmm.ae_post_enc, tmm.keep_prob)
robot_test_data, human_test_data, y_current_test_data, y_next_test_data = rdl.extract_data_as_arrays(train=False)
n_dim1 = rdl.human_enc_dim
n_dim2 = rdl.robot_dim
# tf Graph input
# x = tf.placeholder('float', [None, n_dim2], name='x_robot_enc')
y_gt = tf.placeholder('float', [None, n_dim1], name='y_human_gt')
# y = create_mapping_model(x, n_dim2, n_dim1, train=True)
x = tmm.x_map_input
y = tmm.y_map_output
# Parameters
learning_rate = 0.001
training_epochs = 10000
batch_size = 100
display_step = 50
total_batch = 20
# Define loss and optimizer
# cost_next = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred_next, labels=y_next))
residual = tf.reduce_mean(tf.squared_difference(y, y_gt))
regularizers = tf.nn.l2_loss(tmm.mapping_weights[0])
for i in range(1, len(tmm.mapping_weights)):
regularizers += tf.nn.l2_loss(tmm.mapping_weights[i])
total_loss = residual + 0.001 * regularizers
# total_loss = residual
# total_loss = 0.01 * residual + cost_next
# optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(total_loss, var_list=[ae_post_out, y_current, y_next, x, y_gt, keep_prob])
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(total_loss)
new_vars = []
for var in tf.global_variables():
if 'mapping' in var.name or 'beta' in var.name:
new_vars.append(var)
# Initializing the variables
#init = tf.global_variables_initializer()
# init = tf.initialize_variables(new_vars)
init = tf.variables_initializer(new_vars)
# Launch the graph
sess.run(init)
writer = tf.summary.FileWriter("tensorboard/map", sess.graph)
# Calculate accuracy
# correct_pred_current = tf.equal(tf.argmax(pred_current, 1), tf.argmax(y_current, 1))
correct_pred_next = tf.equal(tf.argmax(tmm.pred_next, 1), tf.argmax(tmm.y_next, 1))
# accuracy_current = tf.reduce_mean(tf.cast(correct_pred_current, 'float'))
accuracy_next = tf.reduce_mean(tf.cast(correct_pred_next, 'float'))
num_training = training_epochs * total_batch * batch_size
# robot data projected to human subspace
mapped_robot_data = np.zeros((num_training, n_dim1), dtype=np.float)
# -*- coding: UTF-8 -*-
from __future__ import print_function
import matplotlib
matplotlib.use('Agg')  # select the non-interactive backend before pyplot is imported
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import os
import json
def reset_style():
from matplotlib import rcParams
rcParams['font.family'] = 'serif'
rcParams['font.serif'] = ['Times New Roman']
rcParams['axes.titlesize'] = 14
rcParams['axes.labelsize'] = 14
rcParams['lines.linewidth'] = 1.5
rcParams['lines.markersize'] = 8
rcParams['xtick.labelsize'] = 14
rcParams['ytick.labelsize'] = 14
rcParams['legend.fontsize'] = 14
reset_style()
def reset_plot(width_in_inches=4.5,
height_in_inches=4.5):
dots_per_inch = 200
plt.close('all')
return plt.figure(
figsize=(width_in_inches, height_in_inches),
dpi=dots_per_inch)
def rand_jitter(arr, scale=0.01):
stdev = scale * (max(arr) - min(arr))
return arr + np.random.randn(len(arr)) * stdev
def heatscatter(x, y):
heatmap, xedges, yedges = np.histogram2d(x, y, bins=50)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
# plt.clf()
reset_plot()
plt.imshow(heatmap.T, extent=extent, origin='lower')
plt.show()
def heatscatter_sns(x, y, figsize=(8, 8)):
sns.set(rc={'figure.figsize': figsize})
sns.set(style="white", color_codes=True)
sns.jointplot(x=x, y=y, kind='kde', color="skyblue")
def plot_training_history(history, par_dir):
# print(history.history.keys())
reset_plot()
# summarize history for r2
try:
plt.plot(history.history['r_squared'])
plt.plot(history.history['val_r_squared'])
plt.title('model r2')
plt.ylabel('r2')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
# plt.show()
plt.savefig(os.path.join(par_dir, 'r2.png'))
plt.gcf().clear()
except:
pass
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
# plt.show()
plt.savefig(os.path.join(par_dir, 'loss.png'))
plt.gcf().clear()
def plot_controller_performance(controller_hist_file, metrics_dict, save_fn=None, N_sma=10):
'''
Example:
controller_hist_file = 'train_history.csv'
metrics_dict = {'acc': 0, 'loss': 1, 'knowledge': 2}
'''
# plt.clf()
reset_plot()
plt.grid(b=True, linestyle='--', linewidth=0.8)
df = pd.read_csv(controller_hist_file, header=None)
assert df.shape[0] > N_sma
df.columns = ['trial', 'loss_and_metrics', 'reward'] + ['layer_%i' % i for i in range(df.shape[1] - 3)]
# N_sma = 20
plot_idx = []
for metric in metrics_dict:
metric_idx = metrics_dict[metric]
df[metric] = [float(x.strip('[]').split(',')[metric_idx]) for x in df['loss_and_metrics']]
df[metric + '_SMA'] = np.concatenate(
[[None] * (N_sma - 1), np.convolve(df[metric], np.ones((N_sma,)) / N_sma, mode='valid')])
import gc
import os
import shutil
import signal
import subprocess
import numpy as np
import pandas as pd
import tables
from GenNet_utils.hase.hdgwas.data import MINIMACHDF5Folder
from GenNet_utils.hase.hdgwas.tools import Timer
class Genotype(object):
def __init__(self):
self.file_name = None
self.reader = None
self.probes = None
self.individuals = None
self.genotype = None
self.out = None
class GenotypeHDF5(Genotype):
def __init__(self, name, force=True):
super(GenotypeHDF5, self).__init__()
self.h5_name = '%s.h5' % name
self.file_name = name
self.pytable_filters = tables.Filters(complevel=9, complib='zlib')
self.h5_gen_file = None
self.h5_ind_file = None
self.h5_pr_file = None
self.gen_iter = 0
def write_data(self, type, overwrite=True):
type_dic = {'gen': ['genotype', self.h5_gen_file],
'ind': ['individuals', self.h5_ind_file],
'pr': ['probes', self.h5_pr_file]}
if (not overwrite) and os.path.isfile(os.path.join(self.out, type_dic[type][0], self.h5_name)):
print(('File %s found. Please remove manually.' % self.h5_name))
return
else:
if type == 'pr':
self.h5_pr_file = tables.open_file(os.path.join(self.out, type_dic[type][0], self.h5_name), 'w',
title=self.file_name)
self.h5_pr_file.close() # need to close file before join data
elif type == 'ind':
self.h5_ind_file = tables.open_file(os.path.join(self.out, type_dic[type][0], self.h5_name), 'w',
title=self.file_name)
elif type == 'gen':
self.h5_gen_file = tables.open_file(
os.path.join(self.out, type_dic[type][0], str(self.gen_iter) + '_' + self.h5_name),
'w', title=self.file_name)
self.gen_iter += 1
def close(self):
self.h5_gen_file.close()
self.h5_ind_file.close()
self.h5_pr_file.close()
def summary(self):
raise (NotImplementedError)
class GenotypePLINK(GenotypeHDF5):
def __init__(self, name, reader=None):
super(GenotypePLINK, self).__init__(name)
self.reader = reader
self.split_size = None
def convert_individuals(self):
individuals = self.reader.folder.get_fam()
self.h5_ind_file.create_table(self.h5_ind_file.root, 'individuals', individuals,
title='Individuals', filters=self.pytable_filters)
self.h5_ind_file.root.individuals[:] = individuals
self.individuals = self.h5_ind_file.root.individuals[:]
self.n_ind = len(individuals)
# @profile
def convert_probes(self, chunk_size=100000):
if os.path.isfile(os.path.join(self.out, 'probes', self.h5_name)):
os.remove(os.path.join(self.out, 'probes', self.h5_name))
hash_table = {'keys': np.array([], dtype=int), 'allele': np.array([])}
i = 0
chunk = np.array([])
while True:
chunk = self.reader.folder.get_bim(chunk_size)
if isinstance(chunk, type(None)):
break
chunk.columns = ['CHR', 'ID', 'distance', 'bp', 'allele1', 'allele2']
hash_1 = chunk.allele1.apply(hash)
hash_2 = chunk.allele2.apply(hash)
k, indices = np.unique(np.append(hash_1, hash_2), return_index=True)
s = np.append(chunk.allele1, chunk.allele2)
import numpy as np
def test_update_mu():
from parameters_update_likelihood_terms import update_mu
mu_old = np.array([1.0, 2.0, 3.0])
alpha_i = 2.0
v_old = np.array([5.0, 4.0, 2.0])
x_i = np.array([2.0, 0.0, 1.0])
expected_result = np.array([21.0, 2.0, 7.0])
np.testing.assert_array_equal(expected_result, update_mu(mu_old, alpha_i, v_old, x_i))
def test_update_nu():
from parameters_update_likelihood_terms import update_nu
nu_old = np.array([1.0, 2.0, 3.0])
alpha_i = 2.0
x_i = np.array([3.0, 1.0, 2.0])
mu_new = np.array([4.0, 2.0, 1.0])
result = update_nu(nu_old, alpha_i, x_i, mu_new)
expected_result = np.array([-25.0 / 2.0, -4.0, -51.0])
np.testing.assert_array_equal(expected_result, result)
def test_update_v_i_nu_old():
from parameters_update_likelihood_terms import update_v_i_nu_old
nu_old = np.array([1.0, 0.5, 0.25])
nu_new = np.array([0.125, 1.0, 0.1])
result = update_v_i_nu_old(nu_new, nu_old)
expected_result = np.array([1.0 / 7.0, -1.0, 1.0 / 6.0])
np.testing.assert_array_equal(result, expected_result)
def test_update_m_i():
from parameters_update_likelihood_terms import update_m_i
mu_old = np.array([1.0, 2.0, 3.0])
alpha_i = 2.0
v_i_new = np.array([2.0, 5.0, 1.0])
x_i = np.array([4.0, 2.0, 3.0])
v_old = np.array([3.0, 1.0, 2.0])
result = update_m_i(mu_old, alpha_i, v_i_new , x_i, v_old)
expected_result = np.array([41.0, 26.0, 21.0])
np.testing.assert_array_equal(result, expected_result)
def test_update_s_i():
from parameters_update_likelihood_terms import update_s_i
from scipy import stats
z = 1
v_i_new = np.array([1.0, 4.0, 2.0])
v_old = np.array([2.0, 1.0, 3.0])
m_i_new = np.array([3.0, 5.0, 4.0])
mu_old = np.array([4.0, 1.0, 2.0])
result = update_s_i(z, v_i_new, v_old, m_i_new, mu_old, 1.0)
expected_result = stats.norm.cdf(z) * np.sqrt(3) * 5 * np.exp(1.0 / 6.0 + 8.0 / 5.0 + 2.0 / 5.0) / (2.0*np.sqrt(2.0))
np.testing.assert_almost_equal(result, expected_result)
def test_calc_mu_old():
from parameters_update_likelihood_terms import calc_mu_old
mu = np.array([1.0, 2.0, 3.0])
v_old = np.array([2.0, 3.0, 4.0])
v_i = np.array([3.0, 1.0, 2.0])
m_i = np.array([2.0, 4.0, 1.0])
result = calc_mu_old(mu, v_old, v_i, m_i)
expected_result = np.array([1 / 3.0, -4.0, 7.0])
np.testing.assert_almost_equal(result, expected_result)
def test_calc_alpha_i():
from parameters_update_likelihood_terms import calc_alpha_i
from scipy import stats
x_i = np.array([1.0, 2.0, 3.0])
v_old = np.array([2.0, 1.0, 0.0])
z = 1.0
result = calc_alpha_i(x_i, v_old, z, 1.0)
expected_result = (1.0 / np.sqrt(7.0)) * stats.norm.pdf(z) / stats.norm.cdf(z)
np.testing.assert_array_equal(result, expected_result)
def test_calc_z():
from parameters_update_likelihood_terms import calc_z
x_i = np.array([1.0, 2.0, 3.0])
""" This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh.charts import Dot
from bokeh.charts.builder.tests._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestDot(unittest.TestCase):
def test_supported_input(self):
xyvalues = OrderedDict()
xyvalues['python']=[2, 5]
xyvalues['pypy']=[12, 40]
xyvalues['jython']=[22, 30]
xyvaluesdf = pd.DataFrame(xyvalues, index=['lists', 'loops'])
cat = ['lists', 'loops']
catjython = ['lists:0.75', 'loops:0.75']
catpypy = ['lists:0.5', 'loops:0.5']
catpython = ['lists:0.25', 'loops:0.25']
python = seg_top_python = [2, 5]
pypy = seg_top_pypy = [12, 40]
jython = seg_top_jython = [22, 30]
zero = [0, 0]
for i, _xy in enumerate([xyvalues, xyvaluesdf]):
hm = create_chart(Dot, _xy, cat=cat)
builder = hm._builders[0]
self.assertEqual(sorted(builder._groups), sorted(list(xyvalues.keys())))
assert_array_equal(builder._data['cat'], cat)
assert_array_equal(builder._data['catjython'], catjython)
assert_array_equal(builder._data['catpython'], catpython)
assert_array_equal(builder._data['catpypy'], catpypy)
assert_array_equal(builder._data['python'], python)
assert_array_equal(builder._data['jython'], jython)
assert_array_equal(builder._data['pypy'], pypy)
import numpy as np
import matplotlib as mpl
mpl.use('TkAgg')
import matplotlib.pyplot as plt
from skimage.feature import match_template
import logging
logger = logging.getLogger(__name__)
class LowXCorr(Exception):
pass
def get_abs_max(data):
"""
Picks the absolute maximum position in the stack / 2D image.
First dimension comes first
:param data: np.array
:return: abs max coordinates in data.shape format
"""
return np.unravel_index(np.argmax(data), data.shape)
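# Quick sketch of get_abs_max on a toy volume: the planted maximum at (1, 2, 3)
# is returned as a (z, y, x)-style index tuple.
def _demo_get_abs_max():
    demo = np.zeros((3, 4, 5))
    demo[1, 2, 3] = 7.0
    assert get_abs_max(demo) == (1, 2, 3)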
def fit_gauss_3d(stack, radius_xy=4, radius_z=5, z_zoom=20, debug=False):
"""
Detects maximum on the stack in 3D
Fits a 2D Gaussian in xy and a 4th-order polynomial in z.
Outputs [x, y, z] in pixels.
"""
try:
from bfdc import gaussfit
except ImportError:
raise ImportError('Missing gaussfit.py. Please download one from Zhuanglab Github')
#cut_stack = np.zeros((1, 1, 1))
assert np.ndim(stack) == 3, logger.error(f'fit_gauss_3d: input stack shape is wrong, expected 3 dim, got {stack.shape}')
if debug:
plt.imshow(stack.max(axis=0))
plt.title('Max projection of cc-stack')
plt.show()
z_px, y_px, x_px = get_abs_max(stack)
cc_value = np.max(stack)
if cc_value < 0.2:
# raise LowXCorr("fit_gauss_3d: Cross-correlation value is too low!")
logger.warning("fit_gauss_3d: Cross-correlation value is too low!")
return [0, 0, 0, False]
if debug:
print([z_px, y_px, x_px])
r, rz = radius_xy, radius_z
z_start = np.maximum(z_px - rz, 0)
z_stop = np.minimum(z_px + rz + 2, len(stack) - 1)
cut_stack = stack[z_start:z_stop, y_px - r :y_px + r + 1, x_px - r :x_px + r + 1]
if debug: print(f'cut_stack shape {cut_stack.shape}')
xy_proj = cut_stack.max(axis=0)
z_proj = cut_stack.max(axis=(1, 2))
# z_proj = cut_stack[:,r,r]
#[(_min, _max, y, x, sig), good] = gaussfit.fitSymmetricGaussian(xy_proj,sigma=1)
[(_min, _max, y, x, sigy,angle,sigx), good] = gaussfit.fitEllipticalGaussian(xy_proj)
x_found = x - r + x_px
y_found = y - r + y_px
# [(_min,_max,z,sig),good] = gaussfit.fitSymmetricGaussian1D(z_proj)
z_crop = z_proj
x = np.arange(len(z_crop))
x_new = np.linspace(0., len(z_crop), num=z_zoom * len(z_crop), endpoint=False)
fit = np.polyfit(x, z_crop, deg=4)
poly = np.poly1d(fit)
z_fit = poly(x_new)
z_found = x_new[np.argmax(z_fit)] + z_start  # assumption: shift back to full-stack z index
###
# pySuStaIn: a Python implementation of the Subtype and Stage Inference (SuStaIn) algorithm
#
# If you use pySuStaIn, please cite the following core papers:
# 1. The original SuStaIn paper: https://doi.org/10.1038/s41467-018-05892-0
# 2. The pySuStaIn software paper: https://doi.org/10.1101/2021.06.09.447713
#
# Please also cite the corresponding progression pattern model you use:
# 1. The piece-wise linear z-score model (i.e. ZscoreSustain): https://doi.org/10.1038/s41467-018-05892-0
# 2. The event-based model (i.e. MixtureSustain): https://doi.org/10.1016/j.neuroimage.2012.01.062
# with Gaussian mixture modeling (i.e. 'mixture_gmm'): https://doi.org/10.1093/brain/awu176
# or kernel density estimation (i.e. 'mixture_kde'): https://doi.org/10.1002/alz.12083
# 3. The model for discrete ordinal data (i.e. OrdinalSustain): TBD
#
# Thanks a lot for supporting this project.
#
# Authors: <NAME> (<EMAIL>) and <NAME> (<EMAIL>)
# Contributors: <NAME> (<EMAIL>), <NAME> (<EMAIL>), <NAME> (<EMAIL>)
###
from tqdm.auto import tqdm
import numpy as np
from matplotlib import pyplot as plt
from pySuStaIn.AbstractSustain import AbstractSustainData
from pySuStaIn.AbstractSustain import AbstractSustain
#*******************************************
#The data structure class for OrdinalSustain. It holds the score and negative likelihoods that get passed around and re-indexed in places.
class OrdinalSustainData(AbstractSustainData):
def __init__(self, prob_nl, prob_score, numStages):
self.prob_nl = prob_nl
self.prob_score = prob_score
self.__numStages = numStages
def getNumSamples(self):
return self.prob_nl.shape[0]
def getNumBiomarkers(self):
return self.prob_nl.shape[1]
def getNumStages(self):
return self.__numStages
def reindex(self, index):
return OrdinalSustainData(self.prob_nl[index,], self.prob_score[index,], self.__numStages)
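# Minimal sketch of constructing the data container from random probabilities.
# Shapes follow the OrdinalSustain docstring (subjects x biomarkers, and
# subjects x biomarkers x scores); the sizes below are arbitrary illustrations.
def _demo_ordinal_sustain_data(n_subjects=10, n_biomarkers=4, n_scores=3, seed=0):
    rng = np.random.default_rng(seed)
    prob_nl = rng.random((n_subjects, n_biomarkers))
    prob_score = rng.random((n_subjects, n_biomarkers, n_scores))
    data = OrdinalSustainData(prob_nl, prob_score, n_biomarkers * n_scores)
    return data.getNumSamples(), data.getNumBiomarkers(), data.getNumStages()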
#*******************************************
#An implementation of the AbstractSustain class with multiple events for each biomarker based on deviations from normality, measured in z-scores.
#There are a fixed number of thresholds for each biomarker, specified at initialization of the OrdinalSustain object.
class OrdinalSustain(AbstractSustain):
def __init__(self,
prob_nl,
prob_score,
score_vals,
biomarker_labels,
N_startpoints,
N_S_max,
N_iterations_MCMC,
output_folder,
dataset_name,
use_parallel_startpoints,
seed=None):
# The initializer for the scored events model implementation of AbstractSustain
# Parameters:
# prob_nl - probability of negative/normal class for all subjects across all biomarkers
# dim: number of subjects x number of biomarkers
# prob_score - probability of each score for all subjects across all biomarkers
# dim: number of subjects x number of biomarkers x number of scores
# score_vals - a matrix specifying the scores for each biomarker
# dim: number of biomarkers x number of scores
# biomarker_labels - the names of the biomarkers as a list of strings
# N_startpoints - number of startpoints to use in maximum likelihood step of SuStaIn, typically 25
# N_S_max - maximum number of subtypes, should be 1 or more
# N_iterations_MCMC - number of MCMC iterations, typically 1e5 or 1e6 but can be lower for debugging
# output_folder - where to save pickle files, etc.
# dataset_name - for naming pickle files
# use_parallel_startpoints - boolean for whether or not to parallelize the maximum likelihood loop
# seed - random number seed
N = prob_nl.shape[1] # number of biomarkers
assert (len(biomarker_labels) == N), "number of labels should match number of biomarkers"
num_scores = score_vals.shape[1]
IX_vals = np.array([[x for x in range(N)]] * num_scores).T
stage_score = np.array([y for x in score_vals.T for y in x])
stage_score = stage_score.reshape(1,len(stage_score))
IX_select = stage_score>0
stage_score = stage_score[IX_select]
stage_score = stage_score.reshape(1,len(stage_score))
num_scores = score_vals.shape[1]
IX_vals = np.array([[x for x in range(N)]] * num_scores).T
stage_biomarker_index = np.array([y for x in IX_vals.T for y in x])
stage_biomarker_index = stage_biomarker_index.reshape(1,len(stage_biomarker_index))
stage_biomarker_index = stage_biomarker_index[IX_select]
stage_biomarker_index = stage_biomarker_index.reshape(1,len(stage_biomarker_index))
prob_score = prob_score.transpose(0,2,1)
prob_score = prob_score.reshape(prob_score.shape[0],prob_score.shape[1]*prob_score.shape[2])
prob_score = prob_score[:,IX_select[0,:]]
prob_score = prob_score.reshape(prob_nl.shape[0],stage_score.shape[1])
self.IX_select = IX_select
self.stage_score = stage_score
self.stage_biomarker_index = stage_biomarker_index
self.biomarker_labels = biomarker_labels
numStages = stage_score.shape[1]
self.__sustainData = OrdinalSustainData(prob_nl, prob_score, numStages)
super().__init__(self.__sustainData,
N_startpoints,
N_S_max,
N_iterations_MCMC,
output_folder,
dataset_name,
use_parallel_startpoints,
seed)
def _initialise_sequence(self, sustainData, rng):
# Randomly initialises a linear z-score model ensuring that the biomarkers
# are monotonically increasing
#
#
# OUTPUTS:
# S - a random linear z-score model under the condition that each biomarker
# is monotonically increasing
N = np.array(self.stage_score).shape[1]
S = np.zeros(N)
for i in range(N):
IS_min_stage_score = np.array([False] * N)
possible_biomarkers = np.unique(self.stage_biomarker_index)
for j in range(len(possible_biomarkers)):
IS_unselected = [False] * N
for k in set(range(N)) - set(S[:i]):
IS_unselected[k] = True
this_biomarkers = np.array([(np.array(self.stage_biomarker_index)[0] == possible_biomarkers[j]).astype(int) +
(np.array(IS_unselected) == 1).astype(int)]) == 2
if not np.any(this_biomarkers):
this_min_stage_score = 0
else:
this_min_stage_score = min(self.stage_score[this_biomarkers])
if (this_min_stage_score):
temp = ((this_biomarkers.astype(int) + (self.stage_score == this_min_stage_score).astype(int)) == 2).T
temp = temp.reshape(len(temp), )
IS_min_stage_score[temp] = True
events = np.array(range(N))
possible_events = np.array(events[IS_min_stage_score])
this_index = np.ceil(rng.random() * ((len(possible_events)))) - 1
S[i] = possible_events[int(this_index)]
S = S.reshape(1, len(S))
return S
def _calculate_likelihood_stage(self, sustainData, S):
'''
Computes the likelihood of a single scored event model
Outputs:
========
p_perm_k - the probability of each subjects data at each stage of a particular subtype
in the SuStaIn model
'''
N = self.stage_score.shape[1]
B = sustainData.prob_nl.shape[1]
IS_normal = np.ones(B)
IS_abnormal = np.zeros(B)
index_reached = np.zeros(B,dtype=int)
M = sustainData.prob_score.shape[0]
p_perm_k = np.zeros((M,N+1))
p_perm_k[:,0] = 1/(N+1)*np.prod(sustainData.prob_nl,1)
for j in range(N):
index_justreached = int(S[j])
biomarker_justreached = int(self.stage_biomarker_index[:,index_justreached])
index_reached[biomarker_justreached] = index_justreached
IS_normal[biomarker_justreached] = 0
IS_abnormal[biomarker_justreached] = 1
bool_IS_normal = IS_normal.astype(bool)
bool_IS_abnormal = IS_abnormal.astype(bool)
p_perm_k[:,j+1] = 1/(N+1)*np.multiply(np.prod(sustainData.prob_score[:,index_reached[bool_IS_abnormal]],1),np.prod(sustainData.prob_nl[:,bool_IS_normal],1))
return p_perm_k
def _optimise_parameters(self, sustainData, S_init, f_init, rng):
# Optimise the parameters of the SuStaIn model
M = sustainData.getNumSamples() #data_local.shape[0]
N_S = S_init.shape[0]
N = self.stage_score.shape[1]
S_opt = S_init.copy() # have to copy or changes will be passed to S_init
f_opt = np.array(f_init).reshape(N_S, 1, 1)
f_val_mat = np.tile(f_opt, (1, N + 1, M))
f_val_mat = np.transpose(f_val_mat, (2, 1, 0))
p_perm_k = np.zeros((M, N + 1, N_S))
for s in range(N_S):
p_perm_k[:, :, s] = self._calculate_likelihood_stage(sustainData, S_opt[s])
p_perm_k_weighted = p_perm_k * f_val_mat
#p_perm_k_norm = p_perm_k_weighted / np.tile(np.sum(np.sum(p_perm_k_weighted, 1), 1).reshape(M, 1, 1), (1, N + 1, N_S)) # the second summation axis is different to Matlab version
# adding 1e-250 fixes divide by zero problem that happens rarely
p_perm_k_norm = p_perm_k_weighted / np.sum(p_perm_k_weighted + 1e-250, axis=(1, 2), keepdims=True)
f_opt = (np.squeeze(sum(sum(p_perm_k_norm))) / sum(sum(sum(p_perm_k_norm)))).reshape(N_S, 1, 1)
f_val_mat = np.tile(f_opt, (1, N + 1, M))
f_val_mat = np.transpose(f_val_mat, (2, 1, 0))
order_seq = rng.permutation(N_S) # this will produce different random numbers to Matlab
for s in order_seq:
order_bio = rng.permutation(N) # this will produce different random numbers to Matlab
for i in order_bio:
current_sequence = S_opt[s]
current_location = np.array([0] * len(current_sequence))
current_location[current_sequence.astype(int)] = np.arange(len(current_sequence))
selected_event = i
move_event_from = current_location[selected_event]
this_stage_score = self.stage_score[0, selected_event]
selected_biomarker = self.stage_biomarker_index[0, selected_event]
possible_scores_biomarker = self.stage_score[self.stage_biomarker_index == selected_biomarker]
# slightly different conditional check to matlab version to protect python from calling min,max on an empty array
min_filter = possible_scores_biomarker < this_stage_score
max_filter = possible_scores_biomarker > this_stage_score
events = np.array(range(N))
if np.any(min_filter):
min_score_bound = max(possible_scores_biomarker[min_filter])
min_score_bound_event = events[((self.stage_score[0] == min_score_bound).astype(int) + (self.stage_biomarker_index[0] == selected_biomarker).astype(int)) == 2]
move_event_to_lower_bound = current_location[min_score_bound_event] + 1
else:
move_event_to_lower_bound = 0
if np.any(max_filter):
max_score_bound = min(possible_scores_biomarker[max_filter])
max_score_bound_event = events[((self.stage_score[0] == max_score_bound).astype(int) + (self.stage_biomarker_index[0] == selected_biomarker).astype(int)) == 2]
move_event_to_upper_bound = current_location[max_score_bound_event]
else:
move_event_to_upper_bound = N
# FIXME: hack because python won't produce an array in range (N,N), while matlab will produce an array (N)... urgh
if move_event_to_lower_bound == move_event_to_upper_bound:
possible_positions = np.array([0])
else:
possible_positions = np.arange(move_event_to_lower_bound, move_event_to_upper_bound)
possible_sequences = np.zeros((len(possible_positions), N))
possible_likelihood = np.zeros((len(possible_positions), 1))
possible_p_perm_k = np.zeros((M, N + 1, len(possible_positions)))
for index in range(len(possible_positions)):
current_sequence = S_opt[s]
#choose a position in the sequence to move an event to
move_event_to = possible_positions[index]
# move this event in its new position
current_sequence = np.delete(current_sequence, move_event_from, 0) # this is different to the Matlab version, which call current_sequence(move_event_from) = []
new_sequence = np.concatenate([current_sequence[np.arange(move_event_to)], [selected_event], current_sequence[np.arange(move_event_to, N - 1)]])
possible_sequences[index, :] = new_sequence
possible_p_perm_k[:, :, index] = self._calculate_likelihood_stage(sustainData, new_sequence)
p_perm_k[:, :, s] = possible_p_perm_k[:, :, index]
total_prob_stage = np.sum(p_perm_k * f_val_mat, 2)
total_prob_subj = np.sum(total_prob_stage, 1)
possible_likelihood[index] = sum(np.log(total_prob_subj + 1e-250))
possible_likelihood = possible_likelihood.reshape(possible_likelihood.shape[0])
max_likelihood = max(possible_likelihood)
this_S = possible_sequences[possible_likelihood == max_likelihood, :]
this_S = this_S[0, :]
S_opt[s] = this_S
this_p_perm_k = possible_p_perm_k[:, :, possible_likelihood == max_likelihood]
p_perm_k[:, :, s] = this_p_perm_k[:, :, 0]
S_opt[s] = this_S
p_perm_k_weighted = p_perm_k * f_val_mat
p_perm_k_norm = p_perm_k_weighted / np.tile(np.sum(np.sum(p_perm_k_weighted, 1), 1).reshape(M, 1, 1), (1, N + 1, N_S)) # the second summation axis is different to Matlab version
f_opt = (np.squeeze(sum(sum(p_perm_k_norm))) / sum(sum(sum(p_perm_k_norm)))).reshape(N_S, 1, 1)
f_val_mat = np.tile(f_opt, (1, N + 1, M))
f_val_mat = np.transpose(f_val_mat, (2, 1, 0))
f_opt = f_opt.reshape(N_S)
total_prob_stage = np.sum(p_perm_k * f_val_mat, 2)
total_prob_subj = np.sum(total_prob_stage, 1)
likelihood_opt = sum(np.log(total_prob_subj + 1e-250))
return S_opt, f_opt, likelihood_opt
def _perform_mcmc(self, sustainData, seq_init, f_init, n_iterations, seq_sigma, f_sigma):
# Take MCMC samples of the uncertainty in the SuStaIn model parameters
N = self.stage_score.shape[1]
N_S = seq_init.shape[0]
if isinstance(f_sigma, float): # FIXME: hack to enable multiplication
f_sigma = np.array([f_sigma])
import numpy as np
import json
from collections import Counter
def find_most_common(l):
"""
Find the most frequently occurring element in list l.
"""
label_count = Counter(l)
most_common_label = label_count.most_common(1)[0][0]
return most_common_label
def entropy(l):
"""
Parameters
----------
l : 1d array-like shape(n_samples, )
"""
feature_values, counts = np.unique(l, return_counts=True)
probabilities = counts/counts.sum()
l_h = (-probabilities*np.log2(probabilities)).sum()
return l_h
def condition_entropy(left, right):
"""
Parameters
----------
left : 1d array-like shape(n_samples, )
The column holding the conditioning feature.
right : 1d array-like shape(n_samples, )
The column whose entropy is computed.
Returns
------------
result : condition entropy
"""
feature_values, counts = np.unique(left, return_counts=True)
probabilities = counts/counts.sum()
entropies = np.zeros((len(feature_values)))
for i,feature_value in enumerate(feature_values):
# For each feature value:
# find the matching indices and slice out the corresponding targets,
# then compute the entropy of that slice.
this_indices = np.argwhere(left == feature_value).reshape(-1)
entropies[i] = entropy(right[this_indices])
result = (probabilities * entropies).sum()
return result
def information_gain(left, right):
"""
Compute the information gain of feature `left` with respect to targets `right`.
Parameters
------------
left : 1d array-like shape(n_samples, )
The column holding the conditioning feature.
right : 1d array-like shape(n_samples, )
The column whose entropy is computed.
Returns
----------
result : information_gain
"""
return entropy(right) - condition_entropy(left, right)
def information_gain_radio(left, right):
"""
Compute the information gain ratio of feature `left` with respect to targets `right`.
Parameters
------------
left : 1d array-like shape(n_samples, )
The column holding the conditioning feature.
right : 1d array-like shape(n_samples, )
The column whose entropy is computed.
Returns
----------
result : information_gain_radio
"""
split_info = entropy(left)
infor_gain = information_gain(left, right)
# FIXME: split_info may be zero, hence the small epsilon -- is this needed?
return infor_gain/(split_info+0.00000001)
def gini(l):
"""
Compute the Gini index of `l`.
Parameters
------------
l : 1d array-like shape(n_samples, )
The column of values whose Gini index is computed.
Returns
----------
result : gini
"""
feature_values, counts = np.unique(l, return_counts=True)
probabilities = counts/counts.sum()
l_h = 1-(np.square(probabilities)).sum()
return l_h
def condition_gini(left, right):
"""
Parameters
----------
left : 1d array-like shape(n_samples, )
The column holding the conditioning feature.
right : 1d array-like shape(n_samples, )
The column whose Gini index is computed.
Returns
------------
result : conditional Gini index
"""
feature_values, counts = np.unique(left, return_counts=True)
probabilities = counts/counts.sum()
entropies = np.zeros((len(feature_values)))
for i,feature_value in enumerate(feature_values):
# For each feature value:
# find the matching indices and slice out the corresponding targets,
# then compute the Gini index of that slice.
this_indices = np.argwhere(left == feature_value).reshape(-1)
entropies[i] = gini(right[this_indices])
result = (probabilities * entropies).sum()
return result
def gini_gain(left, right):
"""
Compute the Gini gain of feature `left` with respect to targets `right`.
Parameters
------------
left : 1d array-like shape(n_samples, )
The column holding the conditioning feature.
right : 1d array-like shape(n_samples, )
The column whose Gini index is computed.
Returns
----------
result : Gini gain
"""
return gini(right) - condition_gini(left, right)
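# Worked example (sketch): the split criteria above evaluated on a tiny toy
# dataset, where `outlook` is the candidate split feature and `play` the label.
def _demo_split_criteria():
    outlook = np.array(['sunny', 'sunny', 'rain', 'rain', 'rain', 'overcast'])
    play = np.array([0, 0, 1, 1, 0, 1])
    print('H(play)            =', entropy(play))
    print('H(play | outlook)  =', condition_entropy(outlook, play))
    print('IG(outlook)        =', information_gain(outlook, play))
    print('IG ratio(outlook)  =', information_gain_radio(outlook, play))
    print('Gini gain(outlook) =', gini_gain(outlook, play))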
class DecisionTreeClassifier():
"""
Learn the mapping from a feature vector [x1, x2, x3, ..., xn] to a class label y.
Parameters
----------
method : string
- 'id3'
- 'c4.5'
- 'cart'
Notes
------
None
"""
def __init__(self, method='id3'):
self.method = method
def build_tree(
self,ori_X,
sub_X, sub_Y, features,
parent_class=None
):
"""
Parameters
------------
sub_X : 2d array-like shape(n_samples, n_features)
sub_Y : 1d array-like shape(n_samples, )
features : list of string
List of feature names that have not yet been used to split the tree.
Returns
-----------
tree : dict
"""
# If the dataset is empty, i.e. the child node received no samples for this feature value
if len(sub_X) == 0:
return parent_class.item()
# Cases where the classification is already decided and a label can be returned directly:
# all remaining samples belong to the same class, so no further splitting is needed
if len((np.unique(sub_Y))) <= 1:
return sub_Y[0].item()
# If the tree is too deep and all features have already been used up
if len(features) == 0:
return parent_class.item()
# Use the most common class in sub_Y as this node's prediction
values, counts = np.unique(sub_Y, return_counts=True)
current_node_class = values[np.argmax(counts)]
from __future__ import division
import numpy as np
import scipy.special, scipy.stats
import ctypes
import logging
logger = logging.getLogger("pygmmis")
# set up multiprocessing
import multiprocessing
import parmap
def createShared(a, dtype=ctypes.c_double):
"""Create a shared array to be used for multiprocessing's processes.
Taken from http://stackoverflow.com/questions/5549190/
Works only for float, double, int, long types (e.g. no bool).
Args:
numpy array, arbitrary shape
Returns:
numpy array whose container is a multiprocessing.Array
"""
shared_array_base = multiprocessing.Array(dtype, a.size)
shared_array = np.ctypeslib.as_array(shared_array_base.get_obj())
shared_array[:] = a.flatten()
shared_array = shared_array.reshape(a.shape)
return shared_array
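# Usage sketch: copy a numpy array into shared memory so that worker processes
# can read it without re-pickling; the toy shape below is only illustrative.
def _demo_create_shared():
    a = np.arange(12, dtype=np.float64).reshape(3, 4)
    shared = createShared(a)  # backed by a multiprocessing.Array
    assert shared.shape == a.shape
    assert np.array_equal(shared, a)
    return shared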
# this is to allow multiprocessing pools to operate on class methods:
# https://gist.github.com/bnyeggen/1086393
def _pickle_method(method):
func_name = method.im_func.__name__
obj = method.im_self
cls = method.im_class
if func_name.startswith('__') and not func_name.endswith('__'): #deal with mangled names
cls_name = cls.__name__.lstrip('_')
func_name = '_' + cls_name + func_name
return _unpickle_method, (func_name, obj, cls)
def _unpickle_method(func_name, obj, cls):
for cls in cls.__mro__:
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__get__(obj, cls)
import types
# python 2 -> 3 adjustments
try:
import copy_reg
except ImportError:
import copyreg as copy_reg
copy_reg.pickle(types.MethodType, _pickle_method, _unpickle_method)
try:
xrange
except NameError:
xrange = range
# Blatant copy from <NAME>'s esutil
# https://github.com/esheldon/esutil/blob/master/esutil/numpy_util.py
def match1d(arr1input, arr2input, presorted=False):
"""
NAME:
match
CALLING SEQUENCE:
ind1,ind2 = match(arr1, arr2, presorted=False)
PURPOSE:
Match two numpy arrays. Return the indices of the matches or empty
arrays if no matches are found. This means arr1[ind1] == arr2[ind2] is
true for all corresponding pairs. arr1 must contain only unique
inputs, but arr2 may be non-unique.
If you know arr1 is sorted, set presorted=True and it will run
even faster
METHOD:
uses searchsorted with some sugar. Much faster than old version
based on IDL code.
REVISION HISTORY:
Created 2015, <NAME>, SLAC.
"""
# make sure 1D
arr1 = np.array(arr1input, ndmin=1, copy=False)
arr2 = np.array(arr2input, ndmin=1, copy=False)
# check for integer data...
if (not issubclass(arr1.dtype.type,np.integer) or
not issubclass(arr2.dtype.type,np.integer)) :
mess="Error: only works with integer types, got %s %s"
mess = mess % (arr1.dtype.type,arr2.dtype.type)
raise ValueError(mess)
if (arr1.size == 0) or (arr2.size == 0) :
mess="Error: arr1 and arr2 must each be non-zero length"
raise ValueError(mess)
# make sure that arr1 has unique values...
test=np.unique(arr1)
if test.size != arr1.size:
raise ValueError("Error: the arr1input must be unique")
# sort arr1 if not presorted
if not presorted:
st1 = np.argsort(arr1)
""" Port of Andy's Matlab scripts """
import numpy as np
from IPython import embed
#%
#% This script plots the solution for the 2-layer solution of the
#% ventilated thermocline equation of LPS. The script follows
#% Pedlosky (1996, Ocean Circulation Theory), section 4.4.
#%
#% The x-coordinate is longitude in radians, and the y-coordinate
#% is f/f0, starting at the equator.
#%
#%
#%
#%
#%
#%
#%
#%
#%%%%%%%%%%%%%%% DO NOT EDIT THE FILE BELOW HERE %%%%%%%%
#%
def two_layers(theta0=60.,theta2=50., rho1=1025.50,rho2=1026.75,
Lx=5e6, W0=2e-6, H2=400, max_depth=-1200):
"""[summary]
Args:
theta0 ([type], optional): [description]. Defaults to 60..
theta2 ([type], optional): [description]. Defaults to 50..
rho1 (float, optional): [description]. Defaults to 1025.50.
rho2 (float, optional): [description]. Defaults to 1026.75.
Lx (width of the box in m, optional): [description]. Defaults to 5e6.
W0 ([type], optional): [description]. Defaults to 2e-6.
H2 (int, optional): [description]. Defaults to 400.
max_depth (int, optional): [description]. Defaults to -1200.
#% Specify the layer densities of the active upper two layers (kg/(m*m*m)).
#% Northern most extent of the model domain (degrees).
#% Latitude of the outcrop line for layer 2 (degrees).
#% Width of the domain (m).
#% Amplitude of the Ekman pumping velocity (m/s).
#% Depth of layer 2 along the eastern boundary (m).
#% NOTE:
#% Define max plotting depth (m). This parameter controls the maximum value
#% plotted on the depth axis. You may need to adjust this if in some of your
#% calculations your layer depths exceed the value of -1200m prescribed here.
"""
g=9.81
rho3=1027.50
#%
# Layer 1 reduced gravity.
gamma1=(rho2-rho1)*g/rho3
# Layer 2 reduced gravity.
gamma2=(rho3-rho2)*g/rho3
#
# Define grid.
#
im=201
jm=201
# Position of y-transect for plotting.
xtrans=Lx/2
# Position of x-transect for plotting.
ytrans=theta0/2
# Earth radius.
eradius=6.371e6
# Angular rotation rate of earth.
Omega=7.292e-5
theta0=theta0*2*np.pi/360
theta2=theta2*2*np.pi/360
f0=2*Omega*np.sin(theta0)
f2=2*Omega*np.sin(theta2)
# Latitude grid-spacing.
dtheta=theta0/(jm-1)
# Longitude grid-spacing.
dx=Lx/(im-1)
dphi=dx/eradius
phie=(im-1)*dphi
#
# Coordinate arrays for plotting.
xarr=np.zeros((im,jm))
yarr=np.zeros((jm,jm))
for i in range(im): #=1:im
xarr[i,:]=i*dphi*eradius/1000
for j in range(jm): #1:jm
yarr[:,j]=j*dtheta*eradius/1000
#embed(header='88 of vt')
#
# Coriolis parameter.
#
#for j=1:jm
theta= np.arange(jm)*dtheta
f=2*Omega*np.sin(theta)
#
# Ekman pumping - Pedlosky eqn 4.4.25.
#
we=np.zeros((im,jm))
for j in range(jm): #1:jm
we[:,j]=-W0*f0*f0*np.sin(np.pi*f[j]/f0)/(f[j]*f[j])
#
# D0^2 from Pedlosky eqn 4.4.26 but NOT using the H2 scaling,
# but instead using the actual factor from 4.4.5 so that H2,
# W0, gamma2, phie and theta0 can be variable parameters.
#
D02=np.zeros((im,jm))
D0fact=4*eradius*eradius*W0*Omega*np.sin(
theta0)*np.sin(theta0)*phie/gamma2
for j in range(jm): #1:jm
for i in range(im): #=1:im
phi=i*dphi
D02[i,j]=D0fact*(1-phi/phie)*np.sin(np.pi*f[j]/f0)
#
# Single layer region f0 <= f <= f2, Pedlosky eqn 4.4.6.
#
# h2(i,j)=sqrt(D02(i,j)+H2*H2);
# h(i,j)=h2(i,j);
h2 = np.sqrt(D02+H2*H2)
h = h2.copy()
#
# Process of subduction, f2 < f <= 0..
#
# Pedlosky eqn 4.4.18, where h=h1+h2.
#
#for j=1:jm
#if f(j) <= f2
# for i=1:im
# h(i,j)=sqrt((D02(i,j)+H2*H2)/(1+gamma1*(1-f(j)/f2)^2/gamma2));
# end
gdf = f <= f2
for j in np.where(gdf)[0]:
for i in range(im): #=1:im
h[i,j]=np.sqrt((D02[i,j]+H2*H2)/(
1+gamma1*(1-f[j]/f2)**2/gamma2))
#
# Pedlosky eqn 4.4.14a,b.
#
h1=np.zeros((im,jm))
for j in np.where(gdf)[0]:
for i in range(im): #=1:im
h1[i,j] = (1-f[j]/f2)*h[i,j]
h2[i,j] = f[j]*h[i,j]/f2
#
# The shadow zone.
# The latitude and longitude of the streamline that defines the
# poleward edge of the shadow zone can be computed by equating
# Pedlosky eqn 4.4.26 and 4.4.22.
# Namely:
# phi=phie*(1-fac*gamma1*(1-f/f2)*(1-f/f2)*H2*H2/gamma2)
# where fac=1/(D0fact*sin(pi*f/f0)).
#
#shadx=ones(jm,1)*phie*eradius/1000;
#shady=zeros(jm,1);
#for j=jm:-1:1
shadx = np.ones(jm)*phie*eradius/1000
shady = np.zeros_like(shadx)
gdj = np.where(gdf)[0]
phi=np.arange(im)*dphi
for j in range(jm-1,-1,-1):
shady[j]=j*dtheta*eradius/1000
if j in gdj:
fac=1/(D0fact*np.sin(np.pi*f[j]/f0))
phi_shadow=phie*(1-fac*gamma1*(1-f[j]/f2)**2*H2*H2/gamma2)
shadx[j]=phi_shadow*eradius/1000
#if j == 0:
# import pdb; pdb.set_trace()
gdphi = phi >= phi_shadow
for i in np.where(gdphi)[0]:
h[i,j]=H2
h1[i,j]=np.sqrt(gamma2*D02[i,j]/gamma1)
h2[i,j]=h[i,j]-h1[i,j]
#import pdb; pdb.set_trace()
#
# The western pool region.
# The latitude and longitude of the streamline that defines the
# eastern edge of the pool region can be found by equating Pedlosky
# eqn 4.6.2 and 4.4.26. It is assumed that the PV is homogenized in the
# pool region which yields Pedlosky eqn 4.6.6 for h and 4.6.5 for h2 in the pool
# in which case h1=h-h2.
# Namely:
# phi=phie*(1-fac*(D02w*(1+gamma1*(1-f/f2)^2/gamma2)/(2*H2^2)
# +gamma1*(f-f/f2)^2/(2*gamma2))
# where fac=1/(D0fact*sin(pi*f/f0)), and D02w is the value of D02 evaluated
# at (0,theta2)..
#
D02w=D0fact*np.sin(np.pi*f2/f0)
Gamma12=gamma1/gamma2
hw=np.sqrt(D02w+H2*H2)
pooly=np.arange(jm)*dtheta*eradius/1000
poolx= np.zeros_like(pooly)
# Tricky one!
phi= np.arange(im)*dphi
for j in np.flip(np.where(gdf)[0]):
fac=1/(D0fact*np.sin(np.pi*f[j]/f0))
fac1=Gamma12*(1-f[j]/f2)**2
phi_pool=phie*(1-fac*(D02w*(1+fac1)+H2*H2*fac1))
poolx[j] = max(phi_pool*eradius/1000, 0.)
gdphi = phi <= phi_pool
for i in np.where(gdphi)[0]:
h[i,j]=Gamma12*f[j]*hw/(
f2*(1+Gamma12))+np.sqrt(
(D02[i,j]+H2*H2)*(
1+Gamma12)-Gamma12*(
f[j]*hw/f2)**2)/(1+Gamma12)
h1[i,j]= h[i,j] - f[j]*hw/f2
#if (i == 10) and (j==10):
# import pdb; pdb.set_trace()
#embed(header='211 of vt')
#
psi1=np.nan*np.ones((im,jm))
psi2=np.nan*np.ones((im,jm))
hp1=h1.copy()
ps=shadx*1000/eradius
phi = np.arange(im)*dphi
gdf = f <= f2
for j in range(jm):
if f[j] > f2:
for i in range(im): #=1:im
hp1[i,j]=np.nan
psi2[i,j]=gamma2*h2[i,j]
else:
for i in range(im): #=1:im
psi1[i,j]= gamma1*h1[i,j]+gamma2*(h1[i,j]+h2[i,j])
if phi[i] < ps[j]:
psi2[i,j]=gamma2*(h1[i,j]+h2[i,j])
#import pdb; pdb.set_trace()
# For plotting
outy=np.ones(jm)*theta2*eradius/1000
outx=np.arange(im)*dphi*eradius/1000
ixt = int((xtrans/dx)+1)
iyt = int(((ytrans*2*np.pi/360)/dtheta)+1)
return xarr, yarr, shadx, shady, outx, outy, poolx, pooly, psi1, psi2, ixt, iyt, h, hp1
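# Usage sketch: solve the default two-layer problem and plot the layer
# interfaces along the mid-basin meridional transect (index ixt returned above).
def _demo_two_layers():
    import matplotlib.pyplot as plt
    (xarr, yarr, shadx, shady, outx, outy, poolx, pooly,
     psi1, psi2, ixt, iyt, h, hp1) = two_layers()
    fig, ax = plt.subplots()
    ax.plot(yarr[ixt, :], -h[ixt, :], label='-h (base of layer 2)')
    ax.plot(yarr[ixt, :], -hp1[ixt, :], label='-h1 (base of layer 1)')
    ax.set_xlabel('y (km)')
    ax.set_ylabel('depth (m)')
    ax.legend()
    return fig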
def three_layers(rho1=0, rho2=0, rho3=0, theta0=0,
theta3=0, theta2=0, Lx=0, W0=0,
H3=0, max_depth=-1200):
"""[summary]
Args:
rho1 (int, optional): [description]. Defaults to 0.
rho2 (int, optional): [description]. Defaults to 0.
rho3 (int, optional): [description]. Defaults to 0.
theta0 (int, optional): [description]. Defaults to 0.
theta3 (int, optional): [description]. Defaults to 0.
theta2 (int, optional): [description]. Defaults to 0.
Lx (int, optional): [description]. Defaults to 0.
W0 (int, optional): [description]. Defaults to 0.
H3 (int, optional): [description]. Defaults to 0.
max_depth (int, optional): [description]. Defaults to -1200.
%
% This script plots the solution for the 3-layer solution of the
% ventilated thermocline equation of LPS. The script follows
% Pedlosky (1996, Ocean Circulation Theory), section 4.7.
%
% The x-coordinate is longitude in radians, and the y-coordinate
% is f/f0, starting at the equator.
%
clear all
close all
%
% Specify the layer densities of the active upper three layers (kg/(m*m*m)).
%
%
% Northern most extent of model domain (degrees).
%
%
% Latitude of the outcrop line for layer 3 (degrees).
%
%
% Latitude of the outcrop line for layer 2 (degrees).
%
%
% Width of the domain (m).
%
%
% Amplitude of the Ekman pumping velocity (m/s).
%
%
% Depth of layer 3 along the eastern boundary (m).
%
%
% NOTE:
% Define max plotting depth (m). This parameter controls the maximum value
% plotted on the depth axis. You may need to adjust this if in some of your
% calculations your layer depths exceed the value of -1200m prescribed here.
%
%
"""
#%%%%%%%%%%%%%%% DO NOT EDIT THE FILE BELOW HERE %%%%%%%%
g=9.81
rho4=1027.75
#% Layer 1 reduced gravity.
gamma1=(rho2-rho1)*g/rho4
#% Layer 2 reduced gravity.
gamma2=(rho3-rho2)*g/rho4
#% Layer 3 reduced gravity.
gamma3=(rho4-rho3)*g/rho4
Gamma12=gamma1/gamma2
Gamma13=gamma1/gamma3
Gamma23=gamma2/gamma3
#%
#% Define grid.
#%
im=201
jm=201
#% Position of y-transect for plotting.
xtrans=Lx/2
#% Position of x-transect for plotting.
ytrans=theta3/2
#% Earth radius.
eradius=6.371e6
#% Angular rotation rate of earth.
Omega=7.292e-5
pi = np.pi
theta0=theta0*2*pi/360
theta3=theta3*2*pi/360
theta2=theta2*2*pi/360
f0=2*Omega*np.sin(theta0)
f3=2*Omega*np.sin(theta3)
#% Latitude grid-spacing.
dtheta=theta0/(jm-1)
j2=int(theta2/dtheta)
theta2=float(j2)*dtheta
f2=2*Omega*np.sin(theta2)
#% Longitude grid-spacing.
dx=Lx/(im-1)
dphi=dx/eradius
phie=(im-1)*dphi
#%
#% Coordinate arrays for plotting.
xarr=np.zeros((im,jm))
yarr=np.zeros((jm,jm))
for j in range (im): #=1:im
for i in range(im): #=1:im
xarr[i,j]=i*dphi*eradius/1000
yarr[i,j]=j*dtheta*eradius/1000
#
# Coriolis parameter.
#
theta = np.arange(jm)*dtheta
from itertools import combinations
import numpy as np
def _test(actual, expected, description=None, debug=False):
"""Compares the numerically derived list of Nash equilibria with the
expected (analytical) solution, and prints the result of the comparison
to screen.
Keyword arguments:
actual -- Numerically derived list of Nash equilibria (np.array assumed)
expected -- Expected (analytical) solution to the game
description -- (Optional) String description of the game
debug -- (Optional) True if print derived Nash equilibria to screen
"""
def _round_iterable(iterable, dec_places=5):
return map(lambda el: round(el, dec_places), iterable)
actual = set([(tuple(_round_iterable(x.flatten().tolist())), tuple(_round_iterable(y.flatten().tolist())))
for (x, y) in actual])
expected = set([(tuple(_round_iterable(x)), tuple(_round_iterable(y))) for (x, y) in expected])
result = "Test for game {}".format(description)
result += " passed." if actual == expected else " failed."
print(result)
if debug:
print("Derived MSNE for game {}:".format(description))
for ne in actual:
print("{}, {}".format(ne[0], ne[1]))
print()
def support_enumeration(payoff_matrix_p1, payoff_matrix_p2):
r"""Implements support enumeration algorithm for computing all Nash
equilibria of a bimatrix game specified by the input payoff matrices per
player, and returns a list consisting of all Nash equilibria of the game.
Each element of the returned list is a tuple of mixed strategies for both
players, with the first element being the mixed strategy of the first
player.
Full theoretical description of the algorithm can be found in
\"Algorithmic Game Theory\" by Nisan et al. (see Algorithm 3.4).
IMPORTANT: The algorithm requires the game to be _nondegenerate_.
Keyword arguments:
payoff_matrix_p1 -- Payoff matrix of player 1 (np.array assumed)
payoff_matrix_p2 -- Payoff matrix of player 2 (np.array assumed)
"""
# Input params
m, n = payoff_matrix_p1.shape
M = range(m)
N = range(n)
# Output params
msne = []
# 1. Find set K={1,...,min{m,n}}
K = range(1, min((m, n)) + 1)
# 2. For each k in K,
for k in K:
# 3. Let M(k) and N(k) be sets of all k-sized subsets of M and N,
# respectively. For each pair (I, J) such that I in M(k) and J in N(k),
for (I, J) in ((I, J) for I in combinations(M, k) for J in combinations(N, k)):
# 4. Solve for mixed strategy vectors x and y
x = np.zeros((m, 1))
y = np.zeros((n, 1))
if k == 1:
# Trivial case: pure strategies
x[I[0]] = 1
y[J[0]] = 1
else:
# Consider constraints for player 1
v = [np.array([payoff_matrix_p2[i, j] for i in I]) for j in J]
A = np.array([v[0] - v[p] for p in range(1, k)] + [np.ones(k)])  # last row enforces sum(x) == 1
b = np.array((k-1)*[0] + [1])
# Try solving matrix equation Ax = b using LU decomposition method
try:
solution = np.linalg.solve(A, b)
# -- if that fails, then x cannot form Nash equilibrium
except np.linalg.linalg.LinAlgError:
continue
# Create mixed strategy vector x
solution.resize(m)
indices = list(I)
if len(indices) < m:
indices += [p for p in range(m) if p not in indices]
for (i,j) in map(lambda i,j: (i,j), indices, range(m)):
x[i] = solution[j]
# For player 2
u = [np.array([payoff_matrix_p1[i, j] for j in J]) for i in I]
A = np.array([u[0] - u[p] for p in range(1, k)] + [np.ones(k)])  # last row enforces sum(y) == 1
b = np.array((k-1)*[0] + [1])
# Try solving matrix equation Ay = b using LU decomposition method
try:
solution = np.linalg.solve(A, b)
# -- if that fails, then y cannot form Nash equilibrium
except np.linalg.linalg.LinAlgError:
continue
# Create mixed strategy vector y
solution.resize(n)
indices = list(J)
if len(indices) < n:
indices += [p for p in range(n) if p not in indices]
for (i,j) in map(lambda i,j: (i,j), indices, range(n)):
y[i] = solution[j]
# Verify that (x, y) constitutes a Nash equilibrium
# 5. Check if both x and y are nonnegative
if all(x >= 0) and all(y >= 0):
# 6. Check if best response condition is met
# For x
v = [np.dot(x.flatten(), payoff_matrix_p2[:,j]) for j in J]
maximum_x = max([np.dot(x.flatten(), payoff_matrix_p2[:,n]) for n in N])
# For y
u = [np.dot(y.flatten(), payoff_matrix_p1[i,:]) for i in I]
maximum_y = max([np.dot(y.flatten(), payoff_matrix_p1[m,:]) for m in M])
# Account for numerical errors from dot product operation on floats
if list(map(lambda el: abs(el - maximum_x) <= .0000001, v)).count(True) == len(v) and \
list(map(lambda el: abs(el - maximum_y) <= .0000001, u)).count(True) == len(u):
# If the last condition is met, add (x, y) to solution list msne
msne += [(x, y)]
return msne
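# Usage sketch: Matching Pennies has a unique mixed Nash equilibrium where both
# players randomize 50/50; support_enumeration should return exactly that pair.
def _demo_support_enumeration():
    payoff_p1 = np.array([[1, -1], [-1, 1]])
    payoff_p2 = -payoff_p1  # zero-sum game
    for (x, y) in support_enumeration(payoff_p1, payoff_p2):
        print(x.flatten(), y.flatten())  # expected: [0.5 0.5] [0.5 0.5]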
def vertex_enumeration(payoff_matrix_p1, payoff_matrix_p2):
r"""Implements vertex enumeration algorithm for computing all Nash
equilibria of a bimatrix game specified by the input payoff matrices per
player, and returns a list consisting of all Nash equilibria of the game.
Each element of the returned list is a tuple of mixed strategies for both
players, with the first element being the mixed strategy of the first
player.
Full theoretical description of the algorithm can be found in
\"Algorithmic Game Theory\" by Nisan et al. (see Algorithm 3.5).
IMPORTANT: The algorithm requires the game to be _nondegenerate_, and
payoff matrices of both players not containing a zero column.
Keyword arguments:
payoff_matrix_p1 -- Payoff matrix of player 1 (np.array assumed)
payoff_matrix_p2 -- Payoff matrix of player 2 (np.array assumed)
"""
# Input params
m, n = payoff_matrix_p1.shape
# Output params
msne = []
# 1. Preprocess by creating a nonnegative payoff matrix for either bidder
minimum = min(payoff_matrix_p1.flatten().tolist() + payoff_matrix_p2.flatten().tolist())
payoff_matrix_p1 += np.ones((m, n), dtype=int) * abs(minimum)
payoff_matrix_p2 += np.ones((m, n), dtype=int) * abs(minimum)
# 2. Find all vertices of player 1's polytope
# Let P be the dictionary of all vertices, where key are the labels
# corresponding to that particular vertex
P = {}
# Create matrices and vectors representing Player 1's polytope boundary constraints
identity = np.identity(m, dtype=int)
import abc
import logging
from typing import Union, Optional, Any, Tuple, Callable
import numpy as np
class EmgDataManager(abc.ABC):
"""
Base class encapsulating all of the operations done on the EMG data. This class abstracts away the implementations
for the various interactions with the (whitened, extended) EMG data, in order to hide whether the EMG data lives
on the CPU or GPU, or whether Dask is being used for computation.
All input and output values to these methods assume "vanilla" numpy arrays on the CPU, even for GPU-enabled data
managers. For GPU-enabled methods, this entails more copying back and forth between the CPU and GPU, but we figure
that's okay since the source vectors / source matrix are generally pretty small.
"""
@property
@abc.abstractmethod
def shape(self):
"""
Shape of the underlying data. Should be n_channels x n_samples (n_channels for the extended data)
"""
pass
@abc.abstractmethod
def squared_sum(self) -> np.ndarray:
"""
Computes the squared sum across all channels for each sample.
:return: n_samples x 1 numpy array
"""
pass
@abc.abstractmethod
def mean_slice(self, indices: np.ndarray) -> np.ndarray:
"""
Slices the EMG data at the given sample indices, and then takes the mean across samples.
:return: n_channels x 1
"""
pass
@abc.abstractmethod
def fast_ica_iteration(
self, wi: np.ndarray, g: Callable[[Any], Any], sources: np.ndarray, extras: Optional[Any] = None) -> \
Tuple[np.ndarray, Optional[Any]]:
"""
Performs a single FastICA iteration. Can return some `extras` which will be provided on the next FastICA
iteration as a simple way to maintain state through ICA iterations.
:param wi: initial candidate source vector, to be tuned in this method
:param g: "g" function for ICA
:param sources: all other existing sources, n_channels x n_sources
:param extras: any data returned from previous iterations of ICA
:return: a new candidate source vector, and anything that should be returned on the next iteration
"""
pass
@abc.abstractmethod
def project(self, sources: np.ndarray) -> np.ndarray:
"""
Returns (wi.T * data)
"""
pass
@abc.abstractmethod
def gamma(self, wi: np.ndarray) -> np.ndarray:
"""
Returns (wi.T * data) .^2
"""
pass
class CpuDataManager(EmgDataManager):
"""
Implementation of EmgDataManager where all data is managed via numpy arrays on the CPU.
"""
def __init__(self, whitened_data: np.ndarray):
self._data = whitened_data
@property
def shape(self):
return self._data.shape
def squared_sum(self) -> np.ndarray:
return (self._data ** 2).sum(axis=0)
def mean_slice(self, indices: np.ndarray) -> np.ndarray:
return self._data[:, indices].mean(axis=1)
def fast_ica_iteration(self, wi: np.ndarray, g, sources: np.ndarray, extras: Optional[Any] = None) -> \
Tuple[np.ndarray, Optional[Any]]:
if extras is not None:
sourcesTsources = extras
else:
sourcesTsources = np.matmul(sources, sources.T)
wiTwhitened = np.dot(wi, self._data)
gwtx, g_wtx = g(wiTwhitened)
whitenedGwtx = np.multiply(self._data, gwtx)
wi = whitenedGwtx.mean(axis=1) - g_wtx * wi
# Orthogonalization
wi = wi - np.dot(sourcesTsources, wi)
# Normalization
wi = wi / np.linalg.norm(wi, 2)
return wi, sourcesTsources
def project(self, sources: np.ndarray) -> np.ndarray:
return np.dot(sources.T, self._data)
def gamma(self, wi: np.ndarray) -> np.ndarray:
return np.dot(wi, self._data) ** 2
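# Usage sketch of the CPU-backed manager on random "whitened" data. The cube
# nonlinearity g below is a common FastICA choice, used here purely for illustration.
def _demo_cpu_data_manager(n_channels=8, n_samples=1000, seed=0):
    rng = np.random.default_rng(seed)
    mgr = CpuDataManager(rng.standard_normal((n_channels, n_samples)))
    sources = rng.standard_normal((n_channels, 2))  # two already-extracted sources
    wi = rng.standard_normal(n_channels)

    def g(x):
        return x ** 3, 3.0 * float(np.mean(x ** 2))

    wi, extras = mgr.fast_ica_iteration(wi, g, sources)
    return wi, mgr.squared_sum().shape, mgr.project(sources).shape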
# Built-in
import warnings
import itertools as itt
import copy
import datetime as dtm # DB
# Common
import numpy as np
import scipy.interpolate as scpinterp
import scipy.stats as scpstats
import matplotlib.pyplot as plt
__all__ = [
'fit1d_dinput',
'fit2d_dinput',
'fit12d_dvalid',
'fit12d_dscales',
]
_NPEAKMAX = 12
_DCONSTRAINTS = {
'bck_amp': False,
'bck_rate': False,
'amp': False,
'width': False,
'shift': False,
'double': False,
'symmetry': False,
}
_DORDER = ['amp', 'width', 'shift']
_SAME_SPECTRUM = False
_DEG = 2
_NBSPLINES = 13
_SYMMETRY_CENTRAL_FRACTION = 0.3
_BINNING = False
_POS = False
_SUBSET = False
_VALID_NSIGMA = 6.
_VALID_FRACTION = 0.8
_LTYPES = [int, float, np.int_, np.float_]
_DBOUNDS = {
'bck_amp': (0., 3.),
'bck_rate': (-3., 3.),
'amp': (0, 10),
'width': (0.01, 2.),
'shift': (-1, 1),
'dratio': (0., 2.),
'dshift': (-10., 10.),
'bs': (-10., 10.),
}
_DX0 = {
'bck_amp': 1.,
'bck_rate': 0.,
'amp': 1.,
'width': 1.,
'shift': 0.,
'dratio': 0.5,
'dshift': 0.,
'bs': 1.,
}
_DINDOK = {
0: 'ok',
-1: 'mask',
-2: 'out of domain',
-3: 'neg or NaN',
-4: 'binning=0',
-5: 'S/N valid, excluded',
-6: 'S/N non-valid, included',
-7: 'S/N non-valid, excluded',
}
###########################################################
###########################################################
#
# Preliminary
# utility tools for 1d spectral fitting
#
###########################################################
###########################################################
def get_symmetry_axis_1dprofile(phi, data, cent_fraction=None):
""" On a series of 1d vertical profiles, find the best symmetry axis """
if cent_fraction is None:
cent_fraction = _SYMMETRY_CENTRAL_FRACTION
# Find the phi in the central fraction
phimin = np.nanmin(phi)
phimax = np.nanmax(phi)
phic = 0.5*(phimax + phimin)
dphi = (phimax - phimin)*cent_fraction
indphi = np.abs(phi-phic) <= dphi/2.
phiok = phi[indphi]
# Compute new phi and associated costs
phi2 = phi[:, None] - phiok[None, :]
phi2min = np.min([np.nanmax(np.abs(phi2 * (phi2 < 0)), axis=0),
np.nanmax(np.abs(phi2 * (phi2 > 0)), axis=0)], axis=0)
indout = np.abs(phi2) > phi2min[None, :]
phi2p = np.abs(phi2)
phi2n = np.abs(phi2)
phi2p[(phi2 < 0) | indout] = np.nan
phi2n[(phi2 > 0) | indout] = np.nan
nok = np.min([np.sum((~np.isnan(phi2p)), axis=0),
np.sum((~np.isnan(phi2n)), axis=0)], axis=0)
cost = np.full((data.shape[0], phiok.size), np.nan)
for ii in range(phiok.size):
indp = np.argsort(np.abs(phi2p[:, ii]))
indn = np.argsort(np.abs(phi2n[:, ii]))
cost[:, ii] = np.nansum(
(data[:, indp] - data[:, indn])[:, :nok[ii]]**2,
axis=1)
return phiok[np.nanargmin(cost, axis=1)]
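# Usage sketch: two Gaussian profiles centred at phi = 0.3; the estimated
# symmetry axis should land close to 0.3 for each row of `data`.
def _demo_symmetry_axis():
    phi = np.linspace(0., 1., 200)
    data = np.exp(-((phi[None, :] - 0.3) / 0.1) ** 2) * np.r_[1., 2.][:, None]
    return get_symmetry_axis_1dprofile(phi, data, cent_fraction=0.5)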
###########################################################
###########################################################
#
# 1d spectral fitting from dlines
#
###########################################################
###########################################################
def _checkformat_dconstraints(dconstraints=None, defconst=None):
# Check constraints
if dconstraints is None:
dconstraints = defconst
# Check dconstraints keys
lk = sorted(_DCONSTRAINTS.keys())
c0 = (
isinstance(dconstraints, dict)
and all([k0 in lk for k0 in dconstraints.keys()])
)
if not c0:
msg = (
"\ndconstraints should contain constraints for spectrum fitting\n"
+ "It be a dict with the following keys:\n"
+ "\t- available keys: {}\n".format(lk)
+ "\t- provided keys: {}".format(dconstraints.keys())
)
raise Exception(msg)
# copy to avoid modifying reference
return copy.deepcopy(dconstraints)
def _checkformat_dconstants(dconstants=None, dconstraints=None):
if dconstants is None:
return
lk = [kk for kk in sorted(dconstraints.keys()) if kk != 'symmetry']
if not isinstance(dconstants, dict):
msg = (
"\ndconstants should be None or a dict with keys in:\n"
+ "\t- available keys: {}\n".format(lk)
+ "\t- provided : {}".format(type(dconstants))
)
raise Exception(msg)
# Check dconstraints keys
lc = [
k0 for k0, v0 in dconstants.items()
if not (
k0 in lk
and (
(
k0 in _DORDER
and isinstance(v0, dict)
and all([
k1 in dconstraints[k0].keys()
and type(v1) in _LTYPES
for k1, v1 in v0.items()
])
)
or (
k0 not in _DORDER
and type(v0) in _LTYPES
)
)
)
]
if len(lc) > 0:
dc0 = [
'\t\t{}: {}'.format(
kk,
sorted(dconstraints[kk].keys()) if kk in _DORDER else float
)
for kk in lk
]
dc1 = [
'\t\t{}: {}'.format(
kk,
sorted(dconstants[kk].keys())
if kk in _DORDER else dconstants[kk]
)
for kk in sorted(dconstants.keys())
]
msg = (
"\ndconstants should be None or a dict with keys in:\n"
+ "\t- available keys:\n"
+ "\n".join(dc0)
+ "\n\t- provided keys:\n"
+ "\n".join(dc1)
)
raise Exception(msg)
# copy to avoid modifying reference
return copy.deepcopy(dconstants)
def _dconstraints_double(dinput, dconstraints, defconst=_DCONSTRAINTS):
dinput['double'] = dconstraints.get('double', defconst['double'])
c0 = (
isinstance(dinput['double'], bool)
or (
isinstance(dinput['double'], dict)
and all([
kk in ['dratio', 'dshift'] and type(vv) in _LTYPES
for kk, vv in dinput['double'].items()
])
)
)
if c0 is False:
msg = (
"dconstraints['double'] must be either:\n"
+ "\t- False: no line doubling\n"
+ "\t- True: line doubling with unknown ratio and shift\n"
+ "\t- {'dratio': float}: line doubling with:\n"
+ "\t \t explicit ratio, unknown shift\n"
+ "\t- {'dshift': float}: line doubling with:\n"
+ "\t \t unknown ratio, explicit shift\n"
+ "\t- {'dratio': float, 'dshift': float}: line doubling with:\n"
+ "\t \t explicit ratio, explicit shift"
)
raise Exception(msg)
def _width_shift_amp(
indict, dconstants=None,
keys=None, dlines=None, nlines=None, k0=None,
):
# ------------------------
# Prepare error message
msg = ''
pavail = sorted(set(itt.chain.from_iterable([
v0.keys() for v0 in dlines.values()
])))
# ------------------------
# Check case
c0 = indict is False
c1 = (
isinstance(indict, str)
and indict in pavail
)
c2 = (
isinstance(indict, dict)
and all([
isinstance(k1, str)
and (
(isinstance(v1, str)) # and v0 in keys)
or (
isinstance(v1, list)
and all([
isinstance(v2, str)
# and v1 in keys
for v2 in v1
])
)
)
for k1, v1 in indict.items()
])
)
c3 = (
isinstance(indict, dict)
and all([
# ss in keys
isinstance(vv, dict)
and all([s1 in ['key', 'coef', 'offset'] for s1 in vv.keys()])
and isinstance(vv['key'], str)
for ss, vv in indict.items()
])
)
c4 = (
isinstance(indict, dict)
and isinstance(indict.get('keys'), list)
and isinstance(indict.get('ind'), np.ndarray)
)
if not any([c0, c1, c2, c3, c4]):
msg = (
f"dconstraints['{k0}'] shoud be either:\n"
f"\t- False ({c0}): no constraint\n"
f"\t- str ({c1}): key from dlines['<lines>'] "
"to be used as criterion\n"
f"\t\t available crit: {pavail}\n"
f"\t- dict ({c2}): "
"{str: line_keyi or [line_keyi, ..., line_keyj}\n"
f"\t- dict ({c3}): "
"{line_keyi: {'key': str, 'coef': , 'offset': }}\n"
f"\t- dict ({c4}): "
"{'keys': [], 'ind': np.ndarray}\n"
f" Available line_keys:\n{sorted(keys)}\n"
f" You provided:\n{indict}"
)
raise Exception(msg)
# ------------------------
# str key to be taken from dlines as criterion
if c0:
lk = keys
ind = np.eye(nlines, dtype=bool)
outdict = {
'keys': np.r_[lk],
'ind': ind,
'coefs': np.ones((nlines,)),
'offset': np.zeros((nlines,)),
}
if c1:
lk = sorted(set([dlines[k1].get(indict, k1) for k1 in keys]))
ind = np.array(
[
[dlines[k2].get(indict, k2) == k1 for k2 in keys]
for k1 in lk
],
dtype=bool,
)
outdict = {
'keys': np.r_[lk],
'ind': ind,
'coefs': np.ones((nlines,)),
'offset': np.zeros((nlines,)),
}
elif c2:
lkl = []
for k1, v1 in indict.items():
if isinstance(v1, str):
v1 = [v1]
v1 = [k2 for k2 in v1 if k2 in keys]
c0 = (
len(set(v1)) == len(v1)
and all([k2 not in lkl for k2 in v1])
)
if not c0:
msg = (
"Inconsistency in indict[{}], either:\n".format(k1)
+ "\t- v1 not unique: {}\n".format(v1)
+ "\t- some v1 not in keys: {}\n".format(keys)
+ "\t- some v1 in lkl: {}".format(lkl)
)
raise Exception(msg)
indict[k1] = v1
lkl += v1
for k1 in set(keys).difference(lkl):
indict[k1] = [k1]
lk = sorted(set(indict.keys()))
ind = np.array(
[[k2 in indict[k1] for k2 in keys] for k1 in lk],
dtype=bool,
)
outdict = {
'keys': np.r_[lk],
'ind': ind,
'coefs': np.ones((nlines,)),
'offset': np.zeros((nlines,)),
}
elif c3:
lk = sorted(set([v0['key'] for v0 in indict.values()]))
lk += sorted(set(keys).difference(indict.keys()))
ind = np.array(
[
[indict.get(k2, {'key': k2})['key'] == k1 for k2 in keys]
for k1 in lk
],
dtype=bool,
)
coefs = np.array([
indict.get(k1, {'coef': 1.}).get('coef', 1.) for k1 in keys
])
offset = np.array([
indict.get(k1, {'offset': 0.}).get('offset', 0.) for k1 in keys
])
outdict = {
'keys': np.r_[lk],
'ind': ind,
'coefs': coefs,
'offset': offset,
}
elif c4:
outdict = indict
if 'coefs' not in indict.keys():
outdict['coefs'] = np.ones((nlines,))
if 'offset' not in indict.keys():
outdict['offset'] = np.zeros((nlines,))
# ------------------------
# Remove group with no match
    indnomatch = np.sum(outdict['ind'], axis=1) == 0
if np.any(indnomatch):
lknom = outdict['keys'][indnomatch]
outdict['keys'] = outdict['keys'][~indnomatch]
outdict['ind'] = outdict['ind'][~indnomatch, :]
lstr = [f"\t- {k1}" for k1 in lknom]
msg = (
f"The following {k0} groups match no lines, they are removed:\n"
+ "\n".join(lstr)
)
warnings.warn(msg)
# ------------------------
# Ultimate conformity checks
assert sorted(outdict.keys()) == ['coefs', 'ind', 'keys', 'offset']
# check ind (root of all subsequent ind arrays)
assert isinstance(outdict['ind'], np.ndarray)
assert outdict['ind'].dtype == np.bool_
assert outdict['ind'].shape == (outdict['keys'].size, nlines)
# check each line is associated to a unique group
assert np.all(np.sum(outdict['ind'], axis=0) == 1)
# check each group is associated to at least one line
assert np.all(np.sum(outdict['ind'], axis=1) >= 1)
assert outdict['coefs'].shape == (nlines,)
assert outdict['offset'].shape == (nlines,)
return outdict
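# Illustrative usage sketch (added for documentation only, not part of the
# original API): shows two of the accepted dconstraints formats handled by
# _width_shift_amp on a made-up dlines dict; all keys and values below are
# placeholders.
def _example_width_shift_amp():
    """ Sketch: grouping lines by a dlines key vs. by an explicit dict """
    dlines = {
        'l0': {'lambda0': 3.95e-10, 'group': 'g0'},
        'l1': {'lambda0': 3.96e-10, 'group': 'g0'},
        'l2': {'lambda0': 3.97e-10, 'group': 'g1'},
    }
    keys = np.array(sorted(dlines.keys()))
    # str case: use the 'group' entry of dlines as grouping criterion
    out_str = _width_shift_amp(
        'group', keys=keys, dlines=dlines, nlines=keys.size, k0='width',
    )
    # dict case: explicit {group_name: [line keys]} mapping
    out_dict = _width_shift_amp(
        {'w0': ['l0', 'l1'], 'w1': ['l2']},
        keys=keys, dlines=dlines, nlines=keys.size, k0='width',
    )
    return out_str, out_dict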
###########################################################
###########################################################
#
# 2d spectral fitting from dlines
#
###########################################################
###########################################################
def _dconstraints_symmetry(
dinput,
dprepare=None,
symmetry=None,
cent_fraction=None,
defconst=_DCONSTRAINTS,
):
if symmetry is None:
symmetry = defconst['symmetry']
dinput['symmetry'] = symmetry
if not isinstance(dinput['symmetry'], bool):
msg = "dconstraints['symmetry'] must be a bool"
raise Exception(msg)
if dinput['symmetry'] is True:
dinput['symmetry_axis'] = get_symmetry_axis_1dprofile(
dprepare['phi1d'],
dprepare['dataphi1d'],
cent_fraction=cent_fraction,
)
###########################################################
###########################################################
#
# data, lamb, phi conformity checks
#
###########################################################
###########################################################
def _checkformat_data_fit12d_dlines_msg(data, lamb, phi=None, mask=None):
datash = data.shape if isinstance(data, np.ndarray) else type(data)
lambsh = lamb.shape if isinstance(lamb, np.ndarray) else type(lamb)
phish = phi.shape if isinstance(phi, np.ndarray) else type(phi)
masksh = mask.shape if isinstance(mask, np.ndarray) else type(mask)
shaped = '(nt, n1)' if phi is None else '(nt, n1, n2)'
shape = '(n1,)' if phi is None else '(n1, n2)'
msg = ("Args data, lamb, phi and mask must be:\n"
+ "\t- data: {} or {} np.ndarray\n".format(shaped, shape)
+ "\t- lamb, phi: both {} np.ndarray\n".format(shape)
+ "\t- mask: None or {}\n".format(shape)
+ " You provided:\n"
+ "\t - data: {}\n".format(datash)
+ "\t - lamb: {}\n".format(lambsh))
if phi is not None:
msg += "\t - phi: {}\n".format(phish)
msg += "\t - mask: {}\n".format(masksh)
return msg
def _checkformat_data_fit12d_dlines(
data, lamb, phi=None,
nxi=None, nxj=None, mask=None,
is2d=False,
):
# Check types
c0 = isinstance(data, np.ndarray) and isinstance(lamb, np.ndarray)
if is2d:
c0 &= isinstance(phi, np.ndarray)
if not c0:
msg = _checkformat_data_fit12d_dlines_msg(
data, lamb, phi=phi, mask=mask,
)
raise Exception(msg)
# Check shapes 1
mindim = 1 if phi is None else 2
phi1d, lamb1d, dataphi1d, datalamb1d = None, None, None, None
if is2d:
# special case
c1 = lamb.ndim == phi.ndim == 1
if c1:
if nxi is None:
nxi = lamb.size
if nxj is None:
nxj = phi.size
lamb1d = np.copy(lamb)
phi1d = np.copy(phi)
lamb = np.repeat(lamb[None, :], nxj, axis=0)
phi = np.repeat(phi[:, None], nxi, axis=1)
if nxi is None or nxj is None:
msg = "Arg (nxi, nxj) must be provided for double-checking shapes"
raise Exception(msg)
c0 = (
data.ndim in mindim + np.r_[0, 1]
and (
lamb.ndim == mindim
and lamb.shape == data.shape[-mindim:]
and lamb.shape == phi.shape
and lamb.shape in [(nxi, nxj), (nxj, nxi)]
)
)
else:
c0 = (
data.ndim in mindim + np.r_[0, 1]
and lamb.ndim == mindim
and lamb.shape == data.shape[-mindim:]
)
if not c0:
msg = _checkformat_data_fit12d_dlines_msg(
data, lamb, phi=phi, mask=mask,
)
raise Exception(msg)
# Check shapes 2
if data.ndim == mindim:
data = data[None, ...]
if is2d and c1:
dataphi1d = np.nanmean(data, axis=2)
datalamb1d = np.nanmean(data, axis=1)
if is2d and lamb.shape == (nxi, nxj):
lamb = lamb.T
phi = phi.T
data = np.swapaxes(data, 1, 2)
# mask
if mask is not None:
if mask.shape != lamb.shape:
if phi is not None and mask.T.shape == lamb.shape:
mask = mask.T
else:
msg = _checkformat_data_fit12d_dlines_msg(
data, lamb, phi=phi, mask=mask,
)
raise Exception(msg)
if is2d:
return lamb, phi, data, mask, phi1d, lamb1d, dataphi1d, datalamb1d
else:
return lamb, data, mask
###########################################################
###########################################################
#
# Domain limitation
#
###########################################################
###########################################################
def _checkformat_domain(domain=None, keys=['lamb', 'phi']):
if keys is None:
keys = ['lamb', 'phi']
if isinstance(keys, str):
keys = [keys]
if domain is None:
domain = {
k0: {
'spec': [np.inf*np.r_[-1., 1.]],
'minmax': np.inf*np.r_[-1., 1.],
}
for k0 in keys
}
return domain
c0 = (
isinstance(domain, dict)
and all([k0 in keys for k0 in domain.keys()])
)
if not c0:
msg = ("\nArg domain must be a dict with keys {}\n".format(keys)
+ "\t- provided: {}".format(domain))
raise Exception(msg)
domain2 = {k0: v0 for k0, v0 in domain.items()}
for k0 in keys:
domain2[k0] = domain2.get(k0, [np.inf*np.r_[-1., 1.]])
ltypesin = [list, np.ndarray]
ltypesout = [tuple]
for k0, v0 in domain2.items():
c0 = (
type(v0) in ltypesin + ltypesout
and (
(
all([type(v1) in _LTYPES for v1 in v0])
and len(v0) == 2
and v0[1] > v0[0]
)
or (
all([
type(v1) in ltypesin + ltypesout
and all([type(v2) in _LTYPES for v2 in v1])
and len(v1) == 2
and v1[1] > v1[0]
for v1 in v0
])
)
)
)
if not c0:
msg = (
"domain[{}] must be either a:\n".format(k0)
+ "\t- np.ndarray or list of 2 increasing values: "
+ "inclusive interval\n"
+ "\t- tuple of 2 increasing values: exclusive interval\n"
+ "\t- a list of combinations of the above\n"
+ " provided: {}".format(v0)
)
raise Exception(msg)
if type(v0) in ltypesout:
v0 = [v0]
else:
c0 = all([
type(v1) in ltypesin + ltypesout
and len(v1) == 2
and v1[1] > v1[0]
for v1 in v0
])
if not c0:
v0 = [v0]
domain2[k0] = {
'spec': v0,
'minmax': [np.nanmin(v0), np.nanmax(v0)],
}
return domain2
def apply_domain(lamb=None, phi=None, domain=None):
lc = [lamb is not None, phi is not None]
if not lc[0]:
msg = "At least lamb must be provided!"
raise Exception(msg)
din = {'lamb': lamb}
if lc[1]:
din['phi'] = phi
domain = _checkformat_domain(domain=domain, keys=din.keys())
ind = np.ones(lamb.shape, dtype=bool)
for k0, v0 in din.items():
indin = np.zeros(v0.shape, dtype=bool)
indout = np.zeros(v0.shape, dtype=bool)
for v1 in domain[k0]['spec']:
indi = (v0 >= v1[0]) & (v0 <= v1[1])
if isinstance(v1, tuple):
indout |= indi
else:
indin |= indi
ind = ind & indin & (~indout)
return ind, domain
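# Illustrative usage sketch (added for documentation only): apply_domain on a
# made-up wavelength vector; lists/arrays are inclusive intervals, tuples are
# excluded intervals.
def _example_apply_domain():
    """ Sketch: keep 3.95-3.99 AA but exclude the 3.96-3.97 AA sub-interval """
    lamb = np.linspace(3.94e-10, 4.00e-10, 200)
    ind, domain = apply_domain(
        lamb=lamb,
        domain={'lamb': [[3.95e-10, 3.99e-10], (3.96e-10, 3.97e-10)]},
    )
    return ind, domain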
###########################################################
###########################################################
#
# binning (2d only)
#
###########################################################
###########################################################
def _binning_check(
binning,
dlamb_ref=None,
dphi_ref=None,
domain=None, nbsplines=None,
):
lk = ['phi', 'lamb']
lkall = lk + ['nperbin']
msg = (
"binning must be dict of the form:\n"
+ "\t- provide number of bins:\n"
+ "\t \t{'phi': int,\n"
+ "\t \t 'lamb': int}\n"
+ "\t- provide bin edges vectors:\n"
+ "\t \t{'phi': 1d np.ndarray (increasing),\n"
+ "\t \t 'lamb': 1d np.ndarray (increasing)}\n"
+ " provided:\n{}".format(binning)
)
# Check input
if binning is None:
binning = _BINNING
if nbsplines is None:
nbsplines = False
if nbsplines is not False:
c0 = isinstance(nbsplines, int) and nbsplines > 0
if not c0:
msg2 = (
"Both nbsplines and deg must be positive int!\n"
+ "\t- nbsplines: {}\n".format(nbsplines)
)
raise Exception(msg2)
# Check which format was passed and return None or dict
ltypes0 = _LTYPES
ltypes1 = [tuple, list, np.ndarray]
lc = [
binning is False,
(
isinstance(binning, dict)
and all([kk in lkall for kk in binning.keys()])
and all([kk in binning.keys() for kk in lk])
),
type(binning) in ltypes0,
type(binning) in ltypes1,
]
if not any(lc):
raise Exception(msg)
if binning is False:
return binning
elif type(binning) in ltypes0:
binning = {
'phi': {'nbins': int(binning)},
'lamb': {'nbins': int(binning)},
}
elif type(binning) in ltypes1:
binning = np.atleast_1d(binning).ravel()
binning = {
'phi': {'edges': binning},
'lamb': {'edges': binning},
}
for kk in lk:
if type(binning[kk]) in ltypes0:
binning[kk] = {'nbins': int(binning[kk])}
elif type(binning[kk]) in ltypes1:
binning[kk] = {'edges': np.atleast_1d(binning[kk]).ravel()}
c0 = all([
all([k1 in ['edges', 'nbins'] for k1 in binning[k0].keys()])
for k0 in lk
])
c0 = (
c0
and all([
(
(
binning[k0].get('nbins') is None
or type(binning[k0].get('nbins')) in ltypes0
)
and (
binning[k0].get('edges') is None
or type(binning[k0].get('edges')) in ltypes1
)
)
for k0 in lk
])
)
if not c0:
raise Exception(msg)
# Check dict
for k0 in lk:
c0 = all([k1 in ['nbins', 'edges'] for k1 in binning[k0].keys()])
if not c0:
raise Exception(msg)
if binning[k0].get('nbins') is not None:
binning[k0]['nbins'] = int(binning[k0]['nbins'])
if binning[k0].get('edges') is None:
binning[k0]['edges'] = np.linspace(
domain[k0]['minmax'][0], domain[k0]['minmax'][1],
binning[k0]['nbins'] + 1,
endpoint=True,
)
else:
binning[k0]['edges'] = np.atleast_1d(
binning[k0]['edges']).ravel()
if binning[k0]['nbins'] != binning[k0]['edges'].size - 1:
raise Exception(msg)
        elif binning[k0].get('edges') is not None:
binning[k0]['edges'] = np.atleast_1d(binning[k0]['edges']).ravel()
binning[k0]['nbins'] = binning[k0]['edges'].size - 1
else:
raise Exception(msg)
# ------------
        # safety checks
if np.any(~np.isfinite(binning[k0]['edges'])):
msg = (
f"Non-finite value in binning['{k0}']['edges']\n"
+ str(binning[k0]['edges'])
)
raise Exception(msg)
        if not np.allclose(
            binning[k0]['edges'],
            np.unique(binning[k0]['edges']),
        ):
            msg = (
                f"binning['{k0}']['edges'] must be strictly increasing\n"
                + str(binning[k0]['edges'])
            )
            raise Exception(msg)
# Optional check vs nbsplines and deg
if nbsplines is not False:
if binning['phi']['nbins'] <= nbsplines:
msg = (
"The number of bins is too high:\n"
+ "\t- nbins = {}\n".format(binning['phi']['nbins'])
+ "\t- nbsplines = {}".format(nbsplines)
)
raise Exception(msg)
# --------------
# Check binning
for (dref, k0) in [(dlamb_ref, 'lamb'), (dphi_ref, 'phi')]:
if dref is not None:
di = np.mean(np.diff(binning[k0]['edges']))
if di < dref:
ni_rec = (
(domain[k0]['minmax'][1] - domain[k0]['minmax'][0]) / dref
)
msg = (
f"binning[{k0}] seems finer than the original!\n"
f"\t- estimated original step: {dref}\n"
f"\t- binning step: {di}\n"
f" => nb. of recommended steps: {ni_rec:5.1f}"
)
warnings.warn(msg)
return binning
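# Illustrative usage sketch (added for documentation only): the two simplest
# accepted binning formats, checked against a made-up (lamb, phi) domain.
def _example_binning_check():
    """ Sketch: scalar nb of bins vs. per-dimension nb of bins """
    domain = {
        'lamb': {'minmax': [3.94e-10, 4.00e-10]},
        'phi': {'minmax': [-0.1, 0.1]},
    }
    # same nb of bins for both dimensions
    b0 = _binning_check(100, domain=domain)
    # nb of bins per dimension
    b1 = _binning_check({'phi': 50, 'lamb': 200}, domain=domain)
    return b0, b1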
def binning_2d_data(
lamb, phi, data,
indok=None,
indok_bool=None,
domain=None, binning=None,
nbsplines=None,
phi1d=None, lamb1d=None,
dataphi1d=None, datalamb1d=None,
):
# -------------------------
# Preliminary check on bins
dlamb_ref, dphi_ref = None, None
if lamb.ndim == 2:
indmid = int(lamb.shape[0]/2)
dlamb_ref = (np.max(lamb[indmid, :]) - np.min(lamb[indmid, :]))
dlamb_ref = dlamb_ref / lamb.shape[1]
indmid = int(lamb.shape[1]/2)
dphi_ref = (np.max(phi[:, indmid]) - np.min(phi[:, indmid]))
dphi_ref = dphi_ref / lamb.shape[0]
# ------------------
# Checkformat input
    binning = _binning_check(
        binning,
        domain=domain,
        dlamb_ref=dlamb_ref,
        dphi_ref=dphi_ref,
        nbsplines=nbsplines,
    )
nspect = data.shape[0]
if binning is False:
if phi1d is None:
phi1d_edges = np.linspace(
domain['phi']['minmax'][0], domain['phi']['minmax'][1], 100,
)
lamb1d_edges = np.linspace(
domain['lamb']['minmax'][0], domain['lamb']['minmax'][1], 100,
)
dataf = data.reshape((nspect, data.shape[1]*data.shape[2]))
dataphi1d = scpstats.binned_statistic(
phi.ravel(),
dataf,
statistic='sum',
bins=phi1d_edges,
)[0]
datalamb1d = scpstats.binned_statistic(
lamb.ravel(),
dataf,
statistic='sum',
bins=lamb1d_edges,
)[0]
phi1d = 0.5*(phi1d_edges[1:] + phi1d_edges[:-1])
lamb1d = 0.5*(lamb1d_edges[1:] + lamb1d_edges[:-1])
return (
lamb, phi, data, indok, binning,
phi1d, lamb1d, dataphi1d, datalamb1d,
)
else:
nphi = binning['phi']['nbins']
nlamb = binning['lamb']['nbins']
bins = (binning['phi']['edges'], binning['lamb']['edges'])
# ------------------
# Compute
databin = np.full((nspect, nphi, nlamb), np.nan)
nperbin = np.full((nspect, nphi, nlamb), np.nan)
indok_new = np.zeros((nspect, nphi, nlamb), dtype=np.int8)
for ii in range(nspect):
databin[ii, ...] = scpstats.binned_statistic_2d(
phi[indok_bool[ii, ...]],
lamb[indok_bool[ii, ...]],
data[ii, indok_bool[ii, ...]],
statistic='mean', # Beware: for valid S/N use sum!
bins=bins,
range=None,
expand_binnumbers=True,
)[0]
nperbin[ii, ...] = scpstats.binned_statistic_2d(
phi[indok_bool[ii, ...]],
lamb[indok_bool[ii, ...]],
np.ones((indok_bool[ii, ...].sum(),), dtype=int),
statistic='sum',
bins=bins,
range=None,
expand_binnumbers=True,
)[0]
binning['nperbin'] = nperbin
lamb1d = 0.5*(
binning['lamb']['edges'][1:] + binning['lamb']['edges'][:-1]
)
phi1d = 0.5*(
binning['phi']['edges'][1:] + binning['phi']['edges'][:-1]
)
lambbin = np.repeat(lamb1d[None, :], nphi, axis=0)
phibin = np.repeat(phi1d[:, None], nlamb, axis=1)
# reconstructing indok
indok_new[np.isnan(databin)] = -1
indok_new[nperbin == 0] = -4
# dataphi1d
dataphi1d = np.full(databin.shape[:2], np.nan)
indok = ~np.all(np.isnan(databin), axis=2)
dataphi1d[indok] = np.nanmean(databin[indok, :], axis=-1)
datalamb1d = np.full(databin.shape[::2], np.nan)
indok = ~np.all(np.isnan(databin), axis=1)
datalamb1d[indok] = (
np.nanmean(databin.swapaxes(1, 2)[indok, :], axis=-1)
+ np.nanstd(databin.swapaxes(1, 2)[indok, :], axis=-1)
)
return (
lambbin, phibin, databin, indok_new, binning,
phi1d, lamb1d, dataphi1d, datalamb1d,
)
###########################################################
###########################################################
#
# dprepare dict
#
###########################################################
###########################################################
def _get_subset_indices(subset, indlogical):
if subset is None:
subset = _SUBSET
if subset is False:
return indlogical
c0 = (
(
isinstance(subset, np.ndarray)
and subset.shape == indlogical.shape
and 'bool' in subset.dtype.name
)
or (
type(subset) in [int, float, np.int_, np.float_]
and subset >= 0
)
)
if not c0:
msg = ("subset must be either:\n"
+ "\t- an array of bool of shape: {}\n".format(indlogical.shape)
+ "\t- a positive int (nb. of ind. to keep from indlogical)\n"
+ "You provided:\n{}".format(subset))
raise Exception(msg)
if isinstance(subset, np.ndarray):
indlogical = subset[None, ...] & indlogical
else:
subset = np.random.default_rng().choice(
indlogical.sum(),
size=int(indlogical.sum() - subset),
replace=False,
shuffle=False,
)
for ii in range(indlogical.shape[0]):
ind = indlogical[ii, ...].nonzero()
indlogical[ii, ind[0][subset], ind[1][subset]] = False
return indlogical
def _extract_lphi_spectra(
data, phi, lamb,
lphi=None, lphi_tol=None,
databin=None, binning=None, nlamb=None,
):
""" Extra several 1d spectra from 2d image at lphi """
# --------------
# Check input
if lphi is None:
lphi = False
if lphi is False:
lphi_tol = False
if lphi is not False:
lphi = np.atleast_1d(lphi).astype(float).ravel()
lphi_tol = float(lphi_tol)
if lphi is False:
return False, False
nphi = len(lphi)
# --------------
# Compute non-trivial cases
if binning is False:
if nlamb is None:
nlamb = lamb.shape[1]
lphi_lamb = np.linspace(lamb.min(), lamb.max(), nlamb+1)
        lphi_spectra = np.full((data.shape[0], nphi, lphi_lamb.size-1), np.nan)
for ii in range(nphi):
indphi = np.abs(phi - lphi[ii]) < lphi_tol
lphi_spectra[:, ii, :] = scpstats.binned_statistic(
lamb[indphi], data[:, indphi], bins=lphi_lamb,
statistic='mean', range=None,
)[0]
else:
lphi_lamb = 0.5*(
binning['lamb']['edges'][1:] + binning['lamb']['edges'][:-1]
)
lphi_phi = 0.5*(
binning['phi']['edges'][1:] + binning['phi']['edges'][:-1]
)
lphi_spectra = np.full((data.shape[0], nphi, lphi_lamb.size), np.nan)
lphi_spectra1 = np.full((data.shape[0], nphi, lphi_lamb.size), np.nan)
for ii in range(nphi):
datai = databin[:, np.abs(lphi_phi - lphi[ii]) < lphi_tol, :]
iok = np.any(~np.isnan(datai), axis=1)
for jj in range(datai.shape[0]):
if np.any(iok[jj, :]):
lphi_spectra[jj, ii, iok[jj, :]] = np.nanmean(
datai[jj, :, iok[jj, :]],
axis=1,
)
return lphi_spectra, lphi_lamb
def _checkformat_possubset(pos=None, subset=None):
if pos is None:
pos = _POS
c0 = isinstance(pos, bool) or type(pos) in _LTYPES
if not c0:
msg = ("Arg pos must be either:\n"
+ "\t- False: no positivity constraints\n"
+ "\t- True: all negative values are set to nan\n"
+ "\t- float: all negative values are set to pos")
raise Exception(msg)
if subset is None:
subset = _SUBSET
return pos, subset
def multigausfit1d_from_dlines_prepare(
data=None, lamb=None,
mask=None, domain=None,
pos=None, subset=None,
update_domain=None,
):
# --------------
# Check input
pos, subset = _checkformat_possubset(pos=pos, subset=subset)
# Check shape of data (multiple time slices possible)
lamb, data, mask = _checkformat_data_fit12d_dlines(
data, lamb, mask=mask,
)
# --------------
# Use valid data only and optionally restrict lamb
indok = np.zeros(data.shape, dtype=np.int8)
if mask is not None:
indok[:, ~mask] = -1
inddomain, domain = apply_domain(lamb, domain=domain)
if mask is not None:
indok[:, (~inddomain) & mask] = -2
else:
indok[:, ~inddomain] = -2
# Optional positivity constraint
if pos is not False:
if pos is True:
data[data < 0.] = np.nan
else:
data[data < 0.] = pos
indok[(indok == 0) & np.isnan(data)] = -3
# Recompute domain
indok_bool = indok == 0
if update_domain is None:
update_domain = bool(np.any(np.isinf(domain['lamb']['minmax'])))
if update_domain is True:
domain['lamb']['minmax'] = [
np.nanmin(lamb[np.any(indok_bool, axis=0)]),
np.nanmax(lamb[np.any(indok_bool, axis=0)]),
]
# --------------
# Optionally fit only on subset
# randomly pick subset indices (replace=False => no duplicates)
# indok = _get_subset_indices(subset, indok)
if np.any(np.isnan(data[indok_bool])):
msg = (
"Some NaNs in data not caught by indok!"
)
raise Exception(msg)
if np.sum(indok_bool) == 0:
msg = "There does not seem to be any usable data (no indok)"
raise Exception(msg)
# --------------
# Return
dprepare = {
'data': data,
'lamb': lamb,
'domain': domain,
'indok': indok,
'indok_bool': indok_bool,
'dindok': dict(_DINDOK),
'pos': pos,
'subset': subset,
}
return dprepare
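# Illustrative usage sketch (added for documentation only): prepares a single
# synthetic 1d spectrum; all numerical values below are made up.
def _example_fit1d_prepare():
    """ Sketch: one Gaussian line on a flat background, in counts """
    lamb = np.linspace(3.94e-10, 4.00e-10, 500)
    data = 1000.*(1. + np.exp(-(lamb - 3.97e-10)**2 / (2e-13)**2))
    dprepare = multigausfit1d_from_dlines_prepare(
        data=data, lamb=lamb, pos=True,
    )
    # dprepare['indok_bool'] flags the (time, lamb) points usable for fitting
    return dprepare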
def multigausfit2d_from_dlines_prepare(
data=None, lamb=None, phi=None,
mask=None, domain=None,
update_domain=None,
pos=None, binning=None,
nbsplines=None, deg=None, subset=None,
nxi=None, nxj=None,
lphi=None, lphi_tol=None,
):
# --------------
# Check input
pos, subset = _checkformat_possubset(pos=pos, subset=subset)
# Check shape of data (multiple time slices possible)
(
lamb, phi, data, mask,
phi1d, lamb1d, dataphi1d, datalamb1d,
) = _checkformat_data_fit12d_dlines(
data, lamb, phi,
nxi=nxi, nxj=nxj, mask=mask, is2d=True,
)
# --------------
# Use valid data only and optionally restrict lamb / phi
indok = np.zeros(data.shape, dtype=np.int8)
if mask is not None:
indok[:, ~mask] = -1
inddomain, domain = apply_domain(lamb, phi, domain=domain)
if mask is not None:
indok[:, (~inddomain) & mask] = -2
else:
indok[:, ~inddomain] = -2
# Optional positivity constraint
if pos is not False:
if pos is True:
data[data < 0.] = np.nan
else:
data[data < 0.] = pos
# Introduce time-dependence (useful for valid)
indok[(indok == 0) & np.isnan(data)] = -3
# Recompute domain
indok_bool = indok == 0
if not np.any(indok_bool):
msg = "No valid point in data!"
raise Exception(msg)
if update_domain is None:
update_domain = bool(
np.any(np.isinf(domain['lamb']['minmax']))
or np.any(np.isinf(domain['phi']['minmax']))
)
if update_domain is True:
domain['lamb']['minmax'] = [
np.nanmin(lamb[np.any(indok_bool, axis=0)]),
np.nanmax(lamb[np.any(indok_bool, axis=0)]),
]
domain['phi']['minmax'] = [
np.nanmin(phi[np.any(indok_bool, axis=0)]),
np.nanmax(phi[np.any(indok_bool, axis=0)]),
]
# --------------
# Optionnal 2d binning
(
lambbin, phibin, databin, indok, binning,
phi1d, lamb1d, dataphi1d, datalamb1d,
) = binning_2d_data(
lamb, phi, data,
indok=indok,
indok_bool=indok_bool,
binning=binning,
domain=domain,
nbsplines=nbsplines,
phi1d=phi1d, lamb1d=lamb1d,
dataphi1d=dataphi1d, datalamb1d=datalamb1d,
)
indok_bool = indok == 0
# --------------
# Optionally fit only on subset
# randomly pick subset indices (replace=False => no duplicates)
# indok_bool = _get_subset_indices(subset, indok == 0)
# --------------
# Optionally extract 1d spectra at lphi
lphi_spectra, lphi_lamb = _extract_lphi_spectra(
data, phi, lamb,
lphi, lphi_tol,
databin=databin,
binning=binning,
)
if np.sum(indok_bool) == 0:
msg = "There does not seem to be any usable data (no indok)"
raise Exception(msg)
# --------------
# Return
dprepare = {
'data': databin, 'lamb': lambbin, 'phi': phibin,
'domain': domain, 'binning': binning,
'indok': indok, 'indok_bool': indok_bool, 'dindok': dict(_DINDOK),
'pos': pos, 'subset': subset, 'nxi': nxi, 'nxj': nxj,
'lphi': lphi, 'lphi_tol': lphi_tol,
'lphi_spectra': lphi_spectra, 'lphi_lamb': lphi_lamb,
'phi1d': phi1d, 'dataphi1d': dataphi1d,
'lamb1d': lamb1d, 'datalamb1d': datalamb1d,
}
return dprepare
def multigausfit2d_from_dlines_dbsplines(
knots=None, deg=None, nbsplines=None,
phimin=None, phimax=None,
symmetryaxis=None,
):
# Check / format input
if nbsplines is None:
nbsplines = _NBSPLINES
c0 = [nbsplines is False, isinstance(nbsplines, int)]
if not any(c0):
msg = "nbsplines must be a int (degree of bsplines to be used!)"
raise Exception(msg)
if nbsplines is False:
lk = ['knots', 'knots_mult', 'nknotsperbs', 'ptsx0', 'nbs', 'deg']
return dict.fromkeys(lk, False)
if deg is None:
deg = _DEG
if not (isinstance(deg, int) and deg <= 3):
msg = "deg must be a int <= 3 (the degree of the bsplines to be used!)"
raise Exception(msg)
if symmetryaxis is None:
symmetryaxis = False
if knots is None:
if phimin is None or phimax is None:
msg = "Please provide phimin and phimax if knots is not provided!"
raise Exception(msg)
phimargin = (phimax - phimin)/1000.
if symmetryaxis is False:
knots = np.linspace(
phimin - phimargin,
phimax + phimargin,
nbsplines + 1 - deg,
)
else:
phi2max = np.max(
np.abs(np.r_[phimin, phimax][None, :] - symmetryaxis[:, None])
)
knots = np.linspace(0, phi2max + phimargin, nbsplines + 1 - deg)
if not np.allclose(knots, np.unique(knots)):
msg = "knots must be a vector of unique values!"
raise Exception(msg)
# Get knots for scipy (i.e.: with multiplicity)
if deg > 0:
knots_mult = np.r_[[knots[0]]*deg, knots, [knots[-1]]*deg]
else:
knots_mult = knots
nknotsperbs = 2 + deg
nbs = knots.size - 1 + deg
assert nbs == knots_mult.size - 1 - deg
if deg == 0:
ptsx0 = 0.5*(knots[:-1] + knots[1:])
elif deg == 1:
ptsx0 = knots
elif deg == 2:
num = (knots_mult[3:]*knots_mult[2:-1]
- knots_mult[1:-2]*knots_mult[:-3])
denom = (knots_mult[3:] + knots_mult[2:-1]
- knots_mult[1:-2] - knots_mult[:-3])
ptsx0 = num / denom
else:
# To be derived analytically for more accuracy
ptsx0 = np.r_[
knots[0],
np.mean(knots[:2]),
knots[1:-1],
np.mean(knots[-2:]),
knots[-1],
]
msg = ("degree 3 not fully implemented yet!"
+ "Approximate values for maxima positions")
warnings.warn(msg)
assert ptsx0.size == nbs
dbsplines = {
'knots': knots, 'knots_mult': knots_mult,
'nknotsperbs': nknotsperbs, 'ptsx0': ptsx0,
'nbs': nbs, 'deg': deg,
}
return dbsplines
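# Illustrative usage sketch (added for documentation only): a small quadratic
# bspline basis on a made-up vertical domain phi in [-0.1, 0.1] rad.
def _example_dbsplines():
    """ Sketch: 7 bsplines of degree 2 """
    dbs = multigausfit2d_from_dlines_dbsplines(
        nbsplines=7, deg=2, phimin=-0.1, phimax=0.1,
    )
    # dbs['knots'] holds nbsplines + 1 - deg = 6 unique knots
    # dbs['ptsx0'] holds one (approximate) maximum position per bspline
    return dbs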
###########################################################
###########################################################
#
# dvalid dict (S/N ratio)
#
###########################################################
###########################################################
def _dvalid_checkfocus_errmsg(focus=None, focus_half_width=None,
lines_keys=None):
msg = ("Please provide focus as:\n"
+ "\t- str: the key of an available spectral line:\n"
+ "\t\t{}\n".format(lines_keys)
+ "\t- float: a wavelength value\n"
+ "\t- a list / tuple / flat np.ndarray of such\n"
+ "\t- a np.array of shape (2, N) or (N, 2) (focus + halfwidth)"
+ " You provided:\n"
+ "{}\n\n".format(focus)
+ "Please provide focus_half_width as:\n"
+ "\t- float: a unique wavelength value for all focus\n"
+ "\t- a list / tuple / flat np.ndarray of such\n"
+ " You provided:\n"
+ "{}".format(focus_half_width))
return msg
def _dvalid_checkfocus(
focus=None,
focus_half_width=None,
lines_keys=None,
lines_lamb=None,
lamb=None,
):
""" Check the provided focus is properly formatted and convert it
focus specifies the wavelength range of interest in which S/N is evaluated
It can be provided as:
- a spectral line key (or list of such)
- a wavelength (or list of such)
    For each wavelength, a spectral range centered on it is defined using
the provided focus_half_width
The focus_half_width can be a unique value applied to all or a list of
values of the same length as focus.
    focus is then returned as a (n, 2) array where:
each line gives a central wavelength and halfwidth of interest
"""
if focus in [None, False]:
return False
# Check focus and transform to array of floats
if isinstance(focus, tuple([str] + _LTYPES)):
focus = [focus]
lc = [
isinstance(focus, (list, tuple, np.ndarray))
and all([
(isinstance(ff, tuple(_LTYPES)) and ff > 0.)
or (isinstance(ff, str) and ff in lines_keys)
for ff in focus
]),
isinstance(focus, (list, tuple, np.ndarray))
and all([
isinstance(ff, (list, tuple, np.ndarray))
for ff in focus
])
and np.asarray(focus).ndim == 2
and 2 in np.asarray(focus).shape
and np.all(np.isfinite(focus))
and np.all(np.asarray(focus) > 0)
]
if not any(lc):
msg = _dvalid_checkfocus_errmsg(
focus, focus_half_width, lines_keys,
)
raise Exception(msg)
# Centered on lines
if lc[0]:
focus = np.array([
lines_lamb[(lines_keys == ff).nonzero()[0][0]]
if isinstance(ff, str) else ff for ff in focus
])
# Check focus_half_width and transform to array of floats
if focus_half_width is None:
focus_half_width = (np.nanmax(lamb) - np.nanmin(lamb))/10.
lc0 = [
type(focus_half_width) in _LTYPES,
(
type(focus_half_width) in [list, tuple, np.ndarray]
and len(focus_half_width) == focus.size
and all([type(fhw) in _LTYPES for fhw in focus_half_width])
)
]
if not any(lc0):
msg = _dvalid_checkfocus_errmsg(
focus, focus_half_width, lines_keys,
)
raise Exception(msg)
if lc0[0] is True:
focus_half_width = np.full((focus.size,), focus_half_width)
focus = np.array([focus, np.r_[focus_half_width]]).T
elif lc[1]:
focus = np.asarray(focus, dtype=float)
if focus.shape[1] != 2:
focus = focus.T
return focus
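# Illustrative usage sketch (added for documentation only): focus given as a
# mix of a line key and a raw wavelength, with a common half-width; the line
# keys and wavelengths below are made up.
def _example_checkfocus():
    """ Sketch: focus is returned as a (nfocus, 2) [center, half-width] array """
    lines_keys = np.array(['l0', 'l1'])
    lines_lamb = np.array([3.95e-10, 3.97e-10])
    lamb = np.linspace(3.94e-10, 4.00e-10, 100)
    focus = _dvalid_checkfocus(
        focus=['l1', 3.96e-10],
        focus_half_width=5e-13,
        lines_keys=lines_keys,
        lines_lamb=lines_lamb,
        lamb=lamb,
    )
    return focus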
def fit12d_dvalid(
data=None, lamb=None, phi=None,
indok_bool=None, binning=None,
valid_nsigma=None, valid_fraction=None,
focus=None, focus_half_width=None,
lines_keys=None, lines_lamb=None, dphimin=None,
nbs=None, deg=None,
knots=None, knots_mult=None, nknotsperbs=None,
return_fract=None,
):
""" Return a dict of valid time steps and phi indices
    data points are considered valid if their signal is sufficient:
np.sqrt(data) >= valid_nsigma
data is supposed to be provided in counts (or photons).. TBC!!!
"""
# Check inputs
if valid_nsigma is None:
valid_nsigma = _VALID_NSIGMA
if valid_fraction is None:
valid_fraction = _VALID_FRACTION
if binning is None:
binning = False
if dphimin is None:
dphimin = 0.
if return_fract is None:
return_fract = False
data2d = data.ndim == 3
nspect = data.shape[0]
focus = _dvalid_checkfocus(
focus=focus,
focus_half_width=focus_half_width,
lines_keys=lines_keys,
lines_lamb=lines_lamb,
lamb=lamb,
)
# Get indices of pts with enough signal
ind = np.zeros(data.shape, dtype=bool)
isafe = np.isfinite(data)
isafe[isafe] = data[isafe] >= 0.
if indok_bool is not None:
isafe &= indok_bool
# Ok with and w/o binning if data provided as counts
if binning is False:
ind[isafe] = np.sqrt(data[isafe]) > valid_nsigma
else:
# For S/N in binning, if counts => sum = mean * nbperbin
ind[isafe] = (
np.sqrt(data[isafe] * binning['nperbin'][isafe]) > valid_nsigma
)
# Derive indt and optionally dphi and indknots
indbs, ldphi = False, False
if focus is False:
lambok = np.ones(tuple(np.r_[lamb.shape, 1]), dtype=bool)
indall = ind[..., None]
else:
# TBC
lambok = np.rollaxis(
np.array([np.abs(lamb - ff[0]) < ff[1] for ff in focus]),
0,
lamb.ndim + 1,
)
indall = ind[..., None] & lambok[None, ...]
nfocus = lambok.shape[-1]
if data2d is True:
# Code ok with and without binning :-)
# Get knots intervals that are ok
fract = np.full((nspect, knots.size-1, nfocus), np.nan)
for ii in range(knots.size - 1):
iphi = (phi >= knots[ii]) & (phi < knots[ii + 1])
fract[:, ii, :] = (
np.sum(np.sum(indall & iphi[None, ..., None],
axis=1), axis=1)
/ np.sum(np.sum(iphi[..., None] & lambok,
axis=0), axis=0)
)
indknots = np.all(fract > valid_fraction, axis=2)
# Deduce ldphi
ldphi = [[] for ii in range(nspect)]
for ii in range(nspect):
for jj in range(indknots.shape[1]):
if indknots[ii, jj]:
if jj == 0 or not indknots[ii, jj-1]:
ldphi[ii].append([knots[jj]])
if jj == indknots.shape[1] - 1:
ldphi[ii][-1].append(knots[jj+1])
else:
if jj > 0 and indknots[ii, jj-1]:
ldphi[ii][-1].append(knots[jj])
# Safety check
assert all([
all([len(dd) == 2 and dd[0] < dd[1] for dd in ldphi[ii]])
for ii in range(nspect)
])
# Deduce indbs that are ok
nintpbs = nknotsperbs - 1
indbs = np.zeros((nspect, nbs), dtype=bool)
for ii in range(nbs):
ibk = np.arange(max(0, ii-(nintpbs-1)), min(knots.size-1, ii+1))
indbs[:, ii] = np.any(indknots[:, ibk], axis=1)
assert np.all(
(np.sum(indbs, axis=1) == 0) | (np.sum(indbs, axis=1) >= deg + 1)
)
# Deduce indt
indt = np.any(indbs, axis=1)
else:
# 1d spectra
if focus is False:
fract = ind.sum(axis=-1) / ind.shape[1]
indt = fract > valid_fraction
else:
fract = np.sum(indall, axis=1) / lambok.sum(axis=0)[None, :]
indt = np.all(fract > valid_fraction, axis=1)
    # Optional debug (disabled: change the literal False below to True)
if focus is not False and False:
indt_debug, ifocus = 40, 1
if data2d is True:
indall2 = indall.astype(int)
indall2[:, lambok] = 1
indall2[ind[..., None] & lambok[None, ...]] = 2
plt.figure()
plt.imshow(indall2[indt_debug, :, :, ifocus].T, origin='lower')
else:
plt.figure()
plt.plot(lamb[~indall[indt_debug, :, ifocus]],
data[indt_debug, ~indall[indt_debug, :, ifocus]], '.k',
lamb[indall[indt_debug, :, ifocus]],
data[indt_debug, indall[indt_debug, :, ifocus]], '.r')
plt.axvline(focus[ifocus, 0], ls='--', c='k')
if not np.any(indt):
msg = (
"\nThere is no valid time step with the provided constraints:\n"
+ "\t- valid_nsigma = {}\n".format(valid_nsigma)
+ "\t- valid_fraction = {}\n".format(valid_fraction)
+ "\t- focus = {}\n".format(focus)
+ f"\t- fract max, mean = {np.max(fract), np.mean(fract)}\n"
+ "\t- fract = {}\n".format(fract)
)
raise Exception(msg)
# return
dvalid = {
'indt': indt, 'ldphi': ldphi, 'indbs': indbs, 'ind': ind,
'focus': focus, 'valid_fraction': valid_fraction,
'valid_nsigma': valid_nsigma,
}
if return_fract is True:
dvalid['fract'] = fract
return dvalid
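# Illustrative usage sketch (added for documentation only): validity flags for
# a flat synthetic 1d spectrum expressed in counts (made-up values).
def _example_fit12d_dvalid_1d():
    """ Sketch: 1000 counts per channel easily passes valid_nsigma = 6 """
    lamb = np.linspace(3.94e-10, 4.00e-10, 300)
    data = np.full((1, lamb.size), 1000.)
    dvalid = fit12d_dvalid(
        data=data, lamb=lamb,
        valid_nsigma=6., valid_fraction=0.6,
    )
    # dvalid['indt'] flags the time steps with enough valid points
    return dvalid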
###########################################################
###########################################################
#
# dlines dict (lines vs domain)
#
###########################################################
###########################################################
def _checkformat_dlines(dlines=None, domain=None):
if dlines is None:
dlines = False
if not isinstance(dlines, dict):
msg = "Arg dlines must be a dict!"
raise Exception(msg)
lc = [
(k0, type(v0)) for k0, v0 in dlines.items()
if not (
isinstance(k0, str)
and isinstance(v0, dict)
and 'lambda0' in v0.keys()
and (
type(v0['lambda0']) in _LTYPES
or (
isinstance(v0['lambda0'], np.ndarray)
and v0['lambda0'].size == 1
)
)
)
]
if len(lc) > 0:
lc = ["\t- {}: {}".format(*cc) for cc in lc]
msg = (
"Arg dlines must be a dict of the form:\n"
+ "\t{'line0': {'lambda0': float},\n"
+ "\t 'line1': {'lambda0': float},\n"
+ "\t ...\n"
+ "\t 'lineN': {'lambda0': float}}\n"
+ " You provided:\n{}".format('\n'.join(lc))
)
raise Exception(msg)
# Select relevant lines (keys, lamb)
lines_keys = np.array([k0 for k0 in dlines.keys()])
lines_lamb = np.array([float(dlines[k0]['lambda0']) for k0 in lines_keys])
if domain not in [None, False]:
ind = np.zeros((len(lines_keys),), dtype=bool)
for ss in domain['lamb']['spec']:
if isinstance(ss, (list, np.ndarray)):
ind[(lines_lamb >= ss[0]) & (lines_lamb < ss[1])] = True
for ss in domain['lamb']['spec']:
if isinstance(ss, tuple):
ind[(lines_lamb >= ss[0]) & (lines_lamb < ss[1])] = False
lines_keys = lines_keys[ind]
lines_lamb = lines_lamb[ind]
inds = np.argsort(lines_lamb)
lines_keys, lines_lamb = lines_keys[inds], lines_lamb[inds]
nlines = lines_lamb.size
dlines = {k0: dict(dlines[k0]) for k0 in lines_keys}
# Warning if no lines left
if len(lines_keys) == 0:
msg = "There seems to be no lines left!"
warnings.warn(msg)
return dlines, lines_keys, lines_lamb
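# Illustrative usage sketch (added for documentation only): a minimal dlines
# dict with made-up keys and rest wavelengths, restricted to a wavelength
# domain (the third line falls outside the domain and is dropped).
def _example_checkformat_dlines():
    """ Sketch: dlines filtering against domain['lamb'] """
    dlines = {
        'l0': {'lambda0': 3.95e-10},
        'l1': {'lambda0': 3.97e-10},
        'l2': {'lambda0': 4.50e-10},
    }
    domain = _checkformat_domain(domain={'lamb': [3.94e-10, 4.00e-10]})
    dlines2, keys, lamb0 = _checkformat_dlines(dlines=dlines, domain=domain)
    return dlines2, keys, lamb0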
###########################################################
###########################################################
#
# dinput dict (lines + spectral constraints)
#
###########################################################
###########################################################
def fit1d_dinput(
dlines=None, dconstraints=None, dconstants=None, dprepare=None,
data=None, lamb=None, mask=None,
domain=None, pos=None, subset=None,
update_domain=None,
same_spectrum=None, nspect=None, same_spectrum_dlamb=None,
focus=None, valid_fraction=None, valid_nsigma=None, focus_half_width=None,
valid_return_fract=None,
dscales=None, dx0=None, dbounds=None,
defconst=_DCONSTRAINTS,
):
""" Check and format a dict of inputs to be fed to fit1d()
This dict will contain all information relevant for solving the fit:
- dlines: dict of lines (with 'lambda0': wavelength at rest)
- lamb: vector of wavelength of the experimental spectrum
- data: experimental spectrum, possibly 2d (time-varying)
- dconstraints: dict of constraints on lines (amp, width, shift)
- pos: bool, consider only positive data (False => replace <0 with nan)
- domain:
- mask:
- subset:
- same_spectrum:
- focus:
"""
# ------------------------
# Check / format dprepare
# ------------------------
if dprepare is None:
dprepare = multigausfit1d_from_dlines_prepare(
data=data, lamb=lamb,
mask=mask, domain=domain,
pos=pos, subset=subset,
update_domain=update_domain,
)
# ------------------------
# Check / format dlines
# ------------------------
dlines, lines_keys, lines_lamb = _checkformat_dlines(
dlines=dlines,
domain=dprepare['domain'],
)
nlines = lines_lamb.size
# Check same_spectrum
if same_spectrum is None:
same_spectrum = _SAME_SPECTRUM
if same_spectrum is True:
        if not isinstance(nspect, (int, np.integer)):
msg = "Please provide nspect if same_spectrum = True"
raise Exception(msg)
if same_spectrum_dlamb is None:
same_spectrum_dlamb = min(
2*np.diff(dprepare['domain']['lamb']['minmax']),
dprepare['domain']['lamb']['minmax'][0],
)
# ------------------------
# Check / format dconstraints
# ------------------------
dconstraints = _checkformat_dconstraints(
dconstraints=dconstraints, defconst=defconst,
)
dinput = {}
# ------------------------
# Check / format double
# ------------------------
_dconstraints_double(dinput, dconstraints, defconst=defconst)
# ------------------------
# Check / format width, shift, amp (groups with possible ratio)
# ------------------------
for k0 in ['amp', 'width', 'shift']:
dinput[k0] = _width_shift_amp(
dconstraints.get(k0, defconst[k0]),
dconstants=dconstants,
keys=lines_keys, nlines=nlines,
dlines=dlines, k0=k0,
)
# ------------------------
# add mz, symb, ION, keys, lamb
# ------------------------
mz = np.array([dlines[k0].get('m', np.nan) for k0 in lines_keys])
symb = np.array([dlines[k0].get('symbol', k0) for k0 in lines_keys])
ion = np.array([dlines[k0].get('ion', '?') for k0 in lines_keys])
# ------------------------
# same_spectrum
# ------------------------
if same_spectrum is True:
        keysadd = np.array([
            [kk + '_bis{:04.0f}'.format(ii) for kk in lines_keys]
            for ii in range(1, nspect)
        ]).ravel()
lines_lamb = (
same_spectrum_dlamb*np.arange(0, nspect)[:, None]
+ lines_lamb[None, :]
)
        lines_keys = np.r_[lines_keys, keysadd]
for k0 in _DORDER:
# Add other lines to original group
keyk = dinput[k0]['keys']
offset = np.tile(dinput[k0]['offset'], nspect)
if k0 == 'shift':
ind = np.tile(dinput[k0]['ind'], (1, nspect))
coefs = (
dinput[k0]['coefs']
* lines_lamb[0, :] / lines_lamb
).ravel()
else:
coefs = np.tile(dinput[k0]['coefs'], nspect)
keysadd = np.array([
[kk+'_bis{:04.0f}'.format(ii) for kk in keyk]
for ii in range(1, nspect)
]).ravel()
ind = np.zeros((keyk.size*nspect, nlines*nspect))
for ii in range(nspect):
i0, i1 = ii*keyk.size, (ii+1)*keyk.size
j0, j1 = ii*nlines, (ii+1)*nlines
ind[i0:i1, j0:j1] = dinput[k0]['ind']
keyk = np.r_[keyk, keysadd]
dinput[k0]['keys'] = keyk
dinput[k0]['ind'] = ind
dinput[k0]['coefs'] = coefs
dinput[k0]['offset'] = offset
nlines *= nspect
lines_lamb = lines_lamb.ravel()
# update mz, symb, ion
mz = np.tile(mz, nspect)
symb = np.tile(symb, nspect)
ion = np.tile(ion, nspect)
# ------------------------
# add lines and properties
# ------------------------
dinput['keys'] = lines_keys
dinput['lines'] = lines_lamb
dinput['nlines'] = nlines
dinput['mz'] = mz
dinput['symb'] = symb
dinput['ion'] = ion
dinput['same_spectrum'] = same_spectrum
if same_spectrum is True:
dinput['same_spectrum_nspect'] = nspect
dinput['same_spectrum_dlamb'] = same_spectrum_dlamb
else:
dinput['same_spectrum_nspect'] = False
dinput['same_spectrum_dlamb'] = False
# ------------------------
# S/N threshold indices
# ------------------------
dinput['valid'] = fit12d_dvalid(
data=dprepare['data'],
lamb=dprepare['lamb'],
indok_bool=dprepare['indok_bool'],
valid_nsigma=valid_nsigma,
valid_fraction=valid_fraction,
focus=focus, focus_half_width=focus_half_width,
lines_keys=lines_keys, lines_lamb=lines_lamb,
return_fract=valid_return_fract,
)
# Update with dprepare
dinput['dprepare'] = dict(dprepare)
# Add dind
dinput['dind'] = multigausfit12d_from_dlines_ind(dinput)
# Add dscales, dx0 and dbounds
dinput['dscales'] = fit12d_dscales(dscales=dscales, dinput=dinput)
dinput['dbounds'] = fit12d_dbounds(dbounds=dbounds, dinput=dinput)
dinput['dx0'] = fit12d_dx0(dx0=dx0, dinput=dinput)
dinput['dconstants'] = fit12d_dconstants(
dconstants=dconstants, dinput=dinput,
)
# add lambmin for bck
dinput['lambmin_bck'] = np.min(dprepare['lamb'])
return dinput
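# Illustrative usage sketch (added for documentation only, not verified
# against the downstream dscales / dx0 / dbounds routines): a minimal
# fit1d_dinput() call on a synthetic two-line spectrum with a shared width
# group; all line keys and numerical values are made up.
def _example_fit1d_dinput():
    """ Sketch: two lines, one common width group, data in counts """
    lamb = np.linspace(3.94e-10, 4.00e-10, 500)
    data = 1000.*(
        1.
        + np.exp(-(lamb - 3.95e-10)**2 / (3e-13)**2)
        + 0.5*np.exp(-(lamb - 3.97e-10)**2 / (3e-13)**2)
    )
    dlines = {
        'l0': {'lambda0': 3.95e-10},
        'l1': {'lambda0': 3.97e-10},
    }
    dconstraints = {'width': {'w0': ['l0', 'l1']}}
    return fit1d_dinput(
        dlines=dlines, dconstraints=dconstraints,
        data=data, lamb=lamb,
    )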
def fit2d_dinput(
dlines=None, dconstraints=None, dconstants=None, dprepare=None,
deg=None, nbsplines=None, knots=None,
data=None, lamb=None, phi=None, mask=None,
domain=None, pos=None, subset=None, binning=None, cent_fraction=None,
update_domain=None,
focus=None, valid_fraction=None, valid_nsigma=None, focus_half_width=None,
valid_return_fract=None,
dscales=None, dx0=None, dbounds=None,
nxi=None, nxj=None,
lphi=None, lphi_tol=None,
defconst=_DCONSTRAINTS,
):
""" Check and format a dict of inputs to be fed to fit2d()
This dict will contain all information relevant for solving the fit:
- dlines: dict of lines (with 'lambda0': wavelength at rest)
- lamb: vector of wavelength of the experimental spectrum
- data: experimental spectrum, possibly 2d (time-varying)
- dconstraints: dict of constraints on lines (amp, width, shift)
- pos: bool, consider only positive data (False => replace <0 with nan)
- domain:
- mask:
- subset:
- same_spectrum:
- focus:
"""
# ------------------------
# Check / format dprepare
# ------------------------
if dprepare is None:
dprepare = multigausfit2d_from_dlines_prepare(
data=data, lamb=lamb, phi=phi,
mask=mask, domain=domain,
pos=pos, subset=subset, binning=binning,
update_domain=update_domain,
nbsplines=nbsplines, deg=deg,
nxi=nxi, nxj=nxj,
lphi=None, lphi_tol=None,
)
# ------------------------
# Check / format dlines
# ------------------------
dlines, lines_keys, lines_lamb = _checkformat_dlines(
dlines=dlines,
domain=dprepare['domain'],
)
nlines = lines_lamb.size
# ------------------------
# Check / format dconstraints
# ------------------------
dconstraints = _checkformat_dconstraints(
dconstraints=dconstraints, defconst=defconst,
)
dinput = {}
# ------------------------
# Check / format symmetry
# ------------------------
_dconstraints_symmetry(
dinput, dprepare=dprepare, symmetry=dconstraints.get('symmetry'),
cent_fraction=cent_fraction, defconst=defconst,
)
# ------------------------
# Check / format double (spectral line doubling)
# ------------------------
_dconstraints_double(dinput, dconstraints, defconst=defconst)
# ------------------------
    # Check / format width, shift, amp (groups with possible ratio)
# ------------------------
for k0 in ['amp', 'width', 'shift']:
dinput[k0] = _width_shift_amp(
dconstraints.get(k0, defconst[k0]),
dconstants=dconstants,
keys=lines_keys, nlines=nlines,
dlines=dlines, k0=k0,
)
# ------------------------
# add mz, symb, ION, keys, lamb
# ------------------------
mz = np.array([dlines[k0].get('m', np.nan) for k0 in lines_keys])
symb = np.array([dlines[k0].get('symbol', k0) for k0 in lines_keys])
ion = np.array([dlines[k0].get('ION', '?') for k0 in lines_keys])
# ------------------------
# add lines and properties
# ------------------------
dinput['keys'] = lines_keys
dinput['lines'] = lines_lamb
dinput['nlines'] = nlines
dinput['mz'] = mz
dinput['symb'] = symb
dinput['ion'] = ion
# ------------------------
# Get dict of bsplines
# ------------------------
dinput.update(multigausfit2d_from_dlines_dbsplines(
knots=knots, deg=deg, nbsplines=nbsplines,
phimin=dprepare['domain']['phi']['minmax'][0],
phimax=dprepare['domain']['phi']['minmax'][1],
symmetryaxis=dinput.get('symmetry_axis')
))
# ------------------------
# S/N threshold indices
# ------------------------
dinput['valid'] = fit12d_dvalid(
data=dprepare['data'],
lamb=dprepare['lamb'],
phi=dprepare['phi'],
binning=dprepare['binning'],
indok_bool=dprepare['indok_bool'],
valid_nsigma=valid_nsigma,
valid_fraction=valid_fraction,
focus=focus, focus_half_width=focus_half_width,
lines_keys=lines_keys, lines_lamb=lines_lamb,
nbs=dinput['nbs'],
deg=dinput['deg'],
knots=dinput['knots'],
knots_mult=dinput['knots_mult'],
nknotsperbs=dinput['nknotsperbs'],
return_fract=valid_return_fract,
)
# Update with dprepare
dinput['dprepare'] = dict(dprepare)
# Add dind
dinput['dind'] = multigausfit12d_from_dlines_ind(dinput)
# Add dscales, dx0 and dbounds
dinput['dscales'] = fit12d_dscales(dscales=dscales, dinput=dinput)
dinput['dbounds'] = fit12d_dbounds(dbounds=dbounds, dinput=dinput)
dinput['dx0'] = fit12d_dx0(dx0=dx0, dinput=dinput)
dinput['dconstants'] = fit12d_dconstants(
dconstants=dconstants, dinput=dinput,
)
# Update indok with non-valid phi
# non-valid = ok but out of dphi
for ii in range(dinput['dprepare']['indok'].shape[0]):
iphino = dinput['dprepare']['indok'][ii, ...] == 0
for jj in range(len(dinput['valid']['ldphi'][ii])):
iphino &= (
(
dinput['dprepare']['phi']
< dinput['valid']['ldphi'][ii][jj][0]
)
| (
dinput['dprepare']['phi']
>= dinput['valid']['ldphi'][ii][jj][1]
)
)
# valid, but excluded (out of dphi)
iphi = (
(dinput['dprepare']['indok'][ii, ...] == 0)
& (dinput['valid']['ind'][ii, ...])
& (iphino)
)
dinput['dprepare']['indok'][ii, iphi] = -5
# non-valid, included (in dphi)
iphi = (
(dinput['dprepare']['indok'][ii, ...] == 0)
& (~dinput['valid']['ind'][ii, ...])
& (~iphino)
)
dinput['dprepare']['indok'][ii, iphi] = -6
# non-valid, excluded (out of dphi)
iphi = (
(dinput['dprepare']['indok'][ii, ...] == 0)
& (~dinput['valid']['ind'][ii, ...])
& (iphino)
)
dinput['dprepare']['indok'][ii, iphi] = -7
    # indok_bool True if indok == 0 or -6 (non-valid points inside dphi kept)
dinput['dprepare']['indok_bool'] = (
(dinput['dprepare']['indok'] == 0)
| (dinput['dprepare']['indok'] == -6)
)
# add lambmin for bck
dinput['lambmin_bck'] = np.min(dinput['dprepare']['lamb'])
return dinput
###########################################################
###########################################################
#
# dind dict (indices storing for fast access)
#
###########################################################
###########################################################
def multigausfit12d_from_dlines_ind(dinput=None):
""" Return the indices of quantities in x to compute y """
# indices
# General shape: [bck, amp, widths, shifts]
# If double [..., double_shift, double_ratio]
# Except for bck, all indices should render nlines (2*nlines if double)
nbs = dinput.get('nbs', 1)
dind = {
'bck_amp': {'x': np.arange(0, nbs)[:, None]},
'bck_rate': {'x': np.arange(nbs, 2*nbs)[:, None]},
'dshift': None,
'dratio': None,
}
nn = dind['bck_amp']['x'].size + dind['bck_rate']['x'].size
inddratio, inddshift = None, None
for k0 in _DORDER:
# l0bs0, l0bs1, ..., l0bsN, l1bs0, ...., lnbsN
ind = dinput[k0]['ind']
lnl = np.sum(ind, axis=1).astype(int)
dind[k0] = {
'x': (
nn
+ nbs*np.arange(0, ind.shape[0])[None, :]
+ np.arange(0, nbs)[:, None]
),
'lines': (
nn
+ nbs*np.argmax(ind, axis=0)[None, :]
+ np.arange(0, nbs)[:, None]
),
# TBF / TBC !!!
'jac': [ind[ii, :].nonzero()[0] for ii in range(ind.shape[0])],
}
nn += dind[k0]['x'].size
sizex = dind['shift']['x'][-1, -1] + 1
nvar_bs = 2 + np.sum([dinput[k0]['ind'].shape[0] for k0 in _DORDER])
indx = np.r_[
dind['bck_amp']['x'].ravel(order='F'),
dind['bck_rate']['x'].ravel(order='F'),
dind['amp']['x'].ravel(order='F'),
dind['width']['x'].ravel(order='F'),
dind['shift']['x'].ravel(order='F'),
]
assert np.allclose(np.arange(0, sizex), indx)
assert nvar_bs == sizex / nbs
# check if double
if dinput['double'] is True:
dind['dshift'] = {'x': np.r_[-2][:, None]}
dind['dratio'] = {'x': np.r_[-1][:, None]}
sizex += 2
elif isinstance(dinput['double'], dict):
if dinput['double'].get('dshift') is None:
dind['dshift'] = {'x': np.r_[-1][:, None]}
sizex += 1
elif dinput['double'].get('dratio') is None:
dind['dratio'] = {'x': np.r_[-1][:, None]}
sizex += 1
dind['nvar_bs'] = nvar_bs # nb of spectral variable with bs dependence
dind['sizex'] = sizex
dind['nbck'] = 2
# Ref line for amp (for x0)
# TBC !!!
amp_x0 = np.zeros((dinput['amp']['ind'].shape[0],), dtype=int)
for ii in range(dinput['amp']['ind'].shape[0]):
indi = dinput['amp']['ind'][ii, :].nonzero()[0]
        if indi.size == 0:
            msg = f"No line associated to amp group {ii} (empty 'ind' row)!"
            raise Exception(msg)
amp_x0[ii] = indi[np.argmin(np.abs(dinput['amp']['coefs'][indi]-1.))]
dind['amp_x0'] = amp_x0
# Make bsplines selections easy
# if dinput['valid']['dphi'] is not False:
# dind['bs']['x'] =
# import pdb; pdb.set_trace() # DB
# pass
return dind
###########################################################
###########################################################
#
# Common checks and format for scales, x0, bounds
#
###########################################################
###########################################################
def _fit12d_checkformat_dscalesx0(
din=None, dinput=None,
name=None, is2d=False,
):
lkconst = ['dratio', 'dshift']
lk = ['bck_amp', 'bck_rate']
lkdict = _DORDER
if din is None:
din = {}
if not isinstance(din, dict):
msg = f"Arg {name} must be a dict!"
raise Exception(msg)
lkfalse = [
k0 for k0, v0 in din.items()
if not (
(k0 in lkconst and type(v0) in _LTYPES)
or (k0 in lk and type(v0) in _LTYPES + [np.ndarray])
or (
k0 in lkdict
and type(v0) in _LTYPES + [np.ndarray]
or (
isinstance(v0, dict)
and all([
k1 in dinput[k0]['keys']
and type(v1) in _LTYPES + [np.ndarray]
for k1, v1 in v0.items()
])
)
)
)
]
if len(lkfalse) > 0:
msg = (
f"Arg {name} must be a dict of the form:\n"
+ "\t- {}\n".format({
kk: 'float' if kk in lkconst+lk
else {k1: 'float' for k1 in dinput[kk]['keys']}
for kk in lkfalse
})
+ "\t- provided: {}".format({
kk: din[kk] for kk in lkfalse
})
)
raise Exception(msg)
return {
k0: dict(v0) if isinstance(v0, dict) else v0
for k0, v0 in din.items()
}
def _fit12d_filldef_dscalesx0_dict(
din=None, din_name=None,
key=None, vref=None,
nspect=None, dinput=None,
):
# Check vref
if vref is not None:
if type(vref) not in _LTYPES and len(vref) not in [1, nspect]:
msg = (
"Non-conform vref for "
+ "{}['{}']\n".format(din_name, key)
+ "\t- expected: float or array (size {})\n".format(nspect)
+ "\t- provided: {}".format(vref)
)
raise Exception(msg)
if type(vref) in _LTYPES:
vref = np.full((nspect,), vref)
elif len(vref) == 1:
vref = np.full((nspect,), vref[0])
# check din[key]
if din.get(key) is None:
assert vref is not None
din[key] = {k0: vref for k0 in dinput[key]['keys']}
elif not isinstance(din[key], dict):
assert type(din[key]) in _LTYPES + [np.ndarray]
if hasattr(din[key], '__len__') and len(din[key]) == 1:
din[key] = din[key][0]
if type(din[key]) in _LTYPES:
din[key] = {
k0: np.full((nspect,), din[key])
for k0 in dinput[key]['keys']
}
elif din[key].shape == (nspect,):
din[key] = {k0: din[key] for k0 in dinput[key]['keys']}
else:
msg = (
"{}['{}'] not conform!".format(dd_name, key)
)
raise Exception(msg)
else:
for k0 in dinput[key]['keys']:
if din[key].get(k0) is None:
din[key][k0] = vref
elif type(din[key][k0]) in _LTYPES:
din[key][k0] = np.full((nspect,), din[key][k0])
elif len(din[key][k0]) == 1:
din[key][k0] = np.full((nspect,), din[key][k0][0])
elif din[key][k0].shape != (nspect,):
msg = (
"Non-conform value for "
+ "{}['{}']['{}']\n".format(din_name, key, k0)
+ "\t- expected: float or array (size {})\n".format(nspect)
+ "\t- provided: {}".format(din[key][k0])
)
raise Exception(msg)
return din
def _fit12d_filldef_dscalesx0_float(
din=None, din_name=None,
key=None, vref=None,
nspect=None,
):
if din.get(key) is None:
if type(vref) in _LTYPES:
            din[key] = np.full((nspect,), vref)
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 26 16:13:04 2021
@author: grego
"""
"""
Snakes and Ladders (1-Player) Markov Decision Processes (MDPs).
This implements the game given in http://ericbeaudry.uqam.ca/publications/ieee-cig-2010.pdf
Adapted from gridworld.py
The MDPs in this module are actually not complete MDPs, but rather the
sub-part of an MDP containing states, actions, and transitions (including
their probabilistic character). Reward-function and terminal-states are
supplied separately.
"""
import numpy as np
from itertools import product
import random
class SnakeLadderWorld:
"""
1-Player Snake and Ladder Game MDP.
Args:
size: Length of the board.
        shortcut_density: Fraction of board squares used as snake/ladder
            sources; the number of links is int(size * shortcut_density).
Attributes:
n_states: The number of states of this MDP.
n_actions: The number of actions of this MDP.
p_transition: The transition probabilities as table. The entry
`p_transition[from, to, a]` contains the probability of
transitioning from state `from` to state `to` via action `a`.
        size: The length of the board.
        actions: The available actions (0: advance one square,
            1: roll one die, 2: roll two dice).
"""
def __init__(self, size, shortcut_density):
### ADD NUMPY RANDOM SEED AT SOME POINT?
self.size = size
self.shortcut_density = shortcut_density
self.actions = [0, 1, 2]
# Need to decide whether to keep states with universally 0 probability
self.n_states = self.size
self.n_actions = len(self.actions)
self.game_board = self._generate_game()
self.p_transition = self._transition_prob_table()
def _generate_game(self):
"""
Builds a board of Snakes and Ladders with (self.size) squares and
int(self.size * self.shortcut_density) Snakes/Ladders
Returns
-------
game_board : np.array
            When landing on square i, game_board[i] gives the final
            location of the player after applying any snake or ladder.
"""
game_board = np.arange(self.size)
num_links = int(self.size * self.shortcut_density)
# Don't let the first/last space be a source/sink
paired_states = np.random.choice(np.arange(1, self.size - 1),
size=(num_links, 2), replace = False)
for source, sink in paired_states:
game_board[source] = sink
return game_board
def _transition_prob_table(self):
"""
Builds the internal probability transition table.
Returns:
The probability transition table of the form
[state_from, state_to, action]
containing all transition probabilities. The individual
transition probabilities are defined by `self._transition_prob'.
"""
table = np.zeros(shape=(self.n_states, self.n_states, self.n_actions))
s1, a = range(self.n_states), range(self.n_actions)
for s_from, a in product(s1, a):
table[s_from, :, a] = self._transition_prob(s_from, a)
return table
def _transition_prob(self, s_from, a):
"""
Compute the transition probability for a single transition.
Args:
s_from: The state in which the transition originates.
a: The action via which the target state should be reached.
Returns:
A vector containing the transition probability from `s_from`
to all states under action `a`.
"""
transition_probs = np.zeros(self.size)
if a == 0:
transition_probs[self._protected_move(s_from, 1)] += 1
if a == 1:
for dist in np.arange(1, 7):
transition_probs[self._protected_move(s_from, dist)] += 1/6
if a==2:
dice_combinations = [1,2,3,4,5,6,5,4,3,2,1]
for dist in np.arange(2, 13):
transition_probs[self._protected_move(s_from, dist)] \
+= dice_combinations[dist-2]/36
return transition_probs
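    # Illustrative sanity check (added for documentation only, not used by
    # the MDP itself): outgoing probabilities must sum to 1 for every action.
    def _example_sanity_check(self):
        """ Sketch: verify p_transition is row-stochastic for each action """
        for a in self.actions:
            assert np.allclose(self.p_transition[:, :, a].sum(axis=1), 1.0)
        return True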
def _protected_move(self, s_cur, offset):
"""
Parameters
----------
        s_cur : int
            Current state.
        offset : int
            Number of spaces to move.
        Returns
        -------
        int
Returns the end state of the move accounting for end of the board
and Snakes/Ladders.
"""
if s_cur + offset >= self.size-1:
return self.size - 1
return self.game_board[s_cur + offset]
def __repr__(self):
return "SnakeLadderWorld(size={})".format(self.size)
def state_features(self):
"""
Rows represent individual states, columns the feature entries.
Returns:
The coordinate-feature-matrix for the specified world.
"""
feature_vector_list = []
feature_vector_list.append(np.arange(0, self.size))
# Put feature functions in this list to include in the MaxEnt method
# Not including all features to see how it affects the model
feature_function_list = [self._next_snake, self._next_ladder, self._worst_outcome_one_dice,
self._worst_outcome_one_dice]
for func in feature_function_list:
            func = np.vectorize(func)
import unittest
from ABBA import ABBA
import numpy as np
import warnings
from util import dtw
def ignore_warnings(test_func):
def do_test(self, *args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
test_func(self, *args, **kwargs)
return do_test
class test_ABBA(unittest.TestCase):
#--------------------------------------------------------------------------#
# _check_parameters
#--------------------------------------------------------------------------#
def test_CheckParameters_TolFloat(self):
"""
tolerance should be float not integer
"""
self.assertRaises(ValueError, ABBA, tol=1)
def test_CheckParameters_TolList(self):
"""
tolerance should be list, maximum size 2
"""
self.assertRaises(ValueError, ABBA, tol=[1.0, 1.0, 1.0])
def test_CheckParameters_SclPositive(self):
"""
Scaling parameter should be >=0
"""
self.assertRaises(ValueError, ABBA, scl=-0.1)
def test_CheckParameters_KBounds(self):
"""
min_k and max_k bounds should be such that min_k < max_k
"""
self.assertRaises(ValueError, ABBA, min_k=6, max_k=3)
#--------------------------------------------------------------------------#
# transform
#--------------------------------------------------------------------------#
def test_transform_SimpleExample(self):
"""
        Check that the transform function returns the same results as performing
compression followed by digitization.
"""
abba = ABBA(verbose=0, scl=1)
ts = np.random.rand(20).tolist()
string, centers = abba.transform(ts)
pieces = abba.compress(np.array(ts))
string2, centers2 = abba.digitize(pieces)
self.assertTrue(np.allclose(centers, centers2))
#--------------------------------------------------------------------------#
# inverse_transform
#--------------------------------------------------------------------------#
def test_InverseTransform_SimpleExample(self):
"""
        Check that inverse_transform returns the same results as performing
inverse_digitization followed by quantization then inverse_compression.
"""
abba = ABBA(verbose=0, scl=1)
ts = np.random.rand(20)
pieces = abba.compress(np.array(ts))
string, centers = abba.digitize(pieces)
reconstructed_ts1 = abba.inverse_transform(string, centers, ts[0])
pieces1 = abba.inverse_digitize(string, centers)
pieces1 = abba.quantize(pieces1)
reconstructed_ts2 = abba.inverse_compress(ts[0], pieces1)
self.assertTrue(np.allclose(reconstructed_ts1, reconstructed_ts2))
#--------------------------------------------------------------------------#
# compress
#--------------------------------------------------------------------------#
@ignore_warnings
def test_Compress_tslength2(self):
"""
Test compression when time series given is of length 2
"""
ts = [1, 3]
abba = ABBA(verbose=0)
pieces = abba.compress(ts)
self.assertTrue(np.allclose(np.array([[1.0,2.0,0.0]]), pieces))
@ignore_warnings
def test_Compress_Flatline(self):
"""
Test compression on a flat time series
"""
ts = [1]*100
abba = ABBA(verbose=0, tol=[0.1])
pieces = abba.compress(ts)
self.assertTrue(np.allclose(np.array([[99,0.0,0.0]]), pieces))
@ignore_warnings
def test_Compress_NoCompression(self):
"""
Test compression on a time series where the tolerance is so small that no
compression is achieved
"""
ts = [1, -1]*50
abba = ABBA(verbose=0)
pieces = abba.compress(ts)
correct_pieces = [[1, -2, 0], [1, 2, 0]]*49
correct_pieces += [[1, -2, 0]]
correct_pieces = np.array(correct_pieces)
self.assertTrue(np.allclose(correct_pieces, pieces))
@ignore_warnings
def test_Compress_Norm2(self):
"""
Test compression with norm = 2
"""
ts = [0, 2, 3, 2, 4, -1, 0, -1, 1, 0, -4, 0]
abba = ABBA(tol=2.0, verbose=0)
pieces = abba.compress(ts)
correct_pieces = [[4, 4, 3],
[1, -5, 0],
[4, 1, 38/16],
[1, -4, 0],
[1, 4, 0]]
correct_pieces = np.array(correct_pieces)
self.assertTrue(np.allclose(correct_pieces, pieces))
@ignore_warnings
def test_Compress_Norm1(self):
"""
Test compression with norm = 1
"""
ts = [0, 2, 3, 2, 4, -1, 0, -1, 1, 0, -4, 0]
abba = ABBA(tol=2.0, verbose=0, norm=1)
pieces = abba.compress(ts)
correct_pieces = [[4, 4, 3],
[1, -5, 0],
[4, 1, 5/2],
[1, -4, 0],
[1, 4, 0]]
correct_pieces = np.array(correct_pieces)
self.assertTrue(np.allclose(correct_pieces, pieces))
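# Worked check of the third piece [4, 1, 5/2] above (a sketch, using the conventions
# inferred from these tests): the piece covers the values [-1, 0, -1, 1, 0]; the
# straight line from -1 to 0 over four steps is [-1, -0.75, -0.5, -0.25, 0], so the
# absolute deviations [0, 0.75, 0.5, 1.25, 0] sum to 5/2 for norm=1, while the squared
# deviations sum to 0.5625 + 0.25 + 1.5625 = 38/16 for norm=2 in the previous test.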
#--------------------------------------------------------------------------#
# inverse_compress
#--------------------------------------------------------------------------#
@ignore_warnings
def test_InverseCompress_OnePiece(self):
"""
Test inverse_compress with only one piece
"""
abba = ABBA(verbose=0)
pieces = np.array([[1,4.0,0]])
ts = abba.inverse_compress(0, pieces)
correct_ts = np.array([0, 4])
self.assertTrue(np.allclose(ts, correct_ts))
@ignore_warnings
def test_InverseCompress_Example(self):
"""
Test inverse_compress on generic example
"""
pieces = [[4, 4, 3],
[1, -5, 0],
[4, 1, 5/2],
[1, -4, 0],
[1, 4, 0]]
pieces = np.array(pieces)
abba = ABBA(verbose=0)
ts = abba.inverse_compress(0, pieces)
correct_ts = np.array([0, 1, 2, 3, 4, -1, -3/4, -2/4, -1/4, 0, -4, 0])
self.assertTrue(np.allclose(ts, correct_ts))
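# The reconstruction above is the pieces stitched end to end as straight lines:
# starting from the supplied value 0, each piece [len, inc, .] appends 'len' points
# rising linearly by a total of 'inc' from the previous endpoint, e.g. the piece
# [4, 1, 5/2] becomes the ramp -1, -3/4, -2/4, -1/4, 0.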
#--------------------------------------------------------------------------#
# digitize
#--------------------------------------------------------------------------#
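# A note on the scl parameter in the digitize tests below (inferred from the expected
# centres): scl weights the length column relative to the increment column during
# clustering, so scl=0 effectively clusters on the increments alone, scl=np.inf on the
# lengths alone, and scl=1 uses both columns. The column not used for clustering is
# then filled in with the within-cluster mean when the centres are built.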
@ignore_warnings
def test_Digitize_ExampleScl0(self):
"""
Test digitize function on same generic example with scl = 0
"""
abba = ABBA(scl=0, verbose=0, seed=True)
pieces = [[4, 4, 3],
[1, -5, 0],
[4, 1, 5/2],
[1, -4, 0],
[1, 4, 0]]
pieces = np.array(pieces)
string, centers = abba.digitize(pieces)
correct_centers = np.array([[3, 3], [1, -9/2]])
self.assertTrue(all([string=='ababa', np.allclose(centers, correct_centers)]))
@ignore_warnings
def test_Digitize_ExampleScl1(self):
"""
Test digitize function on same generic example with scl = 1
"""
abba = ABBA(scl=1, verbose=0, seed=True)
pieces = [[4, 4, 3],
[1, -5, 0],
[4, 1, 5/2],
[1, -4, 0],
[1, 4, 0]]
pieces = np.array(pieces)
string, centers = abba.digitize(pieces)
correct_centers = np.array([[4, 5/2], [1, -9/2], [1, 4]])
self.assertTrue(all([string=='ababc', np.allclose(centers, correct_centers)]))
@ignore_warnings
def test_Digitize_ExampleSclInf(self):
"""
Test digitize function on same generic example with scl = inf
"""
abba = ABBA(scl=np.inf, verbose=0, seed=True)
pieces = [[4, 4, 3],
[1, -5, 0],
[4, 1, 5/2],
[1, -4, 0],
[1, 4, 0]]
pieces = np.array(pieces)
string, centers = abba.digitize(pieces)
correct_centers = np.array([[1, -5/3], [4, 5/2]])
self.assertTrue(all([string=='babaa', np.allclose(centers, correct_centers)]))
@ignore_warnings
def test_Digitize_SymbolOrdering(self):
"""
Test that the digitize function orders the letters by the most frequently occurring symbol.
"""
abba = ABBA(verbose=0)
pieces = [[1,1,0],
[50,50,0],
[100,100,0],
[2,2,0],
[51,51,0],
[3,3,0]]
pieces = np.array(pieces).astype(float)
string, centers = abba.digitize(pieces)
self.assertTrue('abcaba'==string)
@ignore_warnings
def test_Digitize_OneCluster(self):
"""
Test digitize function to make one large cluster
"""
inc = np.random.randn(100,1)
abba = ABBA(verbose=0, min_k=1, tol=10.0)
pieces = np.hstack([np.ones((100,1)), inc, np.zeros((100,1))])
string, centers = abba.digitize(pieces)
self.assertTrue('a'*100 == string)
@ignore_warnings
def test_Digitize_NotEnoughPieces(self):
"""
Test digitize function where min_k is greater than the number of pieces
"""
abba = ABBA(verbose=0, min_k=10)
pieces = [[4, 4, 3],
[1, -5, 0],
[4, 1, 5/2],
[1, -4, 0],
[1, 4, 0]]
pieces = np.array(pieces)
self.assertRaises(ValueError, abba.digitize, pieces)
@ignore_warnings
def test_Digitize_TooManyK(self):
"""
Test digitize function where fewer than min_k clusters are required for perfect
clustering.
"""
abba = ABBA(verbose=0, min_k=3, seed=True)
pieces = [[1, 1, 0],
[1, 1, 0],
[1, 1, 0],
[1, 1, 0],
[1, 1, 0]]
pieces = np.array(pieces).astype(float)
string, centers = abba.digitize(pieces)
correct_centers = np.array([[1, 1], [1, 1], [1, 1]])
self.assertTrue(all([string=='aaaaa', np.allclose(centers, correct_centers)]))
@ignore_warnings
def test_Digitize_zeroerror(self):
"""
Test digitize function when the error is zero, i.e. the maximum number of clusters is used.
"""
abba = ABBA(verbose=0, max_k=5, tol=[0.01, 0])
pieces = [[1, 1, 0],
[1, 2, 0],
[1, 3, 0],
[1, 4, 0],
[1, 5, 0]]
pieces = np.array(pieces).astype(float)
string, centers = abba.digitize(pieces)
correct_centers = np.array([[1, 1], [1, 2], [1, 3], [1, 4], [1, 5]])
self.assertTrue(all([string=='abcde', np.allclose(centers, correct_centers)]))
#--------------------------------------------------------------------------#
# inverse_digitize
#--------------------------------------------------------------------------#
@ignore_warnings
def test_InverseDigitize_example(self):
"""
Test inverse digitize on a generic example
"""
abba = ABBA(verbose=0)
centers = np.array([[3, 3], [1, -9/2]]).astype(float)
string = 'ababa'
pieces = abba.inverse_digitize(string, centers)
correct_pieces = [[3, 3],
[1, -9/2],
[3, 3],
[1, -9/2],
[3, 3]]
correct_pieces = np.array(correct_pieces).astype(float)
self.assertTrue(np.allclose(pieces, correct_pieces))
#--------------------------------------------------------------------------#
# quantize
#--------------------------------------------------------------------------#
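# A note on quantize (inferred from the two examples below): piece lengths are rounded
# to integers while the rounding remainder is carried over to the next piece, so the
# total length is preserved, e.g. four lengths of 7/4 plus four of 5/4 (total 12)
# become [2, 2, 1, 2, 1, 2, 1, 1], which still sums to 12.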
@ignore_warnings
def test_Quantize_NoRoundingNeeded(self):
"""
Test quantize function on an array where no rounding is needed
"""
pieces = [[2, 1],
[3, 1],
[4, 2],
[1, 2],
[1, -5],
[2, -1]]
pieces = np.array(pieces)
abba = ABBA(verbose=0)
self.assertTrue(np.allclose(pieces, abba.quantize(pieces)))
@ignore_warnings
def test_Quantize_AccumulateError(self):
"""
Test quantize function with distributed rounding
"""
pieces = [[7/4, 1],
[7/4, 1],
[7/4, 1],
[7/4, 1],
[5/4, 1],
[5/4, 1],
[5/4, 1],
[5/4, 1]]
pieces = np.array(pieces).astype(float)
abba = ABBA(verbose=0)
pieces = abba.quantize(pieces)
correct_pieces = [[2, 1],
[2, 1],
[1, 1],
[2, 1],
[1, 1],
[2, 1],
[1, 1],
[1, 1]]
self.assertTrue(np.allclose(correct_pieces, pieces))
@ignore_warnings
def test_Quantise_Half(self):
"""
Test quantize function where all values are 1.5
"""
pieces = [[3/2, 1],
[3/2, 1],
[3/2, 1],
[3/2, 1],
[3/2, 1],
[3/2, 1],
[3/2, 1],
[3/2, 1]]
pieces = np.array(pieces).astype(float)
abba = ABBA(verbose=0)
pieces = abba.quantize(pieces)
correct_pieces = [[2, 1],
[1, 1],
[2, 1],
[1, 1],
[2, 1],
[1, 1],
[2, 1],
[1, 1]]
self.assertTrue(np.allclose(correct_pieces, pieces))
#--------------------------------------------------------------------------#
# _build_centers
#--------------------------------------------------------------------------#
@ignore_warnings
def test_BuildCenters_c1(self):
"""
Test utility function _build_centers when rebuilding the first (length) column, col=0, with the second-column centre values supplied via c1
"""
pieces = [[4, 4],
[1, -5],
[4, 1],
[1, -4],
[1, 4]]
pieces = np.array(pieces).astype(float)
labels = np.array([0, 1, 1, 1, 0])
k = 2
c1 = [4,-4]
col = 0
abba = ABBA(verbose=0)
c = abba._build_centers(pieces, labels, c1, k, col)
correct_c = np.array([[5/2, 4], [2, -4]])
self.assertTrue(np.allclose(correct_c, c))
@ignore_warnings
def test_BuildCenters_c2(self):
"""
Test utility function _build_centers on column 1
"""
pieces = [[4, 4],
[1, -5],
[4, 1],
[1, -4],
[1, 4]]
pieces = np.array(pieces).astype(float)
labels = np.array([0, 1, 0, 1, 1])
k = 2
c1 = [4,1]
col = 1
abba = ABBA(verbose=0)
c = abba._build_centers(pieces, labels, c1, k, col)
correct_c = np.array([[4, 5/2], [1, -5/3]])
self.assertTrue(np.allclose(correct_c, c))
#--------------------------------------------------------------------------#
# _max_cluster_var
#--------------------------------------------------------------------------#
@ignore_warnings
def test_MaxClusterVar_example(self):
"""
Test utility function _max_cluster_var
"""
pieces = [[4, 4],
[1, -5],
[4, 1],
[1, -4],
[1, 4]]
pieces = np.array(pieces).astype(float)
labels = np.array([0, 0, 0, 1, 1])
centers = np.array([[3, 0], [1, 0]]).astype(float)
k = 2
abba = ABBA()
(e1, e2) = abba._max_cluster_var(pieces, labels, centers, k)
ee1 = max([np.var([1,-2,1]), np.var([0,0])])
#!/usr/bin/env python
# coding: utf-8
"""
index_calc.py: This Python module contains the activity index calculation functions for the CaIIH, NaI, and Hα lines (with CaI calculated alongside Hα).
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>, <EMAIL>"
__date__ = "10-03-2022"
__version__ = "1.8.1"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import csv
import warnings
from tqdm.notebook import tqdm as log_progress
import astropy.units as u
import astropy as ap
from specutils import Spectrum1D, SpectralRegion
from specutils.fitting import fit_generic_continuum
from specutils.manipulation import extract_region
from astropy.modeling.polynomial import Chebyshev1D
from astropy.nddata import StdDevUncertainty
from astropy.io import fits
from krome.spec_analysis import find_nearest, read_data, calc_ind
## Defining a function for calculating the H alpha index following Boisse et al. 2009 (2009A&A...495..959B)
def H_alpha_index(file_path,
radial_velocity,
degree=4,
H_alpha_line=656.2808,
H_alpha_band=0.16,
CaI_line=657.2795,
CaI_band=0.08,
F1_line=655.087,
F1_band=1.075,
F2_line=658.031,
F2_band=0.875,
Instrument='NARVAL',
norm_spec=False,
plot_fit=False,
plot_spec=True,
print_stat=True,
save_results=False,
results_file_name=None,
save_figs=False,
save_figs_name=None,
out_file_path=None,
ccf_file_path=None,
CaI_index=True):
"""
Calculates the H alpha index following <NAME>., et al., 2009, A&A, 495, 959. In addition, it also
calculates the CaI index following <NAME>., <NAME>., <NAME>., <NAME>., 2013, ApJ, 764, 3.
This index uses the exact same reference continua, F1 and F2, as the H alpha index, so that it serves as a
control for assessing the significance of the H alpha index variations.
Parameters:
-----------
file_path: list, .s format (NARVAL), ADP..._.fits format (HARPS) or s1d_A.fits format (HARPS-N)
List containing the paths of the spectrum files
radial_velocity: int
Stellar radial velocity along the line-of-sight. This value is used for doppler shifting the spectra to its rest frame.
degree: int, default: 4
The degree of the Chebyshev1D polynomial to fit to the continuum for normalisation.
Normalisation done using Specutils.
For more info,
see https://specutils.readthedocs.io/en/stable/api/specutils.fitting.fit_generic_continuum.html#specutils.fitting.fit_generic_continuum
H_alpha_line: int, default: 656.2808 nm
H alpha line centre in nm.
H_alpha_band: int, default: 0.16 nm
Band width (nm) in which to calculate the mean flux.
CaI_line: int, default: 657.2795 nm
CaI line centre in nm.
CaI_band: int, default: 0.08 nm
Band width (nm) in which to calculate the mean flux.
F1_line: int, default: 655.087 nm
Line centre of the blue reference continuum.
F1_band: int, default: 1.075 nm
Band width (nm) in which to calculate the mean continuum flux.
F2_line: int, default: 658.031 nm
Line centre of the red reference continuum.
F2_band: int, default: 0.875 nm
Band width (nm) in which to calculate the mean continuum flux.
Instrument: str, default: 'NARVAL'
The instrument from which the data has been collected. Available options are 'NARVAL', 'HARPS' or 'HARPS-N'.
norm_spec: bool, default: False
Normalizes the spectrum.
plot_fit: bool, default: False
Plots the continuum fitting normalization processes.
plot_spec: bool, default: True
Plots the final reduced spectrum.
print_stat: bool, default: True
Prints the status of each process within the function.
save_results: bool, default: False
Saves the run results in a .csv format in the working directory
results_file_name: str, default: None
Name of the file with which to save the results file
save_figs: bool, default: False
Save the plots in a pdf format in the working directory
save_figs_name: str, default=None
Name with which to save the figures. NOTE: This should ideally be the observation date of the given spectrum.
out_file_path: list, .out format (NARVAL), default: None
List containing the paths of the .out files to extract the OBS_HJD. If None, HJD is returned as NaN. Used only when Instrument type is 'NARVAL'
ccf_file_path: list, .fits format (HARPS/HARPS-N), default: None
List containing the paths of the CCF FITS files to extract the radial velocity. If None, the given radial velocity argument is used for all files for doppler shift corrections
CaI_index: bool, default=True
Calculates the activity insensitive CaI index as well. If False, NaN values are returned instead.
Returns:
-----------
NARVAL: HJD, RA, DEC, AIRMASS, Exposure time[s], No. of exposures, GAIN [e-/ADU], ReadOut Noise [e-], V_mag, T_eff[K], RV[m/s], H alpha index, error on H alpha index, CaI index and error on CaI index.
HARPS: BJD, RA, DEC, AIRMASS, Exposure time[s], Barycentric RV[km/s], OBS_DATE, Program ID, SNR, CCD Readout Noise[e-], CCD conv factor[e-/ADU], ReadOut Noise[ADU], RV[m/s], H alpha index, error on H alpha index, CaI index, error on CaI index
HARPS-N: BJD, RA, DEC, AIRMASS, Exposure time[s], OBS_DATE, Program ID', RV[m/s], H alpha index, error on H alpha index, CaI index and error on CaI index
All values are type float() given inside a list.
"""
results = [] # Empty list to which the run results will be appended
# Creating a loop to go through each given file_path in the list of file paths
# Using the tqdm function 'log_progress' to provide a neat progress bar in Jupyter Notebook which shows the total number of
# runs, the run time per iteration and the total run time for all files!
for i in log_progress(range(len(file_path)), desc='Calculating Hα Index'):
# Creating a loop for data from each Instrument;
# NARVAL
if Instrument == 'NARVAL':
if out_file_path != None:
# Using read_data from krome.spec_analysis to extract useful object parameters and all individual spectral orders
obj_params, orders = read_data(file_path=file_path[i],
out_file_path=out_file_path[i],
Instrument=Instrument,
print_stat=print_stat,
show_plots=False)
obj_params['RV'] = radial_velocity # setting radial_velocity as part of the obj_params dictionary for continuity
else:
orders = read_data(file_path=file_path[i],
Instrument=Instrument,
print_stat=print_stat,
out_file_path=None,
show_plots=False)
if print_stat:
print('"out_file_path" not given as an argument. Run will only return the indices and their errors instead.')
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
if print_stat:
print('Total {} spectral orders extracted'.format(len(orders)))
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
order_34 = orders[61-34] # The orders begin from # 61 so to get # 34, we index as 61-34.
if print_stat:
print('The #34 order wavelength read from .s file using pandas is: {}'.format(order_34[0].values))
print('The #34 order intensity read from .s file using pandas is: {}'.format(order_34[1].values))
print('The #34 order intensity error read from .s file using pandas is: {}'.format(order_34[2].values))
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
# The spectrum is now Doppler-shift corrected along the wavelength axis using the stellar radial velocity and the rest wavelength of the H alpha line; delta_lambda = (v/c)*lambda
shift = ((radial_velocity/ap.constants.c.value)*H_alpha_line)
shift = (round(shift, 4)) # Using only 4 decimal places for the shift value since that's the precision of the wavelength in the .s files!
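# Rough worked example (the RV value here is purely illustrative, not taken from any file):
# for radial_velocity = +10000 m/s, shift = (1e4 / 2.998e8) * 656.2808 ≈ 0.0219 nm, so the
# wavelength axis below is moved blueward by about 0.022 nm before the bands are extracted.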
wvl = np.round((order_34[0].values - shift), 4) # Subtracting the calculated doppler shift value from the wavelength axis since the stellar radial velocity is positive. If the stellar RV is negative, the shift value will be added instead.
flx = order_34[1].values # Indexing flux array from order_34
flx_err = order_34[2].values # Indexing flux_err array from order_34
# Creating a spectrum object called 'spec1d' using 'Spectrum1D' from 'specutils'
# Docs for 'specutils' are here; https://specutils.readthedocs.io/en/stable/
# The spectral and flux axes are given units nm and Jy respectively using 'astropy.units'.
# The uncertainty has units Jy as well!
spec1d = Spectrum1D(spectral_axis=wvl*u.nm,
flux=flx*u.Jy,
uncertainty=StdDevUncertainty(flx_err, unit=u.Jy))
# Printing info
if print_stat:
print('The doppler shift size using RV {} m/s and the H alpha line of 656.2808nm is: {}nm'.format(radial_velocity, shift))
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
print('The spectral order used ranges from {}nm to {}nm. These values are doppler shift corrected and rounded off to 4 decimal places'.format(spec1d.spectral_axis[0].value, spec1d.spectral_axis[-1].value))
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
# Fitting an nth order polynomial to the continuum for normalisation using specutils
if norm_spec:
if print_stat:
print('Normalising the spectra by fitting a {}th order polynomial to the entire spectral order'.format(degree))
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
# 'fit_generic_continuum' is a function imported from 'specutils' which fits a given polynomial model to the given spectrum.
with warnings.catch_warnings(): # Ignore warnings
warnings.simplefilter('ignore')
g_fit = fit_generic_continuum(spec1d, model=Chebyshev1D(degree)) # Using 'Chebyshev1D' to define an nth order polynomial model
if print_stat:
print('Polynomial fit coefficients:')
print(g_fit)
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
y_cont_fitted = g_fit(spec1d.spectral_axis) # Continuum fit y values are calculated by inputting the spectral axis x values into the polynomial fit equation
spec_normalized = spec1d / y_cont_fitted # Spectrum is normalised by diving it with the polynomial fit
# Plots the polynomial fits
if plot_fit:
f, ax1 = plt.subplots(figsize=(10,4))
ax1.plot(spec1d.spectral_axis, spec1d.flux)
ax1.plot(spec1d.spectral_axis, y_cont_fitted)
ax1.set_xlabel('$\lambda (nm)$')
ax1.set_ylabel('Normalized Flux')
ax1.set_title("Continuum Fitting")
plt.tight_layout()
# Saves the plot in a pdf format in the working directory
if save_figs:
if print_stat:
print('Saving plots as PDFs in the working directory')
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
plt.savefig('{}_cont_fit_plot.pdf'.format(save_figs_name), format='pdf')
f, ax2 = plt.subplots(figsize=(10,4))
ax2.plot(spec_normalized.spectral_axis, spec_normalized.flux, color='blue', label='Re-Normalized', alpha=0.6)
ax2.plot(spec1d.spectral_axis, spec1d.flux, color='red', label='Pipeline Normalized', alpha=0.6)
plt.axhline(1.0, ls='--', c='gray')
plt.vlines(F1_line-(F1_band/2), ymin=0, ymax=max(spec1d.flux.value), linestyles='--', colors='black', label='Region used for index calc.')
plt.vlines(F2_line+(F2_band/2), ymin=0, ymax=max(spec1d.flux.value), linestyles='--', colors='black')
ax2.set_xlabel('$\lambda (nm)$')
ax2.set_ylabel('Normalized Flux')
ax2.set_title("Continuum Normalized ")
plt.tight_layout()
plt.legend()
if save_figs:
plt.savefig('{}_cont_norm_plot.pdf'.format(save_figs_name), format='pdf')
spec = spec_normalized # Note the continuum normalized spectrum also has new uncertainty values!
else:
spec = spec1d
# Plots the final reduced spectra along with the relevant bandwidths and line/continuum positions
if plot_spec:
f, ax = plt.subplots(figsize=(10,4))
ax.plot(spec.spectral_axis, spec.flux, '-k')
ax.set_xlabel('$\lambda (nm)$')
ax.set_ylabel("Normalized Flux")
plt.vlines(H_alpha_line-(H_alpha_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='--', colors='black', label='Hα {}±{}nm'.format(H_alpha_line, H_alpha_band/2))
plt.vlines(H_alpha_line+(H_alpha_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='--', colors='black')
plt.vlines(F1_line-(F1_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='dotted', colors='blue', label='Blue cont. {}±{}nm'.format(F1_line, F1_band/2))
plt.vlines(F1_line+(F1_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='dotted', colors='blue')
plt.vlines(F2_line-(F2_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='dashdot', colors='red', label='Red cont. {}±{}nm'.format(F2_line, F2_band/2))
plt.vlines(F2_line+(F2_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='dashdot', colors='red')
if CaI_index:
plt.vlines(CaI_line-(CaI_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='dashdot', colors='black', label='CaI {}±{}nm'.format(CaI_line, CaI_band/2))
plt.vlines(CaI_line+(CaI_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='dashdot', colors='black')
ax.set_xlim(F1_line-1.1, F2_line+1.1)
ax.yaxis.set_ticks_position('both')
ax.xaxis.set_ticks_position('both')
plt.minorticks_on()
ax.tick_params(direction='in', which='both')
plt.tight_layout()
plt.legend()
if save_figs:
plt.savefig('{}_reduced_spec_plot.pdf'.format(save_figs_name), format='pdf')
# Plots the zoomed in regions around the H alpha line.
f, ax1 = plt.subplots(figsize=(10,4))
ax1.plot(spec.spectral_axis, spec.flux)
ax1.set_xlabel('$\lambda (nm)$')
ax1.set_ylabel("Normalized Flux")
plt.vlines(H_alpha_line, ymin=0, ymax=max(spec.flux.value), linestyles='dotted', colors='green')
plt.vlines(H_alpha_line-(H_alpha_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='--', colors='black', label='Hα band width = {}nm'.format(H_alpha_band))
plt.vlines(H_alpha_line+(H_alpha_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='--', colors='black')
ax1.set_xlim(H_alpha_line-(H_alpha_band/2)-0.1, H_alpha_line+(H_alpha_band/2)+0.1)
plt.tight_layout()
plt.legend()
if save_figs:
plt.savefig('{}_H_alpha_line_plot.pdf'.format(save_figs_name), format='pdf')
if CaI_index:
# Plots the zoomed in regions around the CaI line.
f, ax2 = plt.subplots(figsize=(10,4))
ax2.plot(spec.spectral_axis, spec.flux)
ax2.set_xlabel('$\lambda (nm)$')
ax2.set_ylabel("Normalized Flux")
plt.vlines(CaI_line, ymin=0, ymax=max(spec.flux.value), linestyles='dotted', colors='green')
plt.vlines(CaI_line-(CaI_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='--', colors='black', label='CaI band width = {}nm'.format(CaI_band))
plt.vlines(CaI_line+(CaI_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='--', colors='black')
ax2.set_xlim(CaI_line-(CaI_band/2)-0.1, CaI_line+(CaI_band/2)+0.1)
plt.tight_layout()
plt.legend()
if save_figs:
plt.savefig('{}_CaI_line_plot.pdf'.format(save_figs_name), format='pdf')
# HARPS
elif Instrument == 'HARPS':
# Opening the FITS file using 'astropy.io.fits' and extracting useful object parameters and spectrum using read_data from krome.spec_analysis
# NOTE: The format of this FITS file must be ADP which contains the reduced spectrum with the wav, flux and flux_err in three columns
if ccf_file_path != None:
obj_params, spec = read_data(file_path=file_path[i],
ccf_file_path=ccf_file_path[i],
Instrument=Instrument,
print_stat=print_stat,
show_plots=False)
else:
obj_params, spec = read_data(file_path=file_path[i],
Instrument=Instrument,
print_stat=print_stat,
show_plots=False)
obj_params['RV'] = radial_velocity # setting obj_params['RV'] to the given radial_velocity argument!
# Assigning appropriate variables from spec individually!
wvl = spec[0] # nm
flx = spec[1] # ADU
flx_err = spec[2]
# Calculating doppler shift size using delta_lambda/lambda = v/c and the RV from the CCF FITS file
shift = ((obj_params['RV']/ap.constants.c.value)*H_alpha_line)
shift = (round(shift, 3)) # Using only 3 decimal places for the shift value since that's the precision of the wavelength in the .fits files!
# Since the HARPS spectra have their individual spectral orders stitched together, we do not have to extract them separately as done for NARVAL. Thus for HARPS, the required region is extracted by slicing the spectrum with the index corresponding to the left and right continuum obtained using the 'find_nearest' function.
left_idx = find_nearest(wvl, F1_line-2) # ± 2nm extra included for both!
right_idx = find_nearest(wvl, F2_line+2)
# If condition for when certain files have NaN as the flux errors; probably for all since the ESO Phase 3 data currently does not provide the flux errors
flx_err_nan = np.isnan(np.sum(flx_err)) # NOTE: This returns true if there is one NaN or all are NaN!
if flx_err_nan:
if print_stat:
print('File contains NaN in flux errors array. Calculating flux error using CCD readout noise: {}'.format(np.round(obj_params['RON'], 4)))
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
# Flux error calculated as photon noise plus CCD readout noise
# NOTE: The error calculation depends on a lot of other CCD parameters such as the pixel binning in each CCD
# array and so on. But for photon-noise-limited measurements, the readout-noise term is generally insignificant.
with warnings.catch_warnings(): # Ignore warnings
warnings.simplefilter('ignore')
flx_err_ron = [np.sqrt(flux + np.square(obj_params['RON'])) for flux in flx]
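# Quick sanity check of the scale of this term (illustrative numbers only): for a pixel
# with 10000 ADU and an assumed readout noise of 5 ADU, the error is
# sqrt(10000 + 25) ≈ 100.1 ADU, i.e. the photon-noise term dominates at typical flux levels.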
if np.isnan(np.sum(flx_err_ron)):
if print_stat:
print('The calculated flux error array contains a few NaN values due to negative flux encountered in the square root.')
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
# Slicing the data to contain only the region required for the index calculation as explained above and
# creating a spectrum class for it.
spec1d = Spectrum1D(spectral_axis=(wvl[left_idx:right_idx] - shift)*u.nm,
flux=flx[left_idx:right_idx]*u.Jy,
uncertainty=StdDevUncertainty(flx_err_ron[left_idx:right_idx], unit=u.Jy))
else:
spec1d = Spectrum1D(spectral_axis=(wvl[left_idx:right_idx] - shift)*u.nm,
flux=flx[left_idx:right_idx]*u.Jy,
uncertainty=StdDevUncertainty(flx_err[left_idx:right_idx], unit=u.Jy))
if print_stat:
print('The doppler shift size using RV {} m/s and the H alpha line of 656.2808nm is: {}nm'.format(obj_params['RV'], shift))
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
print('The spectral region used ranges from {}nm to {}nm. These values are doppler shift corrected and rounded off to 3 decimal places'.format(spec1d.spectral_axis[0].value, spec1d.spectral_axis[-1].value))
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
if norm_spec:
if print_stat:
print('Normalising the spectra by fitting a {}th order polynomial to the entire spectral order'.format(degree))
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
# 'fit_generic_continuum' is a function imported from 'specutils' which fits a given polynomial model to the given spectrum.
with warnings.catch_warnings(): # Ignore warnings
warnings.simplefilter('ignore')
g_fit = fit_generic_continuum(spec1d, model=Chebyshev1D(degree)) # Using 'Chebyshev1D' to define an nth order polynomial model
if print_stat:
print('Polynomial fit coefficients:')
print(g_fit)
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
y_cont_fitted = g_fit(spec1d.spectral_axis) # Continuum fit y values are calculated by inputting the spectral axis x values into the polynomial fit equation
spec_normalized = spec1d / y_cont_fitted
spec = spec_normalized # Note the continuum normalized spectrum also has new uncertainty values which are simply the errors divided by this polynomial fit.
# Plots the polynomial fits
if plot_fit:
f, ax1 = plt.subplots(figsize=(10,4))
ax1.plot(spec1d.spectral_axis, spec1d.flux)
ax1.plot(spec1d.spectral_axis, y_cont_fitted)
ax1.set_xlabel('$\lambda (nm)$')
ax1.set_ylabel('Flux (adu)')
ax1.set_title("Continuum Fitting")
plt.tight_layout()
# Saves the plot in a pdf format in the working directory
if save_figs:
if print_stat:
print('Saving plots as PDFs in the working directory')
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
plt.savefig('{}_cont_fit_plot.pdf'.format(save_figs_name), format='pdf')
f, ax2 = plt.subplots(figsize=(10,4))
ax2.plot(spec_normalized.spectral_axis, spec_normalized.flux, label='Re-Normalized')
plt.axhline(1.0, ls='--', c='gray')
plt.vlines(F1_line-(F1_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='--', colors='black', label='Region used for index calc.')
plt.vlines(F2_line+(F2_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='--', colors='black')
ax2.set_xlabel('$\lambda (nm)$')
ax2.set_ylabel('Normalized Flux')
ax2.set_title("Continuum Normalized ")
plt.tight_layout()
plt.legend()
if save_figs:
plt.savefig('{}_cont_norm_plot.pdf'.format(save_figs_name), format='pdf')
else:
spec = spec1d
# Plots the final reduced spectra along with the relevant bandwidths and line/continuum positions
if plot_spec:
f, ax = plt.subplots(figsize=(10,4))
ax.plot(spec.spectral_axis, spec.flux, '-k')
ax.set_xlabel('$\lambda (nm)$')
if norm_spec:
ax.set_ylabel("Normalized Flux")
else:
ax.set_ylabel("Flux (adu)")
plt.vlines(H_alpha_line-(H_alpha_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='--', colors='black', label='Hα {}±{}nm'.format(H_alpha_line, H_alpha_band/2))
plt.vlines(H_alpha_line+(H_alpha_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='--', colors='black')
plt.vlines(F1_line-(F1_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='dotted', colors='blue', label='Blue cont. {}±{}nm'.format(F1_line, F1_band/2))
plt.vlines(F1_line+(F1_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='dotted', colors='blue')
plt.vlines(F2_line-(F2_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='dashdot', colors='red', label='Red cont. {}±{}nm'.format(F2_line, F2_band/2))
plt.vlines(F2_line+(F2_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='dashdot', colors='red')
if CaI_index:
plt.vlines(CaI_line-(CaI_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='dashdot', colors='black', label='CaI {}±{}nm'.format(CaI_line, CaI_band/2))
plt.vlines(CaI_line+(CaI_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='dashdot', colors='black')
ax.yaxis.set_ticks_position('both')
ax.xaxis.set_ticks_position('both')
plt.minorticks_on()
ax.tick_params(direction='in', which='both')
plt.tight_layout()
plt.legend()
if save_figs:
plt.savefig('{}_reduced_spec_plot.pdf'.format(save_figs_name), format='pdf')
f, ax1 = plt.subplots(figsize=(10,4))
ax1.plot(spec.spectral_axis, spec.flux)
ax1.set_xlabel('$\lambda (nm)$')
if norm_spec:
ax1.set_ylabel("Normalized Flux")
else:
ax1.set_ylabel("Flux (adu)")
plt.vlines(H_alpha_line, ymin=0, ymax=max(spec.flux.value), linestyles='dotted', colors='green')
plt.vlines(H_alpha_line-(H_alpha_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='--', colors='black', label='Hα band width = {}nm'.format(H_alpha_band))
plt.vlines(H_alpha_line+(H_alpha_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='--', colors='black')
ax1.set_xlim(H_alpha_line-(H_alpha_band/2)-0.1, H_alpha_line+(H_alpha_band/2)+0.1)
plt.tight_layout()
plt.legend()
if save_figs:
plt.savefig('{}_H_alpha_line_plot.pdf'.format(save_figs_name), format='pdf')
if CaI_index:
# Plots the zoomed in regions around the CaI line.
f, ax2 = plt.subplots(figsize=(10,4))
ax2.plot(spec.spectral_axis, spec.flux)
ax2.set_xlabel('$\lambda (nm)$')
if norm_spec:
ax2.set_ylabel("Normalized Flux")
else:
ax2.set_ylabel("Flux (adu)")
plt.vlines(CaI_line, ymin=0, ymax=max(spec.flux.value), linestyles='dotted', colors='green')
plt.vlines(CaI_line-(CaI_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='--', colors='black', label='CaI band width = {}nm'.format(CaI_band))
plt.vlines(CaI_line+(CaI_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='--', colors='black')
ax2.set_xlim(CaI_line-(CaI_band/2)-0.1, CaI_line+(CaI_band/2)+0.1)
plt.tight_layout()
plt.legend()
if save_figs:
plt.savefig('{}_CaI_line_plot.pdf'.format(save_figs_name), format='pdf')
elif Instrument=='HARPS-N':
# Opening the FITS file using 'astropy.io.fits' and extracting useful object parameters and spectrum using read_data from krome.spec_analysis
# NOTE: The format of this FITS file must be s1d which only contains flux array.
# The wavelength array is constructed using the starting point (CRVAL1), length of spectral axis (NAXIS1)
# and wavelength step (CDELT1) from the FITS file header.
if ccf_file_path != None:
obj_params, spec = read_data(file_path=file_path[i],
ccf_file_path=ccf_file_path[i],
Instrument=Instrument,
print_stat=print_stat,
show_plots=False)
else:
obj_params, spec = read_data(file_path=file_path[i],
Instrument=Instrument,
print_stat=print_stat,
show_plots=False)
obj_params['RV'] = radial_velocity # setting obj_params['RV'] to the given radial_velocity argument!
# Assigning appropriate variables from spec individually!
wvl = spec[0] # nm
flx = spec[1] # ADU
# Calculating doppler shift size using delta_lambda/lambda = v/c and the RV from the CCF FITS file
shift = ((obj_params['RV']/ap.constants.c.value)*H_alpha_line)
shift = (round(shift, 3))
# Same as the HARPS spectra, the HARPS-N spectra have their individual spectral orders stitched together and
# we do not have to extract them separately as done for NARVAL. Thus, the required region is extracted by slicing
# the spectrum with the index corresponding to the left and right continuum obtained using the
# 'find_nearest' function.
left_idx = find_nearest(wvl, F1_line-2) # ± 2nm extra included for both!
right_idx = find_nearest(wvl, F2_line+2)
with warnings.catch_warnings(): # Ignore warnings
warnings.simplefilter('ignore')
flx_err = [np.sqrt(flux) for flux in flx] # Using only photon noise as flx_err approx since no RON info available!
# Slicing the data to contain only the region required for the index calculation as explained above and creating
# a spectrum class for it
spec1d = Spectrum1D(spectral_axis=(wvl[left_idx:right_idx] - shift)*u.nm,
flux=flx[left_idx:right_idx]*u.Jy,
uncertainty=StdDevUncertainty(flx_err[left_idx:right_idx], unit=u.Jy))
if print_stat:
print('The doppler shift size using RV {} m/s and the H alpha line of 656.2808nm is: {}nm'.format(obj_params['RV'], shift))
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
print('The spectral region used ranges from {}nm to {}nm. These values are doppler shift corrected and rounded off to 3 decimal places'.format(spec1d.spectral_axis[0].value, spec1d.spectral_axis[-1].value))
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
if norm_spec:
if print_stat:
print('Normalising the spectra by fitting a {}th order polynomial to the entire spectral order'.format(degree))
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
# 'fit_generic_continuum' is a function imported from 'specutils' which fits a given polynomial model to the given spectrum.
with warnings.catch_warnings(): # Ignore warnings
warnings.simplefilter('ignore')
g_fit = fit_generic_continuum(spec1d, model=Chebyshev1D(degree)) # Using 'Chebyshev1D' to define an nth order polynomial model
if print_stat:
print('Polynomial fit coefficients:')
print(g_fit)
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
y_cont_fitted = g_fit(spec1d.spectral_axis)
spec_normalized = spec1d / y_cont_fitted
# Plots the polynomial fits
if plot_fit:
f, ax1 = plt.subplots(figsize=(10,4))
ax1.plot(spec1d.spectral_axis, spec1d.flux)
ax1.plot(spec1d.spectral_axis, y_cont_fitted)
ax1.set_xlabel('$\lambda (nm)$')
ax1.set_ylabel('Flux (adu)')
ax1.set_title("Continuum Fitting")
plt.tight_layout()
# Saves the plot in a pdf format in the working directory
if save_figs:
if print_stat:
print('Saving plots as PDFs in the working directory')
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
plt.savefig('{}_cont_fit_plot.pdf'.format(save_figs_name), format='pdf')
f, ax2 = plt.subplots(figsize=(10,4))
ax2.plot(spec_normalized.spectral_axis, spec_normalized.flux, color='blue', label='Re-Normalized', alpha=0.6)
# ax2.plot(spec1d.spectral_axis, spec1d.flux, color='red', label='Pipeline Normalized', alpha=0.6)
plt.axhline(1.0, ls='--', c='gray')
plt.vlines(F1_line-(F1_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='--', colors='black', label='Region used for index calc.')
plt.vlines(F2_line+(F2_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='--', colors='black')
ax2.set_xlabel('$\lambda (nm)$')
ax2.set_ylabel('Normalized Flux')
ax2.set_title("Continuum Normalized ")
plt.tight_layout()
plt.legend()
if save_figs:
plt.savefig('{}_cont_norm_plot.pdf'.format(save_figs_name), format='pdf')
spec = spec_normalized # Note the continuum normalized spectrum also has new uncertainty values!
else:
spec = spec1d
# Plots the final reduced spectra along with the relevant bandwidths and line/continuum positions
if plot_spec:
f, ax = plt.subplots(figsize=(10,4))
ax.plot(spec.spectral_axis, spec.flux, '-k')
ax.set_xlabel('$\lambda (nm)$')
if norm_spec:
ax.set_ylabel("Normalized Flux")
else:
ax.set_ylabel("Flux (adu)")
plt.vlines(H_alpha_line-(H_alpha_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='--', colors='black', label='Hα {}±{}nm'.format(H_alpha_line, H_alpha_band/2))
plt.vlines(H_alpha_line+(H_alpha_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='--', colors='black')
plt.vlines(F1_line-(F1_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='dotted', colors='blue', label='Blue cont. {}±{}nm'.format(F1_line, F1_band/2))
plt.vlines(F1_line+(F1_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='dotted', colors='blue')
plt.vlines(F2_line-(F2_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='dashdot', colors='red', label='Red cont. {}±{}nm'.format(F2_line, F2_band/2))
plt.vlines(F2_line+(F2_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='dashdot', colors='red')
if CaI_index:
plt.vlines(CaI_line-(CaI_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='dashdot', colors='black', label='CaI {}±{}nm'.format(CaI_line, CaI_band/2))
plt.vlines(CaI_line+(CaI_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='dashdot', colors='black')
ax.yaxis.set_ticks_position('both')
ax.xaxis.set_ticks_position('both')
plt.minorticks_on()
ax.tick_params(direction='in', which='both')
plt.tight_layout()
plt.legend()
if save_figs:
plt.savefig('{}_reduced_spec_plot.pdf'.format(save_figs_name), format='pdf')
f, ax1 = plt.subplots(figsize=(10,4))
ax1.plot(spec.spectral_axis, spec.flux)
ax1.set_xlabel('$\lambda (nm)$')
if norm_spec:
ax1.set_ylabel("Normalized Flux")
else:
ax1.set_ylabel("Flux (adu)")
plt.vlines(H_alpha_line, ymin=0, ymax=max(spec.flux.value), linestyles='dotted', colors='green')
plt.vlines(H_alpha_line-(H_alpha_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='--', colors='black', label='Hα band width = {}nm'.format(H_alpha_band))
plt.vlines(H_alpha_line+(H_alpha_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='--', colors='black')
ax1.set_xlim(H_alpha_line-(H_alpha_band/2)-0.1, H_alpha_line+(H_alpha_band/2)+0.1)
plt.tight_layout()
plt.legend()
if save_figs:
plt.savefig('{}_H_alpha_line_plot.pdf'.format(save_figs_name), format='pdf')
if CaI_index:
# Plots the zoomed in regions around the CaI line.
f, ax2 = plt.subplots()
ax2.plot(spec.spectral_axis, spec.flux)
ax2.set_xlabel('$\lambda (nm)$')
if norm_spec:
ax2.set_ylabel("Normalized Flux")
else:
ax2.set_ylabel("Flux (adu)")
plt.vlines(CaI_line, ymin=0, ymax=max(spec.flux.value), linestyles='dotted', colors='green')
plt.vlines(CaI_line-(CaI_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='--', colors='black', label='CaI band width = {}nm'.format(CaI_band))
plt.vlines(CaI_line+(CaI_band/2), ymin=0, ymax=max(spec.flux.value), linestyles='--', colors='black')
ax2.set_xlim(CaI_line-(CaI_band/2)-0.1, CaI_line+(CaI_band/2)+0.1)
plt.tight_layout()
plt.legend()
if save_figs:
plt.savefig('{}_CaI_line_plot.pdf'.format(save_figs_name), format='pdf')
else:
raise ValueError('Instrument type not recognised. Available options are "NARVAL", "HARPS" and "HARPS-N"')
# Now we have the spectrum to work with as a variable, 'spec'!
# The three regions required for H alpha index calculation are extracted from 'spec' using the 'extract region' function from 'specutils'.
# The function uses another function called 'SpectralRegion' as one of its arguments which defines the region to be extracted done so using the line and line bandwidth values; i.e. left end of region would be 'line - bandwidth/2' and right end would be 'line + bandwidth/2'.
# Note: These values must have the same units as the spec wavelength axis.
F_H_alpha_region = extract_region(spec, region=SpectralRegion((H_alpha_line-(H_alpha_band/2))*u.nm, (H_alpha_line+(H_alpha_band/2))*u.nm))
F1_region = extract_region(spec, region=SpectralRegion((F1_line-(F1_band/2))*u.nm, (F1_line+(F1_band/2))*u.nm))
F2_region = extract_region(spec, region=SpectralRegion((F2_line-(F2_band/2))*u.nm, (F2_line+(F2_band/2))*u.nm))
if CaI_index:
F_CaI_region = extract_region(spec, region=SpectralRegion((CaI_line-(CaI_band/2))*u.nm, (CaI_line+(CaI_band/2))*u.nm))
regions = [F_H_alpha_region, F1_region, F2_region, F_CaI_region]
else:
regions = [F_H_alpha_region, F1_region, F2_region]
# The indices are calculated using the 'calc_ind' function from krome.spec_analysis by inputting the extracted regions as shown
I_Ha, I_Ha_err, I_CaI, I_CaI_err = calc_ind(regions=regions,
index_name='HaI',
print_stat=print_stat,
CaI_index=CaI_index)
if Instrument=='NARVAL':
if out_file_path != None:
header = ['HJD', 'RA', 'DEC', 'AIRMASS', 'T_EXP', 'NUM_EXP', 'GAIN', 'RON', 'V_mag', 'T_eff', 'RV', 'I_Ha', 'I_Ha_err', 'I_CaI', 'I_CaI_err']
res = list(obj_params.values()) + [I_Ha, I_Ha_err, I_CaI, I_CaI_err] # Creating results list 'res' containing the calculated parameters and appending this list to the 'results' empty list created at the start of this function!
results.append(res)
else:
header = ['I_Ha', 'I_Ha_err', 'I_CaI', 'I_CaI_err']
res = [I_Ha, I_Ha_err, I_CaI, I_CaI_err]
results.append(res)
elif Instrument=='HARPS':
header = ['BJD', 'RA', 'DEC', 'AIRMASS', 'T_EXP', 'BERV', 'OBS_DATE', 'PROG_ID', 'SNR', 'SIGDET', 'CONAD', 'RON', 'RV', 'I_Ha', 'I_Ha_err', 'I_CaI', 'I_CaI_err']
res = list(obj_params.values()) + [I_Ha, I_Ha_err, I_CaI, I_CaI_err]
results.append(res)
elif Instrument=='HARPS-N':
header = ['BJD', 'RA', 'DEC', 'AIRMASS', 'T_EXP', 'OBS_DATE', 'PROG_ID', 'RV', 'I_Ha', 'I_Ha_err', 'I_CaI', 'I_CaI_err']
res = list(obj_params.values()) + [I_Ha, I_Ha_err, I_CaI, I_CaI_err]
results.append(res)
# Saving the results in a csv file format
if save_results:
if print_stat:
print('Saving results in the working directory in file: {}.csv'.format(results_file_name))
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
with open('{}.csv'.format(results_file_name), 'w') as csvfile:
writer = csv.writer(csvfile, dialect='excel')
writer.writerow(header)
for row in results:
writer.writerow(row)
return results
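# A minimal usage sketch for the function above (a sketch only; the file names and the
# RV value are illustrative placeholders, not files shipped with this module):
#
#   res = H_alpha_index(file_path=['star_obs.s'],
#                       out_file_path=['star_obs.out'],
#                       radial_velocity=10000,
#                       Instrument='NARVAL',
#                       plot_spec=False,
#                       print_stat=False,
#                       CaI_index=True)
#   HJD, *rest, I_Ha, I_Ha_err, I_CaI, I_CaI_err = res[0]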
## Defining a function to calculate the NaI index following <NAME> et al. 2007 (2007MNRAS.378.1007D)
def NaI_index(file_path,
radial_velocity,
degree=4,
NaID2=588.995,
NaID1=589.592,
NaI_band=0.1,
F1_line=580.5,
F1_band=1.0,
F2_line=609.0,
F2_band=2.0,
hfv=10,
Instrument='NARVAL',
norm_spec=False,
plot_fit=False,
plot_spec=True,
print_stat=True,
save_results=False,
results_file_name=None,
save_figs=False,
save_figs_name=None,
out_file_path=None,
ccf_file_path=None):
"""
This function calculates the NaI doublet index following the method proposed in <NAME> et al. 2007.
Parameters:
-----------
file_path: list, .s format (NARVAL), ADP..._.fits format (HARPS) or s1d_A.fits format (HARPS-N)
List containing paths of the spectrum files
radial_velocity: int
Stellar radial velocity along the line-of-sight. This value is used for Doppler shifting the spectra to the stellar rest frame.
degree: int, default: 4
The degree of the Chebyshev1D polynomial to fit to the continuum for normalisation.
Normalisation done using Specutils.
For more info, see https://specutils.readthedocs.io/en/stable/api/specutils.fitting.fit_generic_continuum.html#specutils.fitting.fit_generic_continuum
NaID1: int, default: 588.995 nm
Line centre for the first doublet in nm.
NaID2: int, default: 589.592 nm
Line centre for the second doublet in nm.
NaI_band: int, default: 0.1 nm
Band width (nm) in which to calculate the mean doublet flux value.
F1_line: int, default: 580.5 nm
Centre of the blue continuum for pseudo-cont. estimation
F1_band: int, default: 1.0 nm
Band width (nm) in which to estimate the continuum flux.
F2_line: int, default: 609.0 nm
Centre of the red continuum for pseudo-cont. estimation
F2_band: int, default: 2.0 nm
Band width (nm) in which to estimate the continuum flux.
hfv: int, default: 10
Number of highest flux values (hfv) to use for estimating the continuum flux in each red/blue band.
NOTE: If you'd like to use all of the flux points within the bandwidth, set this parameter to None.
Instrument: str, default: 'NARVAL'
The instrument from which the data has been collected. Input takes arguments 'NARVAL', 'HARPS' or 'HARPS-N'.
norm_spec: bool, default: False
Normalizes the spectrum. NOTE: This argument also accepts the str values 'scale' and 'poly1dfit' to normalize the spectrum by either scaling it down
to a maximum flux of 1.0 or by fitting the continuum with a line. These options are ONLY used for Instrument types 'HARPS' & 'HARPS-N'
plot_fit: bool, default: False
Plots the continuum fitting normalization processes.
plot_spec: bool, default: True
Plots the final reduced spectrum.
print_stat: bool, default: True
Prints the status of each process within the function.
save_results: bool, default: False
Saves the run results in a .csv file format in the working directory
results_file_name: str, default: None
Name of the file with which the results file is saved
save_figs: bool, default: False
Save the plots in a pdf format in the working directory
save_figs_name: str, default=None
Name with which to save the figures. NOTE: This should ideally be the observation date of the given spectrum.
out_file_path: list, .out format (NARVAL), default: None
List containing paths of the .out files used to extract OBS_HJD.
ccf_file_path: list, .fits format (HARPS/HARPS-N), default: None
List containing paths of the CCF FITS files used to extract the radial velocity. If None, the given radial velocity arg is used for all files
Returns:
-----------
NARVAL: HJD, RA, DEC, AIRMASS, Exposure time[s], No. of exposures, GAIN [e-/ADU], ReadOut Noise [e-], V_mag, T_eff[K], RV[m/s], NaI index and error on NaI index
HARPS: BJD, RA, DEC, AIRMASS, Exposure time[s], Barycentric RV[km/s], OBS_DATE, Program ID, SNR, CCD Readout Noise[e-], CCD conv factor[e-/ADU], ReadOut Noise[ADU], RV[m/s], NaI index and error on NaI index
HARPS-N: BJD, RA, DEC, AIRMASS, Exposure time[s], OBS_DATE, Program ID', RV[m/s], NaI index and error on NaI index
All values are type float() given inside a list.
"""
results = [] # Empty list to which the run results will be appended
# Creating a loop to go through each given file_path in the list of file paths
# Using the tqdm function 'log_progress' to provide a neat progress bar in Jupyter Notebook which shows the total number of
# runs, the run time per iteration and the total run time for all files!
for i in log_progress(range(len(file_path)), desc='Calculating NaI Index'):
# Creating a loop for each instrument type.
## NARVAL
if Instrument=='NARVAL':
if out_file_path != None:
# Using read_data from krome.spec_analysis to extract useful object parameters and all individual spectral orders
obj_params, orders = read_data(file_path=file_path[i],
out_file_path=out_file_path[i],
Instrument=Instrument,
print_stat=print_stat,
show_plots=False)
obj_params['RV'] = radial_velocity
else:
orders = read_data(file_path=file_path[i],
Instrument=Instrument,
print_stat=print_stat,
out_file_path=None,
show_plots=False)
if print_stat:
print('"out_file_path" not given as an argument. Run will only return the indices and their errors instead.')
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
if print_stat:
print('Total {} spectral orders extracted'.format(len(orders)))
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
ord_39 = orders[61-39] # order 39 contains the F1 line
ord_38 = orders[61-38] # Both order 39 and 38 contain the D1 and D2 lines but only order 38 is used since it has a higher SNR; (see .out file)
ord_37 = orders[61-37] # order 37 contains the F2 line
if print_stat:
print('Using orders #39, #38 and #37 for Index calculation')
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
# Calculating doppler shift size using delta_lambda/lambda = v/c
shift = ((radial_velocity/ap.constants.c.value)*NaID1) # Using the rest wavelength of NaID1 line
shift = (round(shift, 4)) # Using only 4 decimal places for the shift value since that's the precision of the wavelength in .s file!
# Creating three spectrum classes for each of the three orders using 'Spectrum1D' from 'specutils'
# Docs for 'specutils' here; https://specutils.readthedocs.io/en/stable/
# The spectral and flux axes are given nm and Jy units using 'astropy.units' as 'u'. The uncertainty has units Jy as well!
spec1 = Spectrum1D(spectral_axis=np.round((ord_39[0].values - shift), 4)*u.nm,
flux=ord_39[1].values*u.Jy,
uncertainty=StdDevUncertainty(ord_39[2].values))
spec2 = Spectrum1D(spectral_axis=np.round((ord_38[0].values - shift), 4)*u.nm,
flux=ord_38[1].values*u.Jy,
uncertainty=StdDevUncertainty(ord_38[2].values))
spec3 = Spectrum1D(spectral_axis=np.round((ord_37[0].values - shift), 4)*u.nm,
flux=ord_37[1].values*u.Jy,
uncertainty=StdDevUncertainty(ord_37[2].values))
if print_stat:
print('The three spectral orders used range from; {}nm-{}nm, {}nm-{}nm, and {}nm-{}nm'.format(spec1.spectral_axis[0].value,
spec1.spectral_axis[-1].value,
spec2.spectral_axis[0].value,
spec2.spectral_axis[-1].value,
spec3.spectral_axis[0].value,
spec3.spectral_axis[-1].value))
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
print('The doppler shift size using RV {} m/s and the NaID1 line of 588.995nm is: {}nm'.format(radial_velocity, shift))
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
# Fitting the continuum for each order separately using 'specutils'
if norm_spec:
if print_stat:
print('Normalising the spectra by fitting a {}th order polynomial to the entire spectral order'.format(degree))
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
# First order
with warnings.catch_warnings(): # Ignore warnings
warnings.simplefilter('ignore')
g1_fit = fit_generic_continuum(spec1, model=Chebyshev1D(degree)) # Using 'Chebyshev1D' to define an nth order polynomial model
if print_stat:
print('Polynomial fit coefficients:')
print(g1_fit)
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
y_cont_fitted1 = g1_fit(spec1.spectral_axis) # Continuum fit y values are calculated by inputting the spectral axis x values into the polynomial fit equation
# The spectrum is divided by the continuum fit to get the normalized spectrum
spec_normalized1 = spec1 / y_cont_fitted1 # Note the continuum normalized spectrum also has new uncertainty values!
if plot_fit:
f, ax1 = plt.subplots()
ax1.plot(spec1.spectral_axis, spec1.flux)
ax1.plot(spec1.spectral_axis, y_cont_fitted1)
ax1.set_xlabel('$\lambda (nm)$')
ax1.set_ylabel('Normalized Flux')
ax1.set_title("Continuum Fitting First Order")
f, ax2 = plt.subplots()
ax2.plot(spec_normalized1.spectral_axis, spec_normalized1.flux, label='Normalized', alpha=0.6)
ax2.plot(spec1.spectral_axis, spec1.flux, color='red', label='Non-Normalized', alpha=0.6)
plt.axhline(1.0, ls='--', c='gray')
plt.vlines(F1_line-(F1_band/2), ymin=0, ymax=max(spec1.flux.value), linestyles='--', colors='black', label='Blue cont. region')
plt.vlines(F1_line+(F1_band/2), ymin=0, ymax=max(spec1.flux.value), linestyles='--', colors='black')
ax2.set_xlabel('$\lambda (nm)$')
ax2.set_ylabel('Normalized Flux')
ax2.set_title("Continuum Normalized First Order")
plt.legend()
if save_figs:
if print_stat:
print('Saving plots as PDFs in the working directory')
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
plt.savefig('{}_cont_fit_F1_plot.pdf'.format(save_figs_name), format='pdf')
# Second order
with warnings.catch_warnings(): # Ignore warnings
warnings.simplefilter('ignore')
g2_fit = fit_generic_continuum(spec2, model=Chebyshev1D(degree))
if print_stat:
print('Polynomial fit coefficients:')
print(g2_fit)
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
y_cont_fitted2 = g2_fit(spec2.spectral_axis)
spec_normalized2 = spec2 / y_cont_fitted2
if plot_fit:
f, ax1 = plt.subplots()
ax1.plot(spec2.spectral_axis, spec2.flux)
ax1.plot(spec2.spectral_axis, y_cont_fitted2)
ax1.set_xlabel('$\lambda (nm)$')
ax1.set_ylabel('Normalized Flux')
ax1.set_title("Continuum Fitting Second Order")
f, ax2 = plt.subplots()
ax2.plot(spec_normalized2.spectral_axis, spec_normalized2.flux, label='Normalized', alpha=0.6)
ax2.plot(spec2.spectral_axis, spec2.flux, color='red', label='Non-Normalized', alpha=0.6)
plt.axhline(1.0, ls='--', c='gray')
plt.vlines(NaID1-1.0, ymin=0, ymax=max(spec2.flux.value), linestyles='--', colors='black', label='NaID lines region')
plt.vlines(NaID2+1.0, ymin=0, ymax=max(spec2.flux.value), linestyles='--', colors='black')
ax2.set_xlabel('$\lambda (nm)$')
ax2.set_ylabel('Normalized Flux')
ax2.set_title("Continuum Normalized Second Order")
plt.legend()
if save_figs:
plt.savefig('{}_cont_fit_F2_plot.pdf'.format(save_figs_name), format='pdf')
# Third order
with warnings.catch_warnings(): # Ignore warnings
warnings.simplefilter('ignore')
g3_fit = fit_generic_continuum(spec3, model=Chebyshev1D(degree))
if print_stat:
print('Polynomial fit coefficients:')
print(g3_fit)
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
y_cont_fitted3 = g3_fit(spec3.spectral_axis)
spec_normalized3 = spec3 / y_cont_fitted3
if plot_fit:
f, ax1 = plt.subplots()
ax1.plot(spec3.spectral_axis, spec3.flux)
ax1.plot(spec3.spectral_axis, y_cont_fitted3)
ax1.set_xlabel('$\lambda (nm)$')
ax1.set_ylabel('Flux')
ax1.set_title("Continuum Fitting Third Order")
f, ax2 = plt.subplots()
ax2.plot(spec_normalized3.spectral_axis, spec_normalized3.flux, label='Normalized', alpha=0.6)
ax2.plot(spec3.spectral_axis, spec3.flux, color='red', label='Non-Normalized', alpha=0.6)
plt.axhline(1.0, ls='--', c='gray')
plt.vlines(F2_line-(F2_band/2), ymin=0, ymax=max(spec3.flux.value), linestyles='--', colors='black', label='F2 region')
plt.vlines(F2_line+(F2_band/2), ymin=0, ymax=max(spec3.flux.value), linestyles='--', colors='black')
ax2.set_xlabel('$\lambda (nm)$')
ax2.set_ylabel('Normalized Flux')
ax2.set_title("Continuum Normalized Third Order")
plt.legend()
if save_figs:
plt.savefig('{}_cont_fit_F3_plot.pdf'.format(save_figs_name), format='pdf')
spec1 = spec_normalized1
spec2 = spec_normalized2
spec3 = spec_normalized3
# Extracting the regions required for index calculation from each spectrum using 'extract_region' and the given bandwidths
NaID1_region = extract_region(spec2, region=SpectralRegion((NaID1-(NaI_band/2))*u.nm,
(NaID1+(NaI_band/2))*u.nm))
NaID2_region = extract_region(spec2, region=SpectralRegion((NaID2-(NaI_band/2))*u.nm,
(NaID2+(NaI_band/2))*u.nm))
# Using spec1 for blue continuum
F1_region = extract_region(spec1, region=SpectralRegion((F1_line-(F1_band/2))*u.nm,
(F1_line+(F1_band/2))*u.nm))
# Using spec3 for red continuum
F2_region = extract_region(spec3, region=SpectralRegion((F2_line-(F2_band/2))*u.nm,
(F2_line+(F2_band/2))*u.nm))
regions = [NaID1_region, NaID2_region, F1_region, F2_region]
# Calculating the index using 'calc_ind' from krome.spec_analysis
I_NaI, I_NaI_err, F1_mean, F2_mean = calc_ind(regions=regions,
index_name='NaI',
print_stat=print_stat,
hfv=hfv)
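# Note (descriptive comment, not from the original analysis): in the usual
# convention the NaI index is a ratio of the flux in the D1 and D2 line cores
# to the blue and red pseudo-continuum fluxes (F1_mean and F2_mean here); the
# exact expression is implemented inside calc_ind from krome.spec_analysis.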
# Plotting the pseudo-continuum as the linear interpolation of the values in each red and blue cont. window!
if plot_spec:
x = [F1_line, F2_line]
y = [F1_mean.value, F2_mean.value]
f, ax = plt.subplots(figsize=(10,4))
ax.plot(spec1.spectral_axis, spec1.flux, color='red', label='#39', alpha=0.5)
ax.plot(spec2.spectral_axis, spec2.flux, color='blue', label='#38', alpha=0.5)
ax.plot(spec3.spectral_axis, spec3.flux, color='green', label='#37', alpha=0.5)
ax.plot(x, y, 'ok--', label='pseudo-continuum')
ax.set_xlabel('$\lambda (nm)$')
ax.set_ylabel("Normalized Flux")
ax.set_title('Overplotting 3 orders around NaI D lines')
plt.vlines(F1_line-(F1_band/2), ymin=0, ymax=max(spec1.flux.value), linestyles='dotted', colors='blue', label='Blue cont. {}±{}'.format(F1_line, F1_band/2))
plt.vlines(F1_line+(F1_band/2), ymin=0, ymax=max(spec1.flux.value), linestyles='dotted', colors='blue')
plt.vlines(F2_line-(F2_band/2), ymin=0, ymax=max(spec1.flux.value), linestyles='dashdot', colors='red', label='Red cont. {}±{}'.format(F2_line, F2_band/2))
plt.vlines(F2_line+(F2_band/2), ymin=0, ymax=max(spec1.flux.value), linestyles='dashdot', colors='red')
plt.axhline(1.0, ls='--', c='gray')
plt.tight_layout()
plt.legend()
if save_figs:
if print_stat:
print('Saving plots as PDFs in the working directory')
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
plt.savefig('{}_reduced_spec_plot.pdf'.format(save_figs_name), format='pdf')
f, ax1 = plt.subplots(figsize=(10,4))
ax1.plot(spec2.spectral_axis, spec2.flux, color='blue', label='#38')
ax1.set_xlabel('$\lambda (nm)$')
ax1.set_ylabel("Normalized Flux")
plt.vlines(NaID1, ymin=0, ymax=max(spec2.flux.value), linestyles='dotted', colors='red', label='D1')
plt.vlines(NaID2, ymin=0, ymax=max(spec2.flux.value), linestyles='dotted', colors='blue', label='D2')
plt.vlines(NaID1-(NaI_band/2), ymin=0, ymax=max(spec2.flux.value), linestyles='--', colors='black', label='D1,D2 band width = {}nm'.format(NaI_band))
plt.vlines(NaID1+(NaI_band/2), ymin=0, ymax=max(spec2.flux.value), linestyles='--', colors='black')
plt.vlines(NaID2-(NaI_band/2), ymin=0, ymax=max(spec2.flux.value), linestyles='--', colors='black')
plt.vlines(NaID2+(NaI_band/2), ymin=0, ymax=max(spec2.flux.value), linestyles='--', colors='black')
ax1.set_xlim(NaID2-(NaI_band/2)-0.2, NaID1+(NaI_band/2)+0.2)
plt.tight_layout()
plt.legend()
if save_figs:
plt.savefig('{}_NaID1D2_lines_plot.pdf'.format(save_figs_name), format='pdf')
if out_file_path is not None:
header = ['HJD', 'RA', 'DEC', 'AIRMASS', 'T_EXP', 'NUM_EXP', 'GAIN', 'RON', 'V_mag', 'T_eff', 'RV', 'I_NaI', 'I_NaI_err']
res = list(obj_params.values()) + [I_NaI, I_NaI_err] # Creating results list 'res' containing the calculated parameters and appending this list to the 'results' empty list created at the start of this function!
results.append(res)
else:
header = ['I_NaI', 'I_NaI_err']
res = [I_NaI, I_NaI_err]
results.append(res)
## HARPS
elif Instrument=='HARPS':
# Opening the FITS file using 'astropy.io.fits' and extracting useful object parameters and spectrum using read_data from krome.spec_analysis
# NOTE: The format of this FITS file must be ADP which contains the reduced spectrum with the wav, flux and flux_err in three columns
if ccf_file_path is not None:
obj_params, spec = read_data(file_path=file_path[i],
ccf_file_path=ccf_file_path[i],
Instrument=Instrument,
print_stat=print_stat,
show_plots=False)
else:
obj_params, spec = read_data(file_path=file_path[i],
Instrument=Instrument,
print_stat=print_stat,
show_plots=False)
obj_params['RV'] = radial_velocity # setting obj_params['RV'] to the given radial_velocity argument!
# Assigning appropriate variables from spec individually!
wvl = spec[0] # nm
flx = spec[1] # ADU
flx_err = spec[2]
# Calculating doppler shift size using delta_lambda/lambda = v/c and the RV from the CCF FITS file
shift = ((obj_params['RV']/ap.constants.c.value)*NaID1)
shift = (round(shift, 3)) # Using only 3 decimal places for the shift value since that's the precision of the wavelength in the .FITS files!
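# Worked example (illustrative): for an RV of 10 km/s and the Na D1 line at ~589.59 nm,
# the shift is roughly (1e4 / 3e8) * 589.59 nm ~ 0.020 nm, which survives the 3-decimal rounding above.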
# Since the HARPS spectra have their individual spectral orders stitched together, we do not have to extract them separately as done for NARVAL. Thus for HARPS, the required region is extracted by slicing the spectrum with the index corresponding to the left and right continuum obtained using the 'find_nearest' function.
left_idx = find_nearest(wvl, F1_line-2) # ± 2 nm extra included for both!
right_idx = find_nearest(wvl, F2_line+2)
# If condition for when certain files have NaN as the flux errors; probably for all of them, since the ESO Phase 3 data currently does not include the flux errors
flx_err_nan = np.isnan(np.sum(flx_err)) # NOTE: This returns True if there is one NaN value or all are NaN values!
if flx_err_nan:
if np.isnan(obj_params['RON']):
if print_stat:
print('File contains NaN in flux errors array. Calculating flux errors using CCD readout noise: {}'.format(np.round(obj_params['RON'], 4)))
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
# Flux error calculated as photon noise plus CCD readout noise
# NOTE: The error calculation depends on a lot of other CCD parameters such as the pixel binning in each CCD
# array and so on. But for photon-noise-limited measurements, the readout noise contribution is generally insignificant.
with warnings.catch_warnings(): # Ignore warnings
warnings.simplefilter('ignore')
flx_err_ron = [np.sqrt(flux + np.square(obj_params['RON'])) for flux in flx]
# Copyright 2019, Oath Inc.
# Licensed under the terms of the Apache License, Version 2.0. See LICENSE file for terms.
from ariel import utils, LOGGER
from datetime import timedelta
import numpy as np
import pandas as pd
import operator
import sys
# Two reports to be generated:
# 1) By region / family - % chance a new instance is likely to be covered by an RI
# Report 2 .groupby() will calculate this
# 2) By time of week / region / family - % chance a new instance is likely to be covered
# Needs for both:
# Unused RIs -- If unused, chance = 100%
# RIs not used by purchasing account -- If no unused, chance = unused-by-purchasing/not-covered-by-purchasing
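# The coverage percentage computed further below reduces to the ratio of
# floating (unassigned) RI units to the larger of floating instance units and
# floating RI units, floored at 1 to avoid division by zero. A minimal,
# purely illustrative sketch of that formula (not used by generate()):
def _coverage_chance_sketch(floating_ri_units, floating_instance_units):
    """Return the % chance a new instance is covered, mirroring generate()."""
    denominator = np.maximum(np.maximum(floating_instance_units, floating_ri_units), 1)
    return floating_ri_units / denominator * 100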
def generate(config, instances, ris, pricing):
def get_units(instancetype):
try:
return pricing['units'][instancetype]
except KeyError as e:
if '.' in instancetype:
raise e
for key in pricing['units']:
if key.endswith('.' + instancetype):
return pricing['units'][key]
raise e
# Make sure we have a reasonable amount of data
if instances['usagestartdate'].max() - instances['usagestartdate'].min() < timedelta(days=14):
raise ValueError('Insufficient Data')
# Preaggregate some data
timerange = instances['usagestartdate'].unique()
# Add some additional data to instances
hourofweek_column = instances.columns.get_loc('usagestartdate') + 1
hourofweek_value = instances['usagestartdate'].dt.dayofweek * 24 + instances['usagestartdate'].dt.hour
instances.insert(hourofweek_column, 'hourofweek', hourofweek_value)
region_column = instances.columns.get_loc('availabilityzone')
region_value = instances['availabilityzone'].str[:-1]
instances.insert(region_column, 'region', region_value)
family_column = instances.columns.get_loc('instancetype')
# meckstmd:07/29/2019 - Metal RIs are no different than regular RIs - they are a family with a normalization factor
# for example, i3.metal is equivalent to i3.16xlarge. See https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/apply_ri.html
#family_value = instances['instancetype'].apply(lambda x: x if x.endswith('.metal') else x.split('.')[0])
family_value = instances['instancetype'].apply(lambda x: x.split('.')[0])
instances.insert(family_column, 'instancetypefamily', family_value)
# Amazon still hasn't fixed g4dn, so we need to filter out instance types and RIs that we don't have size data about.
instances = instances[instances.instancetype.isin(pricing['units'].keys())].reset_index(drop=True)
ris = ris[ris.instancetype.isin(pricing['units'].keys())].reset_index(drop=True)
# Filter out instances and RIs we're not interested in
skip_accounts = utils.get_config_value(config, 'RI_PURCHASES', 'SKIP_ACCOUNTS', '').split(' ')
instances = instances[~instances.usageaccountid.isin(skip_accounts)].reset_index(drop=True)
ris = ris[~ris.accountid.isin(skip_accounts)].reset_index(drop=True)
include_accounts = utils.get_config_value(config, 'RI_PURCHASES', 'INCLUDE_ACCOUNTS', '').split(' ')
if (include_accounts[0] != ''):
instances = instances[instances.usageaccountid.isin(include_accounts)].reset_index(drop=True)
ris = ris[ris.accountid.isin(include_accounts)].reset_index(drop=True)
instance_units_column = instances.columns.get_loc('instances') + 2
units_value = instances['instancetype'].apply(get_units) * instances['instances']
instances.insert(instance_units_column, 'instance_units', units_value)
reserved_units_column = instances.columns.get_loc('reserved') + 2
units_value = instances['instancetype'].apply(get_units) * instances['reserved']
instances.insert(reserved_units_column, 'reserved_units', units_value)
# Add some additional data to ris
family_column = ris.columns.get_loc('instancetype') + 1
# meckstmd:07/29/2019 - Metal RIs are no different than regular RIs - they are a family with a normalization factor
# for example, i3.metal is equivalent to i3.16xlarge. See https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/apply_ri.html
#family_value = ris['instancetype'].apply(lambda x: x if x.endswith('.metal') else x.split('.')[0])
family_value = ris['instancetype'].apply(lambda x: x.split('.')[0])
ris.insert(family_column, 'instancetypefamily', family_value)
units_column = ris.columns.get_loc('quantity') + 1
units_value = ris['instancetype'].apply(get_units) * ris['quantity']
ris.insert(units_column, 'units', units_value)
# Create aggregates for faster processing
az_instance_groups = instances.groupby(['availabilityzone', 'instancetype', 'tenancy', 'operatingsystem'])
az_account_instance_groups = instances.groupby(az_instance_groups.keys + ['usageaccountid'])
region_instance_groups = instances.groupby(['region', 'instancetypefamily', 'tenancy', 'operatingsystem'])
region_account_instance_groups = instances.groupby(region_instance_groups.keys + ['usageaccountid'])
ri_groups = ris.groupby(region_instance_groups.keys + ['scope'])
# Reference Lookup
all_sizes = instances['instancetype'].apply(lambda x: x.split('.')[1]).unique()
reference_sizes = {}
for family in ris['instancetypefamily'].unique():
for size in all_sizes:
if "{}.{}".format(family, size) in pricing['us-east-1']:
reference_sizes[family] = size
break
# Reports
unused_az_ris = pd.DataFrame(columns=az_instance_groups.keys + ['min_unused_qty', 'avg_unused_qty', 'max_unused_qty'])
ri_hourly_usage_report = pd.DataFrame(columns=region_instance_groups.keys + ['hourofweek'] +
['total_ri_units', 'total_instance_units', 'floating_ri_units', 'floating_instance_units', 'unused_ri_units', 'coverage_chance'])
ri_purchases = pd.DataFrame(columns=['Account ID', 'Scope', 'Region / AZ', 'Instance Type', 'Operating System',
'Tenancy', 'Offering Class', 'Payment Type', 'Term', 'Quantity', 'accountid', 'family', 'units',
'ri upfront cost', 'ri total cost', 'ri savings', 'ondemand value', 'algorithm'])
# NOTE: For usage values, AZ usage is booked by quantity (instances), Region usage is booked by units.
# Iterate by Union of (Region Instance Groups and RI Groups)
for group in sorted(list(set(region_instance_groups.groups.keys()) |
set(ris.groupby(region_instance_groups.keys).groups.keys()))):
# for group in [('ap-northeast-1', 'c4', 'Shared', 'Linux')]:
region, family, tenancy, operatingsystem = group
LOGGER.debug("Evaluting {:>14}:{:3} ({}, {})".format(region, family, tenancy, operatingsystem))
if region not in pricing:
LOGGER.warning("Skipping region {} due to missing pricing information".format(region))
continue
# Account for In-Account AZ RI usage
# In-Account RI usage only needs to be counted against Regional Usage for accuracy
try:
az_ris = ri_groups.get_group(group + tuple(['Availability Zone']))
except KeyError:
az_ris = pd.DataFrame(columns=ris.columns)
az_account_hour_ri_usage = pd.DataFrame(columns=az_account_instance_groups.keys + ['hourofweek', 'instances'])
region_account_hour_ri_usage = pd.DataFrame(columns=region_account_instance_groups.keys + ['hourofweek', 'instance_units'])
for index, az_ri in az_ris.iterrows():
LOGGER.debug("Evaluating In-Account AZ RI: {}:{} {} x{}".format(az_ri['accountid'], az_ri['availabilityzone'],
az_ri['instancetype'], az_ri['quantity']))
try:
group_key = (az_ri['availabilityzone'], az_ri['instancetype'], tenancy, operatingsystem, az_ri['accountid'])
az_account_instance_group = az_account_instance_groups.get_group(group_key)
except KeyError:
continue
# Straight to hourofweek average, since there should not be multiple rows per usagestartdate
in_account_usage = az_account_instance_group.groupby(['hourofweek'])['instances'].mean()
# Account for already assigned usage from previously evaluated AZ RIs
in_account_assigned = az_account_hour_ri_usage[
(az_account_hour_ri_usage['availabilityzone'] == az_ri['availabilityzone']) &
(az_account_hour_ri_usage['instancetype'] == az_ri['instancetype']) &
(az_account_hour_ri_usage['usageaccountid'] == az_ri['accountid'])
].groupby('hourofweek')['instances'].sum()
if len(in_account_assigned) > 0:
in_account_usage -= in_account_assigned
in_account_used = np.minimum(in_account_usage, az_ri['quantity'])
# Build assignment usage rows
usage_keys = pd.DataFrame([group_key], columns=az_account_instance_groups.keys)
usage_data = pd.DataFrame({'key': 1, 'hourofweek': in_account_used.index, 'instances': in_account_used.values})
usage = usage_keys.assign(key=1).merge(usage_data, on='key').drop('key', 1)
LOGGER.debug("In-Account Assigned AZ Usage:\n" + str(usage.head()))
az_account_hour_ri_usage = az_account_hour_ri_usage.append(usage, ignore_index=True)
# Build regional usage rows
usage_keys = pd.DataFrame([group + tuple([az_ri['accountid']])], columns=region_account_instance_groups.keys)
usage_data = pd.DataFrame({'key': 1, 'hourofweek': in_account_used.index,
'instance_units': in_account_used.values * get_units(az_ri['instancetype'])})
usage = usage_keys.assign(key=1).merge(usage_data, on='key').drop('key', 1)
LOGGER.debug("In-Account Regional Assigned AZ Usage:\n" + str(usage.head()))
region_account_hour_ri_usage = region_account_hour_ri_usage.append(usage, ignore_index=True)
# Account for Cross-Account AZ RI Usage
# To simplify analysis, treat in-account and cross-account identically since we only report unused AZ RIs
az_ris = az_ris.groupby(['availabilityzone', 'instancetype'])
# for index, az_ri in az_ris.iterrows():
for az_group in az_ris.groups.keys():
availabilityzone, instancetype = az_group
quantity = az_ris.get_group(az_group)['quantity'].sum()
LOGGER.debug("Evaluating Cross-Account AZ RI: {} {} x{}".format(availabilityzone, instancetype, quantity))
try:
group_key = (availabilityzone, instancetype, tenancy, operatingsystem)
az_instance_group = az_instance_groups.get_group(group_key)
except KeyError:
continue
# Aggregate by hour before hourofweek
total_usage = az_instance_group.groupby(['usagestartdate', 'hourofweek'])['instances'].sum(). \
groupby(['hourofweek']).mean()
# No pre-assigned usage since individual RI subscriptions are getting bundled
total_used = np.minimum(total_usage, quantity)
# Add to regional usage for purchase recommendations
usage_keys = pd.DataFrame([group + tuple(['000000000000'])], columns=region_account_instance_groups.keys)
usage_data = pd.DataFrame({'key': 1, 'hourofweek': total_used.index,
'instance_units': total_used.values * get_units(instancetype)})
usage = usage_keys.assign(key=1).merge(usage_data, on='key').drop('key', 1)
LOGGER.debug("Cross-Account Regional Assigned AZ Usage:\n" + str(usage.head()))
region_account_hour_ri_usage = region_account_hour_ri_usage.append(usage, ignore_index=True)
unused = quantity - total_used
if unused.max() > 0:
unused_az_ri_row = {
'availabilityzone': availabilityzone,
'instancetype': instancetype,
'tenancy': tenancy,
'operatingsystem': operatingsystem,
'min_unused_qty': unused.min(),
'avg_unused_qty': unused.mean(),
'max_unused_qty': unused.max(),
}
unused_az_ris = unused_az_ris.append(unused_az_ri_row, ignore_index=True)
LOGGER.debug("Unused AZ RIs:\n" + str(unused_az_ri_row))
# Account for In-Account Region RI Usage
# In-Account Region RI usage only needed to calculate RI Float
try:
region_ris = ri_groups.get_group(group + tuple(['Region']))
except KeyError:
region_ris = pd.DataFrame(columns=ris.columns)
region_hour_ri_usage = pd.DataFrame(columns=region_instance_groups.keys + ['hourofweek', 'units'])
account_region_ris = region_ris.groupby(['accountid'])
for accountid in account_region_ris.groups.keys():
ri_units = account_region_ris.get_group(accountid)['units'].sum()
LOGGER.debug("Evaluating In-Account Region RI: {}:{} {} x{}".format(accountid, region, family, ri_units))
try:
group_key = (region, family, tenancy, operatingsystem, accountid)
region_account_instance_group = region_account_instance_groups.get_group(group_key)
except KeyError:
continue
# Aggregate by hour before hourofweek
in_account_usage = region_account_instance_group.groupby(['usagestartdate', 'hourofweek']) \
['instance_units'].sum().groupby(['hourofweek']).mean()
# Account for already assigned usage from AZ RIs
in_account_assigned = region_account_hour_ri_usage[
(region_account_hour_ri_usage['usageaccountid'] == accountid)
].groupby('hourofweek')['instance_units'].sum()
if len(in_account_assigned) > 0:
in_account_usage -= in_account_assigned
in_account_used = np.minimum(in_account_usage, ri_units)
# Fix partial indexes
in_account_used = in_account_used.reindex(range(168), copy=False, fill_value=0.0)
# Build usage rows
usage_keys = pd.DataFrame([group], columns=region_instance_groups.keys)
usage_data = pd.DataFrame({'key': 1, 'hourofweek': in_account_used.index, 'units': in_account_used.values})
usage = usage_keys.assign(key=1).merge(usage_data, on='key').drop('key', 1)
LOGGER.debug("In-Account Assigned Region Usage:\n" + str(usage.head()))
region_hour_ri_usage = region_hour_ri_usage.append(usage, ignore_index=True)
try:
region_instance_group = region_instance_groups.get_group(group)
except:
# This is a bit heavy, but it shouldn't be called frequently.
# Create a new DataFrame that has the right structure
region_instance_group = region_instance_groups.get_group(list(region_instance_groups.groups.keys())[0])
region_instance_group = region_instance_group.assign(instances=0)
region_instance_group = region_instance_group.assign(instance_units=0)
region_instance_group = region_instance_group.assign(reserved=0)
# Account for Cross-Account Region RI Usage
if len(region_ris) == 0:
ri_units = 0
else:
ri_units = region_ris['units'].sum()
LOGGER.debug("Evaluating Cross-Account Region RI: {} {} x{}".format(region, family, ri_units))
# Aggregate by hour before hourofweek
total_usage = region_instance_group.groupby(['usagestartdate', 'hourofweek']) \
['instance_units'].sum().groupby(['hourofweek']).mean()
# In-Account usage to calculate float
in_account_usage = region_hour_ri_usage.groupby(['hourofweek'])['units'].sum()
if len(in_account_usage) == 0:
in_account_usage = pd.Series(0, index=total_usage.index)
# Floating RIs
floating_ri_units = ri_units - in_account_usage
# Instances eligible for float
floating_instance_units = total_usage - in_account_usage
# Unused RIs
unused_ri_units = np.maximum(ri_units - total_usage, 0)
# % Chance a new instance will be covered
coverage_chance = floating_ri_units / np.maximum(np.maximum(floating_instance_units, floating_ri_units), 1) * 100
# Build report rows
usage_keys = pd.DataFrame([group], columns=region_instance_groups.keys)
usage_data = pd.DataFrame({
'key': 1,
'hourofweek': total_usage.index,
'total_ri_units': ri_units,
'total_instance_units': total_usage.values,
'floating_ri_units': floating_ri_units.values,
'floating_instance_units': floating_instance_units.values,
'unused_ri_units': unused_ri_units.values,
'coverage_chance': coverage_chance.values,
})
usage = usage_keys.assign(key=1).merge(usage_data, on='key').drop('key', 1)
LOGGER.debug("Cross-Account Region Usage Report:\n" + str(usage.head()))
ri_hourly_usage_report = ri_hourly_usage_report.append(usage, ignore_index=True)
# RI Utilization Evaluation complete. Evaluate Purchase recommendations
if region_instance_group['instance_units'].sum() > 0:
# Calculate usage slope to determine purchase aggressiveness
# First filter data to reduce noise.
region_hourly_usage = region_instance_group.groupby(['usagestartdate', 'hourofweek'])['instance_units'].sum()
threshold = int(utils.get_config_value(config, 'RI_PURCHASES', 'FILTER_THRESHOLD', 3))
signal = region_hourly_usage.values.copy()
delta = np.abs(signal - np.mean(signal))
median_delta = np.median(delta)
if median_delta > 0:
mask = (delta / float(median_delta)) > threshold
signal[mask] = np.median(signal)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import numpy as np
import torch
from utils.utils import get_array_affine_header, get_largest_connected_region, get_2_largest_connected_region
from torch.utils.data import DataLoader
import tqdm
import distutils.dir_util
import nibabel
from skimage.transform import resize, rotate
from datasets.dataset_flare21 import tiny_dataset_flare21
from nets.whichnet import whichnet
import os
def listdir_nohidden(path):
l = []
for f in np.sort(os.listdir(path)) :
if not f.startswith('.'):
l.append(f)
return l
def infer_flare21(net,
net_id,
anatomy,
output,
device,
vgg,
size):
list_ = listdir_nohidden('./inputs/')
test_ids = []
for elem_ in list_:
if elem_.split('.')[1] == 'nii':
test_ids.append(elem_.split('_')[1])
test_ids = np.array(test_ids, dtype=int)
for index, id_ in enumerate(tqdm.tqdm(test_ids)):
test_dataset = tiny_dataset_flare21(id_, size, anatomy, vgg)
test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)
array, affine, header = get_array_affine_header(test_dataset, 'CT')
array_liver = np.copy(array)
# -*- coding: utf-8 -*-
"""
A DIY Neural Net code for basic concepts
@author: whatdhack
Download MNIST csv data from https://pjreddie.com/projects/mnist-in-csv/
Ref:
1. The Deep Learning Book by <NAME>, <NAME> and <NAME>
(http://www.deeplearningbook.org/)
2. Make Your Own Neural Networks by <NAME>
(https://github.com/makeyourownneuralnetwork/makeyourownneuralnetwork )
3. Machine Learning by Andrew Ng on Coursera
(https://www.coursera.org/learn/machine-learning)
"""
import numpy as np
# scipy.special for the sigmoid function expit()
import scipy.special
from random import shuffle
import time
# neural network class definition
class NeuralNetwork:
# initialise the neural network
def __init__(self, layernodes, learningrate, mbsize=3, training_data_file_name="data/mnist_train.csv", test_data_file_name="data/mnist_test.csv"):
# set number of layers and nodes per layer
self.layernodes = layernodes
self.numlayers = len(layernodes)
# link weight matrices, w
self.w = []
for i in range (len(layernodes) -1) :
self.w.append (np.random.normal(0.0, pow(self.layernodes[i], -0.5), (self.layernodes[i+1], self.layernodes[i])))
print ('w[%d].shape'% i, self.w[i].shape)
# learning rate
self.lr = learningrate
# mini-batch size
self.mbsize = mbsize
# activation function is the sigmoid function
self.activation_function = lambda x: scipy.special.expit(x)
# load the training data CSV file into a list
training_data_file = open(training_data_file_name, 'r')
self.training_data = training_data_file.readlines()
training_data_file.close()
# shuffle the trainign data
shuffle(self.training_data)
# load the mnist test data CSV file into a list
test_data_file = open(test_data_file_name, 'r')
self.test_data = test_data_file.readlines()
test_data_file.close()
self.train = self.trainMBSGD
return
# forward pass through the neural network
# Ni Wi Nj Wj Nk
# Ii Oi Ij Oj Ik Ok
def forward (self, inputs_list):
# convert inputs list to 2d array
# calculate signals into a layer
# calculate the signals emerging from a layer
inputsl = [np.array(inputs_list, ndmin=2).T]
outputsl = [np.array(inputs_list, ndmin=2).T]
for l in range(1,self.numlayers):
inputsl.append(np.dot(self.w[l-1], outputsl[l-1]))
outputsl.append(self.activation_function(inputsl[l]))
return outputsl
# back-propagate errors and adjust the weights of the neural network
# Ni Wi Nj Wj Nk
# Ei Ej Ek
def backpropSGD(self, inputs_list, targets_list):
# convert inputs list to 2d array
targets = np.array(targets_list, ndmin=2).T
outputsl = self.forward(inputs_list)
errorsl = [None]*(self.numlayers)
errorsl[-1] = targets - outputsl[-1]
#print ('range(self.numlayers-2,0,-1)', range(self.numlayers-2,0,-1))
for l in range(self.numlayers-2,-1,-1):# W1,W0
errorsl[l] = np.dot(self.w[l].T, errorsl[l+1]) # error bp is proportinal to weight
errorp = errorsl[l+1] * outputsl[l+1] * (1.0 - outputsl[l+1]) # dE/dw = E*do/dw for sigmoid
opprev = np.transpose(outputsl[l])
dw = self.lr * np.dot( errorp, opprev)
#print ('w[l].shape', self.w[l].shape, 'dw.shape', dw.shape)
self.w[l] += dw
#print ('w[%d]'%l, self.w[l])
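# Derivation sketch for the update above (descriptive comment only): with a
# sigmoid activation o = sigmoid(z) we have do/dz = o * (1 - o), so the
# per-layer weight gradient is
#     dE/dW_l ~ (e_{l+1} * o_{l+1} * (1 - o_{l+1})) . o_l^T
# i.e. errorp dotted with the transposed previous-layer output, and the code
# applies w_l += lr * dE/dW_l scaled by the learning rate.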
# back-propagate errors and adjust the weights of the neural network
# Ni Wi Nj Wj Nk
# Ei Ej Ek
def backprop1a(self, inputs_list, targets_list):
# convert inputs list to 2d array
targets = np.array(targets_list, ndmin=2).T
outputsl = self.forward(inputs_list)
errorsl = [None]*(self.numlayers)
errorsl[-1] = targets - outputsl[-1]
#print ('range(self.numlayers-2,0,-1)', range(self.numlayers-2,0,-1))
dwl = []
for l in range(self.numlayers-2,-1,-1):# W1,W0
errorsl[l] = np.dot(self.w[l].T, errorsl[l+1])
errorp = errorsl[l+1] * outputsl[l+1] * (1.0 - outputsl[l+1])
opprev = np.transpose(outputsl[l])
dw = self.lr * np.dot( errorp, opprev)
#print ('w[l].shape', self.w[l].shape, 'dw.shape', dw.shape)
#self.w[l] += dw
dwl.append(dw)
return dwl
# back-propagate errors and adjust the weights of the neural network
# Ni Wi Nj Wj Nk
# Ei Ej Ek
def backpropMBSGD(self, inputs_list, targets_list):
# convert inputs list to 2d array
targets = np.array(targets_list, ndmin=2).T
outputsl = self.forward(inputs_list)
errorsl = [None]*(self.numlayers)
errorsl[-1] = targets - outputsl[-1]
errl = []
opprevl = []
for l in range(self.numlayers-2,-1,-1):# W1,W0
errorsl[l] = np.dot(self.w[l].T, errorsl[l+1])
errorp = errorsl[l+1] * outputsl[l+1] * (1.0 - outputsl[l+1])
opprev = outputsl[l]
#print ('errorp.shape', errorp.shape, 'opprev.shape', opprev.shape)
errl.append(errorp)
opprevl.append(opprev)
return errl, opprevl
# train the neural network with SGD and minibatch size 1
def trainSGD(self, numepochs):
# epochs is the number of times the training data set is used for training
for e in range(numepochs):
# go through all records in the training data set
starttime = time.time()
for record in self.training_data:
# split the record by the ',' commas
all_values = record.split(',')
# scale and shift the inputs
inputs = (np.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
# create the target output values (all 0.01, except the desired label which is 0.99)
targets = np.zeros(self.layernodes[-1]) + 0.01
# all_values[0] is the target label for this record
targets[int(all_values[0])] = 0.99
self.backpropSGD(inputs, targets)
print ('epoch ', e, 'completed in %d s'%(time.time()- starttime))
# train the neural network with SGD and minibatch not 1
def trainMBSGD(self, numepochs):
# epochs is the number of times the training data set is used for training
minibatch_size = self.mbsize
for e in range(numepochs):
# go through all records in the training data set
num_minibatch = int (len(self.training_data) / minibatch_size)
if num_minibatch > 100000:
num_minibatch = 100000
starttime = time.time()
for i in range(0, num_minibatch*minibatch_size, minibatch_size):
errlmb = []
opprevlmb=[]
for record in self.training_data[i:i+minibatch_size]:
# split the record by the ',' commas
all_values = record.split(',')
# scale and shift the inputs
inputs = (np.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
# create the target output values (all 0.01, except the desired label which is 0.99)
targets = np.zeros(self.layernodes[-1]) + 0.01
# all_values[0] is the target label for this record
targets[int(all_values[0])] = 0.99
errl, opprevl = self.backpropMBSGD(inputs, targets)
errlmb.append(errl)
opprevlmb.append(opprevl)
errnd = np.array(errlmb)
errf = np.mean(errnd, axis=0)
opprevnd = np.array(opprevlmb)
opprevf = np.mean(opprevnd, axis=0)
# -*- coding: utf-8 -*-
import warnings
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
matplotlib.rcParams['agg.path.chunksize'] = 100000
class StepSizeError(Exception):
pass
def nlms_agm_on(alpha, update_count, threshold, d, adf_N, tap_len=64):
"""
Update formula
_________________
w_{k+1} = w_k + alpha * e_k * x_k / (||x||^2 + 1e-8)
Parameters
-----------------
alpha : float
step size
0 < alpha < 2
update_count : int
update count
threshold : float
threshold of end condition
d : ndarray
    desired signal; split internally into blocks of length adf_N
adf_N : int
    block length processed by the adaptive filter
tap_len : int, optional (default=64)
    number of filter taps, i.e. the length of the internal input x and coefficient w vectors
"""
if not 0 < alpha < 2:
raise StepSizeError
def nlms_agm_adapter(sample_num):
nonlocal x
nonlocal w
start_chunk = sample_num * adf_N
end_chunk = (sample_num + 1) * adf_N
for _ in range(1, update_count + 1):
### y = np.dot(w.T, x) # find dot product of coefficients and numbers
# =============
# TODO 8/14: replace the element-wise multiplication with a convolution
# y = w * x # find dot product of coefficients and numbers
y = np.convolve(a=w[:, 0], v=x[:, 0], mode='same').reshape(len(x),1)
# =============
### does not work: d_part_tmp = d_part[start_chunk:end_chunk, 0].reshape(adf_N, 1)
d_part_tmp = d_part.reshape(adf_N, 1)
### variant 1 of 2: y_tmp = np.full((adf_N, 1), y)
# e = (d_part[start_chunk:end_chunk, 0] - np.full((adf_N, 1), y)) # find error
e = d_part_tmp - y # find error
"""
e = d[sample_num] - y # find error
"""
# update w -> array(e)
# 8/14: not a Hadamard product but a scalar product with the norm??
w = w + alpha * e * x / (x_norm_squ + 1e-8)
e_norm = np.linalg.norm(e)
if e_norm < threshold: # error threshold
break
# TODO 8/14: remove the following
"""
e_norm = np.linalg.norm(e)
w = w + alpha * e_norm * x / x_norm_squ
if e_norm < threshold: # error threshold
break
"""
# y_opt = np.dot(w.T, x) # adapt filter
# =============
# TODO 8/14: replace the element-wise multiplication with a convolution
# y_opt = (w * x).reshape(adf_N, ) # adapt filter
y_opt = (np.convolve(a=w[:, 0], v=x[:, 0], mode='same')).reshape(adf_N, ) # adapt filter
# =============
return y_opt
# define time samples
# t = np.array(np.linspace(0, adf_N, adf_N)).T
w = np.random.rand(tap_len, 1) # initial coefficient (data_len, 1)
# w = (w - np.mean(w)) * 2
x = np.random.rand(tap_len, 1) # Make filter input figures
x = (x - np.mean(x)) * 2
# find norm square
x_norm_squ = np.dot(x.T, x)
# devision number
dev_num = len(d) // adf_N
if len(d) % adf_N != 0:
sample_len = dev_num * adf_N
warnings.warn(
f"the data was not divisible by adf_N, the last part was truncated. \
original sample : {len(d)} > {sample_len} : truncated sample")
d = d[:dev_num * adf_N]
d_dev = np.split(d, dev_num)
"""Utils for Peru soil moisture analysis.
Todo
----
- replace pickle, e.g. by csv files
"""
import datetime
import glob
import itertools
import os
import pickle
import geopandas
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import rasterio
import sklearn.metrics as me
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.models import Model, load_model
from tqdm import tqdm
def getHyperspectralBands(preprocessed=False):
"""Get list of hyperspectral bands.
Parameters
----------
preprocessed : bool, optional (default=False)
If False, all bands are included.
If True, only bands which are included after pre-processing are
included.
Returns
-------
bands : np.array of float
List of bands.
"""
bands = np.array([
888.219, 897.796, 907.373, 916.95, 926.527, 936.104, 945.681, 955.257,
964.834, 974.411, 983.988, 993.565, 1003.14, 1012.72, 1022.3, 1031.87,
1041.45, 1051.03, 1060.6, 1070.18, 1079.76, 1089.33, 1098.91, 1108.49,
1118.06, 1127.64, 1137.22, 1146.8, 1156.37, 1165.95, 1175.53, 1185.1,
1194.68, 1204.26, 1213.83, 1223.41, 1232.99, 1242.56, 1252.14, 1261.72,
1271.29, 1280.87, 1290.45, 1300.03, 1309.6, 1319.18, 1328.76, 1338.33,
1347.91, 1357.49, 1367.06, 1376.64, 1386.22, 1395.79, 1405.37, 1414.95,
1424.53, 1434.1, 1443.68, 1453.26, 1462.83, 1472.41, 1481.99, 1491.56,
1501.14, 1510.72, 1520.29, 1529.87, 1539.45, 1549.02, 1558.6, 1568.18,
1577.76, 1587.33, 1596.91, 1606.49, 1616.06, 1625.64, 1635.22, 1644.79,
1654.37, 1663.95, 1673.52, 1683.1, 1692.68, 1702.25, 1711.83, 1721.41,
1730.99, 1740.56, 1750.14, 1759.72, 1769.29, 1778.87, 1788.45, 1798.02,
1807.6, 1817.18, 1826.75, 1836.33, 1845.91, 1855.49, 1865.06, 1874.64,
1884.22, 1893.79, 1903.37, 1912.95, 1922.52, 1932.1, 1941.68, 1951.25,
1960.83, 1970.41, 1979.98, 1989.56, 1999.14, 2008.72, 2018.29, 2027.87,
2037.45, 2047.02, 2056.6, 2066.18, 2075.75, 2085.33, 2094.91, 2104.48,
2114.06, 2123.64, 2133.21, 2142.79, 2152.37, 2161.95, 2171.52, 2181.1,
2190.68, 2200.25, 2209.83, 2219.41, 2228.98, 2238.56, 2248.14, 2257.71,
2267.29, 2276.87, 2286.45, 2296.02, 2305.6, 2315.18, 2324.75, 2334.33,
2343.91, 2353.48, 2363.06, 2372.64, 2382.21, 2391.79, 2401.37, 2410.94,
2420.52, 2430.1, 2439.68, 2449.25, 2458.83, 2468.41, 2477.98, 2487.56,
2497.14, 2506.71])
if preprocessed:
ignored_bands = getIgnoredBands()
return bands[~np.isin(np.arange(len(bands)), ignored_bands)]
return bands
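# Example (illustrative): getHyperspectralBands() returns all 170 band centres,
# while getHyperspectralBands(preprocessed=True) drops the 38 ignored indices
# listed in getIgnoredBands() and returns the 132 bands kept after pre-processing.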
def getIgnoredBands():
"""Get list of indices of all ignored bands.
Returns
-------
np.array of int
List of indices of the ignored hyperspectral bands.
"""
return np.array([0, 1, 49, 50, 51, 52, 53, 98, 99, 100, 101, 102, 103, 104,
105, 106, 107, 108, 109, 110, 111, 153, 154, 155, 156,
157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167,
168, 169])
def processHeadwallData(data, verbose=0):
"""Pre-process Headwall hyperspectral images (170 to 133 bands).
Parameters
----------
data : np.array
Raw hyperspectral image with 170 bands. Shape (pixel, bands)
verbose : int, optional (default=0)
Controls the verbosity.
Returns
-------
data : np.array
Pre-processed hyperspectral image with 132 bands.
"""
if data.shape[1] < 170:
print("Error: Array already pre-processed.")
return data
# remove bands
if verbose:
print("| Shape with all bands:", data.shape)
ignore_bands = getIgnoredBands()
mask = np.ones(data.shape[1], dtype=bool)
mask[ignore_bands] = 0
data = data[:, mask]
if verbose:
print("| Shape after band removal:", data.shape)
# cap reflectance data to 1
data[data > 1.0] = 1.0
return data
def trainAutoencoder(data_pre_gen, epochs=20,
bottleneck=5, title="", verbose=0):
"""Train autoencoder on Headwall hyperspectral data.
Parameters
----------
data_pre_gen : generator
Data generator with pre-processed data
epochs : int
Number of epochs for the Autoencoder training
bottleneck : int
Number of neurons at the bottleneck layer of the Autoencoder
verbose : int, optional (default=0)
Controls the verbosity.
Returns
-------
autoencoder : Keras model
Trained model of the Autoencoder
encoder : Keras model
Trained model of the encoder
ae_weights_path : str
Path to the trained Autoencoder weights
en_weights_path : str
Path to the trained encoder weights
"""
run = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
n_features = 132 # TODO refactor
inp = Input(shape=(n_features,))
encoded = Dense(bottleneck, activation="relu")(inp)
decoded = Dense(n_features, activation="linear")(encoded)
autoencoder = Model(inp, decoded)
encoder = Model(inp, encoded)
earlystopping = EarlyStopping(
monitor="loss", mode="min", patience=20)
autoencoder.compile(optimizer='nadam', loss='mean_squared_error')
autoencoder.fit_generator(data_pre_gen, epochs=epochs,
steps_per_epoch=100, shuffle=True,
verbose=verbose, callbacks=[earlystopping])
ae_weights_path = "data/models/autoencoder_"+str(title)+"_"+run+".h5"
autoencoder.save(ae_weights_path)
en_weights_path = "data/models/encoder_"+str(title)+"_"+run+".h5"
encoder.save(en_weights_path)
return autoencoder, encoder, ae_weights_path, en_weights_path
def getPixelsAroundMeasurement(x, y, n_pixels):
"""Get `n_pixels` pixel indices around one center pixels.
Parameters
----------
x : int
X-coordinate from 0 to width-1.
y : int
Y-coordinate from 0 to height-1.
n_pixels : int
Number of pixels around center pixel.
Returns
-------
pixel_list : list of tuples (int, int)
List of pixels around center pixel.
"""
neighborhood = itertools.product(
range(-n_pixels, n_pixels+1),
range(-n_pixels, n_pixels+1))
pixel_list = [(x+z[0], y+z[1]) for z in neighborhood]
return pixel_list
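# Example (illustrative): getPixelsAroundMeasurement(10, 20, 1) yields the
# 3 x 3 = 9 index tuples from (9, 19) to (11, 21); with the default
# n_pixels_nbh=4 used in getTrainingData() a 9 x 9 neighbourhood of 81 pixels
# around the measurement is produced.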
def getTrainingData(gdf, tif_file, n_pixels_nbh=4, verbose=0):
"""Get training data.
Remember: We expect to have missing points per tif_file, since we sometimes
have several tif_images for one field.
Parameters
----------
gdf : Geopandas DataFrame
Soil moisture data
tif_file : tif image
Hyperspectral image
n_pixels_nbh : int, optional (default=4)
Number of pixels in neighborhood of soil moisture measurement. For
example, `n_pixels_nbh=4` means that 9 x 9 pixels are used.
verbose : int, optional (default=0)
Controls the verbosity.
Returns
-------
X
y
n_missing_points :
"""
if verbose:
print("| GDF before transformation", gdf.crs)
print(gdf.head())
# transform to similar crs
gdf = gdf.to_crs(tif_file.crs.to_dict())
if verbose:
print("| GDF after transformation", gdf.crs)
print("| tif-file", list(tif_file.xy(tif_file.height // 2,
tif_file.width // 2)))
X = []
y = []
n_missing_points = 0
for i, row in gdf.iterrows():
point_x = row["geometry"].x
point_y = row["geometry"].y
index_x, index_y = tif_file.index(point_x, point_y)
if verbose > 1:
print("| Point", i, ":", point_x, point_y, "|", index_x, index_y)
pixel_list = getPixelsAroundMeasurement(index_x, index_y, n_pixels_nbh)
spectra_temp = []
for p in pixel_list:
p_x, p_y = tif_file.xy(p[0], p[1])
spectrum_temp = list(tif_file.sample([(p_x, p_y)]))[0]
if np.sum(spectrum_temp) > 0.:
spectra_temp.append(spectrum_temp)
if not spectra_temp:
n_missing_points += 1
continue
# get median spectrum
spectrum = np.median(spectra_temp, axis=0)
soilmoisture = row["soilmoistu"]
if verbose > 1:
print("| Soilmoisture:", soilmoisture)
X.append(spectrum)
y.append(soilmoisture)
X = np.array(X)
y = np.array(y)
return X, y, n_missing_points
def preprocessingPixelHeadwall(x, area=None):
"""Preprocess Headwall pixel with 146 bands.
Calculate `mean` and `std` with `calcPreprocessingStats()`
Parameters
----------
x : list or array
Original input pixel(s) with 132 spectral bands
area : str or None, optional (default=None)
Name of measurement area as str. If `None`, all areas are used.
"""
# get mean and std
file_path = "data/scaling/"
if area is None:
file_path += "area_all"
else:
file_path += "area_"+str(area)
mean = pickle.load(open(file_path+"_mean.p", "rb"))
std = pickle.load(open(file_path+"_std.p", "rb"))
# loop over image channels
for idx, mean_value in enumerate(mean):
x[..., idx] -= mean_value
x[..., idx] /= std[idx]
return x
def calcPreprocessingStats(area=None):
"""Calculate mean and std for preprocessingPixelHeadwall().
Parameters
----------
area : str or None, optional (default=None)
Name of measurement area as str. If `None`, all areas are used.
"""
if area is None:
files = glob.glob("data/hyp_data_pre_area*.p")
elif isinstance(area, str):
files = glob.glob("data/hyp_data_pre_area"+str(area)+"*.p")
X = []
for f in files:
a = pickle.load(open(f, "rb"))
X.append(a)
X = np.concatenate(X)
np.set_printoptions(suppress=True)
mean = np.mean(X, axis=0)
std = np.std(X, axis=0)
std[std == 0] = 1. # fix for 4 bands in area "5a"
# save
output_path = "data/scaling/"
os.makedirs(output_path, exist_ok=True)
if area is None:
output_path += "area_all"
else:
output_path += "area_"+str(area)
pickle.dump(list(mean), open(output_path+"_mean.p", "wb"))
pickle.dump(list(std), open(output_path+"_std.p", "wb"))
# return list(mean), list(std)
def processImages(hyp_path: str,
ref_path: str,
areas: list = ["1", "2_1", "2_2", "3", "4", "5"],
return_mode: str = "pickle",
verbose=0):
"""Process hyperspectral images into training data and full image data.
For every area in `areas`, the following routine is performed:
1. Load tif file from that area.
2. Load reference data from that area
3. Extract training data with `getTrainingData()`
4. Mask full image.
5. Pre-processing with `processHeadwallData()`
In `pickle` mode:
6. Save training data for each area and save full image split up into
chunks for each area.
In `return` mode:
6. Return training data and full image.
Parameters
----------
hyp_path : str
Path to the hyperspectral images.
ref_path : str
Path to the shape files.
areas : list of str, optional
List of areas. The full list is `["1", "2_1", "2_2", "3", "4", "5"]`.
return_mode : str, optional (default="pickle")
Defines what to do with the data.
- "pickle": Saves the data in pickle binary files.
- "return": Returns the files.
- "csv": Saves csv file.
verbose : int, optional (default=0)
Controls the verbosity.
Returns
-------
hyp_data_pre_list : list
Full hyperspectral data
X_list : list
Training data (features).
y_list : list
Training data (labels).
"""
output_path = "data/processed/"
hyp_data_pre_list = []
X_list = []
y_list = []
df_list = []
hypbands = getHyperspectralBands(True)
for area in areas:
f = hyp_path + "Area" + area + "_multi_or_rf"
print("-"*40)
print("Area:", area)
if verbose:
print(f)
# load hyperspectral data
tif_file = rasterio.open(f)
hyp_data = tif_file.read()
if verbose:
print("| Hyp data:", tif_file.shape, tif_file.crs)
# load reference data
gdf = geopandas.read_file(
ref_path+"peru_soilmoisture_area"+str(area[0])+".shp")
# get training dataset
X, y, n_missing_points = getTrainingData(
gdf, tif_file, verbose=verbose)
print("| Missing points: {0} of {1}".format(
n_missing_points, gdf.shape[0]))
if n_missing_points == gdf.shape[0]:
print("| No data.")
continue
# calculate image mask & create list of all (!) pixels
if hyp_data.shape[0] == 170:
hyp_data = hyp_data.transpose(1, 2, 0)
width, height, n_features = hyp_data.shape
hyp_data_list = hyp_data.reshape((width*height, n_features))
image_mask = np.argwhere(np.mean(hyp_data_list, axis=1) != 0).flatten()
# print("There are {0:.2f} % non-zero pixels.".format(
# np.count_nonzero(image_mask)/hyp_data_list.shape[0]*100))
# pre-processing of training data X
X = processHeadwallData(X, verbose=verbose)
# pre-processing of full (masked) dataset hyp_data_list
hyp_data_pre = processHeadwallData(
hyp_data_list[image_mask], verbose=verbose)
X_list.append(X)
y_list.append(y)
if return_mode == "return":
hyp_data_pre_list.append(hyp_data_pre)
elif return_mode == "pickle":
hyp_data_pre_chunks = np.array_split(
hyp_data_pre, hyp_data_pre.shape[0]//1e5)
for i, c in enumerate(tqdm(hyp_data_pre_chunks)):
pickle.dump(c, open(
output_path+"hyp_data_pre_area"+str(area)+"_"+str(i)+".p",
"wb"))
pickle.dump(X, open(output_path+"area"+str(area)+"_X.p", "wb"))
pickle.dump(y, open(output_path+"area"+str(area)+"_y.p", "wb"))
print("| Saved all files for area", area)
elif return_mode == "csv":
df_temp = pd.DataFrame(X, columns=hypbands)
df_temp["soilmoisture"] = y
df_temp["area"] = area
df_list.append(df_temp)
# return
if return_mode == "return":
return hyp_data_pre_list, X_list, y_list
# save training data in pickle files
if return_mode == "pickle":
X_list = np.concatenate(X_list)
y_list = np.concatenate(y_list)
pickle.dump(X_list, open(output_path+"X_list.p", "wb"))
pickle.dump(y_list, open(output_path+"y_list.p", "wb"))
# save data to csv
elif return_mode == "csv":
df = pd.concat(df_list, axis=0, ignore_index=True)
df.to_csv(output_path+"peru_data.csv")
def genHypDataPre(area=None, scaling=False):
"""Generate hyperspectral data with optional pre-processing.
Parameters
----------
area : str or None, optional (default=None)
Name of measurement area as str. If `None`, all areas are used.
scaling : bool, optional (default=False)
If True, the data is scaled (mean 0, std 1).
Yields
-------
a : np.array
Hyperspectral training data.
"""
if area is None:
files = glob.glob("data/processed/hyp_data_pre_area*.p")
else:
files = glob.glob("data/processed/hyp_data_pre_area"+str(area)+"*.p")
while True:
f = np.random.choice(files, size=1, replace=False)[0]
a = pickle.load(open(f, "rb"))
if scaling:
a = preprocessingPixelHeadwall(a)
yield a, a
def getHypDataPre(area=None, scaling=False):
"""Get hyperspectral data with optional pre-processing.
Parameters
----------
area : str or None, optional (default=None)
Name of measurement area as str. If `None`, all areas are used.
scaling : bool, optional (default=False)
If True, the data is scaled (mean 0, std 1).
Returns
-------
output : np.array
Hyperspectral training data.
"""
if area is None:
files = glob.glob("data/processed/hyp_data_pre_area*.p")
else:
files = glob.glob("data/processed/hyp_data_pre_area"+str(area)+"*.p")
output = []
for f in files:
a = pickle.load(open(f, "rb"))
if scaling:
a = preprocessingPixelHeadwall(a)
output.append(a)
output = np.concatenate(output)
return output
def getEncoder(area, dim_red_mode="autoencoder"):
"""Load encoder by `area` and `mode`.
Parameters
----------
area : str
Measurement area.
dim_red_mode : str, optional (default="autoencoder")
Type of dimensionality reduction, from {None, "PCA", "autoencoder"}.
Returns
-------
keras model or pca model
Encoder model, either PCA or Autoencoder.
"""
if dim_red_mode == "autoencoder":
encoders = {
"1": "encoder_120191017_071814.h5",
"2_1": "encoder_2_120191017_073022.h5",
"2_2": "encoder_2_220191017_074235.h5",
"3_2": "encoder_3_220191017_075443.h5",
"4n": "encoder_4n20191017_080652.h5",
"5a": "encoder_5a20191017_081906.h5",
"5b": "encoder_5b20191017_083129.h5",
}
encoder = load_model("data/models/"+encoders[area], compile=False)
elif dim_red_mode == "pca":
encoders = {
'1': 'pca_1_20191115_020050.p',
'2_1': 'pca_2_1_20191115_020100.p',
'2_2': 'pca_2_2_20191115_020116.p',
'3_2': 'pca_3_2_20191115_020121.p',
'4n': 'pca_4n_20191115_020130.p',
'5a': 'pca_5a_20191115_020136.p',
'5b': 'pca_5b_20191115_020142.p',
}
encoder = pickle.load(open("data/models/"+encoders[area], "rb"))
return encoder
def trainAutoencoderPerArea(areas, bottleneck=5):
"""Train autoencoder for every area.
Parameters
----------
areas : list of str
List of measurement areas.
bottleneck : int, optional (default=5)
Number of components of the bottleneck layer.
"""
for area in areas:
print("Area", area)
_, _, _, en_path = trainAutoencoder(
data_pre_gen=genHypDataPre(area=area), verbose=0,
epochs=30, bottleneck=bottleneck, title=area)
print("Trained encoder at", en_path)
def trainPCAPerArea(areas, n_components=10):
"""Train principal component analysis (PCA) for every area.
Parameters
----------
areas : list of str
List of measurement areas.
n_components : int, optional (default=10)
Number of principal components to be included.
"""
for area in areas:
pca_path = trainPCA(
data=getHypDataPre(area=area),
n_components=n_components,
title=area)
print("'{area}': '{filename}',".format(
area=area, filename=pca_path.split("/")[1]))
def trainPCA(data, n_components, title, verbose=0):
"""Train principal component analysis (PCA).
Parameters
----------
data : np.array
Hyperspectral training data in the shape (n_datapoints, n_bands).
n_components : int
Number of principal components to be included.
title : str
Title of the saved model file.
verbose : int, optional (default=0)
Controls the verbosity.
Returns
-------
pca_path : str
Path to trained PCA model.
"""
run = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
# train PCA
pca = PCA(n_components=n_components)
pca.fit(data)
# save PCA
pca_path = "data/models/pca_"+str(title)+"_"+run+".p"
pickle.dump(pca, open(pca_path, "wb"))
if verbose:
print("PCA path:", pca_path)
return pca_path
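# Minimal usage sketch (illustrative; assumes a model pickled by trainPCA above
# and pre-processed spectra X with 132 bands), mirroring what getEncoder() and
# loadDataForEstimation() do with a PCA model:
def _apply_saved_pca(pca_path, X):
    """Load a pickled PCA model and project spectra onto its components."""
    pca = pickle.load(open(pca_path, "rb"))
    return pca.transform(X)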
def loadDataForSemiEstimation(area, scaling=False, max_sm=25., verbose=0):
"""Load data for estimation of semi-supervised learning.
Parameters
----------
area : str or list
Area(s) to be loaded for estimation.
scaling : bool, optional (default=False)
If True, the data is scaled (mean 0, std 1).
max_sm : float, optional (default=25)
Maximum soil moisture value.
verbose : int, optional (default=0)
Controls the verbosity.
Returns
-------
X_train_semi : np.array
Hyperspectral data with unlabeled and labeled data.
X_test : np.array
Hyperspectral data with labeled data.
y_train : np.array
Soil moisture data with real labels between 0-1 and dummy labels -1.
y_test : np.array
Soil moisture data with real labels between 0-1.
"""
X, _, y, _ = loadDataForEstimation(
area=area, dim_red_mode=None, scaling=scaling, max_sm=max_sm,
verbose=verbose)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, shuffle=True, random_state=0)
X_full = getHypDataPre(area=area, scaling=scaling)
X_train_semi = np.copy(X_full)
y_train_semi = np.full(shape=(X_full.shape[0], ), fill_value=-1)
X_train_semi = np.concatenate([X_train_semi, X_train], axis=0)
y_train_semi = np.concatenate([y_train_semi, y_train], axis=0)
return X_train_semi, X_test, y_train_semi, y_test
def loadDataForEstimation(area, dim_red_mode, scaling=False, max_sm=25.,
verbose=0):
"""Load data for supervised estimation.
Parameters
----------
area : str or list
Area(s) to be loaded for estimation.
dim_red_mode : str
Type of dimensionality reduction, from {None, "PCA", "autoencoder"}.
scaling : bool, optional (default=False)
If True, the data is scaled (mean 0, std 1).
max_sm : float, optional (default=25)
Maximum soil moisture value.
verbose : int, optional (default=0)
Controls the verbosity.
Returns
-------
X_train, X_test : np.array
Hyperspectral data with a shape (n_datapoints, n_bands).
y_train, y_test : np.array
Soil moisture data with a shape (n_datapoints, ).
"""
# get data
if isinstance(area, list):
X_list = []
y_list = []
for a in area:
X_list.append(pickle.load(
open("data/processed/area"+str(a)+"_X.p", "rb")))
y_list.append(pickle.load(
open("data/processed/area"+str(a)+"_y.p", "rb")))
X = np.concatenate(X_list, axis=0)
y = np.concatenate(y_list, axis=0)
else:
X = pickle.load(open("data/processed/area"+str(area)+"_X.p", "rb"))
y = pickle.load(open("data/processed/area"+str(area)+"_y.p", "rb"))
# pre-processing
if scaling:
X = preprocessingPixelHeadwall(X)
if verbose:
print("| X, y:", X.shape, y.shape)
# removal of outliers
valid_indices = np.argwhere(y < max_sm)[:, 0]
X = X[valid_indices]
y = y[valid_indices]
if verbose:
print("| After outlier removal:", X.shape, y.shape)
# encoding with encoder
if dim_red_mode is not None:
encoder = getEncoder(area, dim_red_mode=dim_red_mode)
if dim_red_mode == "autoencoder":
X = encoder.predict(X)
elif dim_red_mode == "pca":
X = encoder.transform(X)
# "split" data
X_train = X
X_test = X
y_train = y
y_test = y
if verbose:
print("| Split:", X_train.shape, X_test.shape, y_train.shape,
y_test.shape)
return X_train, X_test, y_train, y_test
def trainSoilmoistureEstimator(area, dim_red_mode="autoencoder",
verbose=0):
"""Train estimator for soil moisture.
Parameters
----------
area : str
Name of measurement area.
dim_red_mode : [type], optional (default="autoencoder")
Type of dimensionality reduction, from {None, "PCA", "autoencoder"}.
verbose : int, optional (default=0)
Controls the verbosity.
Returns
-------
model : model
Trained regression model.
"""
X_train, X_test, y_train, y_test = loadDataForEstimation(
area=area, dim_red_mode=dim_red_mode, verbose=verbose)
# training
model = RandomForestRegressor(n_estimators=1000, n_jobs=-1)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# evaluation
rmse = np.sqrt(me.mean_squared_error(y_test, y_pred))
r2 = me.r2_score(y_test, y_pred)
if verbose:
print("| R2: {0:.1f} %".format(r2*100))
print("| RMSE: {0:.2f}".format(rmse))
return model
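# Typical call sequence (illustrative; assumes the pickled training data for
# area "1" exists under data/processed/ and the matching encoder is available):
#     model = trainSoilmoistureEstimator("1", dim_red_mode="pca", verbose=1)
#     predictSoilmoistureMap("1", model, dim_red_mode="pca")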
def predictSoilmoistureMap(area, model, dim_red_mode=None, sm_range=None,
postfix="", verbose=0):
"""Predict soil moisture map for measurement `area`.
Parameters
----------
area : str
Name of measurement area.
model : model
Regression model.
dim_red_mode : [type], optional (default=None)
Type of dimensionality reduction, from {None, "PCA", "autoencoder"}.
sm_range : tuple of float, optional (default=None)
If not None, this is a tuple of (a, b) with a<b which defines the
range of soil moisture that should be considered in the prediction.
postfix : str, optional (default="")
Postfix for output file.
verbose : int, optional (default=0)
Controls the verbosity.
Returns
-------
hyp_data_map : np.array
Hyperspectral data map
"""
hyp_path = "/home/felix/data/"
tif_file = hyp_path + "Area" + str(area) + "_multi_or_rf"
# read in data and transform
if verbose:
print("| Reading in data ...")
tif_file = rasterio.open(tif_file)
width = tif_file.width
height = tif_file.height
n_features = 170
hyp_data = tif_file.read()
if hyp_data.shape[0] == 170:
hyp_data = hyp_data.transpose(1, 2, 0)
hyp_data_list = hyp_data.reshape((width*height, n_features))
# mask and preprocess
if verbose:
print("| Masking data ...")
image_mask = np.argwhere(np.mean(hyp_data_list, axis=1) != 0).flatten()
hyp_data_pre = processHeadwallData(
hyp_data_list[image_mask], verbose=True)
# predict
if verbose:
print("| Predicting ...")
if not dim_red_mode:
hyp_data_pred = model.predict(
preprocessingPixelHeadwall(hyp_data_pre))
else:
encoder = getEncoder(area, dim_red_mode=dim_red_mode)
if dim_red_mode == "autoencoder":
hyp_data_pred = model.predict(encoder.predict(
preprocessingPixelHeadwall(hyp_data_pre)))
elif dim_red_mode == "pca":
hyp_data_pred = model.predict(encoder.transform(
preprocessingPixelHeadwall(hyp_data_pre)))
# transform
hyp_data_map = np.zeros(shape=(width*height, ))
#!/usr/bin/python3
import argparse
import numpy as np
import pandas as pd
import scipy.interpolate
import scipy.stats
from flow_models.generate import X_VALUES, load_data
from flow_models.lib import mix
from flow_models.lib.util import logmsg
METHODS = ['first', 'threshold', 'sampling']
INTEGRATE_STEPS = 262144
def calculate_data(data, x_probs, x_val, method):
ad = {}
x = np.unique(np.rint(1 / x_probs)).astype('u8')
if x_val == 'size':
x *= 64
idx = data.index.values
idx_diff = np.concatenate([idx[:1], np.diff(idx)])
if method == 'first':
for what in ['flows', 'packets', 'fraction', 'octets']:
w = 'flows' if what == 'fraction' else what
cdf = data[w + '_sum'].cumsum() / data[w + '_sum'].sum()
cdf = scipy.interpolate.interp1d(cdf.index, cdf, 'previous', bounds_error=False)(x)
ad[what + '_mean'] = 1 - cdf
elif method == 'threshold':
for what in ['flows', 'packets', 'fraction', 'octets']:
w = 'flows' if what == 'fraction' else what
if what == 'flows':
toc = data[w + '_sum']
cdf = 1 - toc.cumsum() / data[w + '_sum'].sum()
ad[what + '_mean'] = scipy.interpolate.interp1d(cdf.index, cdf, 'previous', bounds_error=False)(x)
else:
toc = (data[w + '_sum'] / idx)[::-1].cumsum()[::-1] * idx_diff
cdf = 1 - toc.cumsum() / data[w + '_sum'].sum()
ad[what + '_mean'] = scipy.interpolate.interp1d(cdf.index, cdf, 'linear', bounds_error=False)(x)
else:
ps = []
if x_val == 'length':
for p in x_probs:
ps.append((1 - p) ** idx)
else:
packet_size = data['octets_sum'].cumsum() / data['packets_sum'].cumsum()
pks = np.clip(idx / packet_size, 1, np.trunc(idx / 64))
for p in x_probs:
ps.append((1 - np.clip(p * packet_size / 64, 0, 1)) ** (pks if x_val == 'size' else idx))
for what in ['flows', 'packets', 'fraction', 'octets']:
w = 'flows' if what == 'fraction' else what
if what == 'flows':
toc = data[w + '_sum']
else:
toc = (data[w + '_sum'] / idx)[::-1].cumsum()[::-1] * idx_diff
a = []
for p in ps:
cdf = 1 - (p * toc).sum() / data[w + '_sum'].sum()
a.append(cdf)
ad[what + '_mean'] = np.array(a)
ad['operations_mean'] = 1 / ad['flows_mean']
ad['occupancy_mean'] = 1 / ad['fraction_mean']
for what in ['flows', 'packets', 'fraction', 'octets']:
ad[what + '_mean'] *= 100
return pd.DataFrame(ad, x_probs if method == 'sampling' else x)
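# --- Hypothetical toy example (not part of the original module) ---
# Shows the input shape calculate_data() expects, as read from the code above: a
# DataFrame indexed by flow length with flows_sum/packets_sum/octets_sum columns.
# All numbers are made up and the demo function is never called at import time.
def _demo_calculate_data():
    toy = pd.DataFrame(
        {
            'flows_sum': [100, 50, 10],
            'packets_sum': [100, 150, 80],
            'octets_sum': [6400, 19200, 40960],
        },
        index=pd.Index([1, 2, 8], name='length'),
    )
    # evaluate the 'first' method at sampling probabilities 1, 1/2 and 1/4
    return calculate_data(toy, np.array([1.0, 0.5, 0.25]), 'length', 'first')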
def calculate_mix(data, x_probs, x_val, method):
ad = {}
x = np.unique(np.rint(1 / x_probs)).astype('u8')
if x_val == 'size':
x *= 64
idx = np.geomspace(x.min(), x.max(), INTEGRATE_STEPS)
idx = np.unique(np.rint(idx)).astype('u8')
idx_diff = np.concatenate([idx[:1], np.diff(idx)])
if method == 'first':
for what in ['flows', 'packets', 'fraction', 'octets']:
w = 'flows' if what == 'fraction' else what
cdf = mix.cdf(data[w], x)
ad[what + '_mean'] = 1 - cdf
elif method == 'threshold':
for what in ['flows', 'packets', 'fraction', 'octets']:
w = 'flows' if what == 'fraction' else what
if what == 'flows':
cdf = mix.cdf(data[w], x)
ad[what + '_mean'] = 1 - cdf
else:
cdf = mix.cdf(data[w], idx)
pdf = np.concatenate([cdf[:1], np.diff(cdf)])
toc = (pdf / idx)[::-1].cumsum()[::-1] * idx_diff
cdf = 1 - toc.cumsum()
ad[what + '_mean'] = scipy.interpolate.interp1d(idx, cdf, 'linear', bounds_error=False)(x)
else:
ps = []
if x_val == 'length':
for p in x_probs:
ps.append((1 - p) ** idx)
else:
packet_size = (mix.cdf(data['octets'], idx) / mix.cdf(data['packets'], idx)) * (data['octets']['sum'] / data['packets']['sum'])
pks = np.clip(idx / packet_size, 1, np.trunc(idx / 64))
# Flows smaller than 128 bytes must be 1-packet long
packet_size[:64] = idx[:64]
for p in x_probs:
ps.append((1 - np.clip(p * packet_size / 64, 0, 1)) ** (pks if x_val == 'size' else idx))
for what in ['flows', 'packets', 'fraction', 'octets']:
w = 'flows' if what == 'fraction' else what
cdf = mix.cdf(data[w], idx)
pdf = np.concatenate([cdf[:1], np.diff(cdf)])
if what == 'flows':
toc = pdf
else:
toc = (pdf / idx)[::-1].cumsum()[::-1] * idx_diff
if x_val != 'length':
toc[64] += np.sum(toc[:64])
toc[:64] = 0
a = []
for p in ps:
cdf = 1 - (p * toc).sum()
a.append(cdf)
ad[what + '_mean'] = np.array(a)
ad['operations_mean'] = 1 / ad['flows_mean']
ad['occupancy_mean'] = 1 / ad['fraction_mean']
for what in ['flows', 'packets', 'fraction', 'octets']:
ad[what + '_mean'] *= 100
return pd.DataFrame(ad, x_probs if method == 'sampling' else x)
def calculate(obj, index=None, x_val='length', methods=tuple(METHODS)):
data = load_data(obj)
if index is None:
index = 1 / np.power(2, range(25))
elif isinstance(index, int):
index = 1 / np.logspace(0, 32, index, base=2)
else:
index = index
dataframes = {}
for method in methods:
if isinstance(data, pd.DataFrame):
            df = calculate_data(data, np.array(index), x_val, method)
"""
SQN network, reproduced based on the SQN paper, check https://arxiv.org/abs/2104.04891
Author: <NAME>
Email: <EMAIL>
history:
- Oct. 15, 2021, create the file
difference from the codebase (RandLANet.py of Official RandLA-Net)
- add weak_labels relevant attributes
- delete the decoder part in its inference() and add query network
- add three_nearest_interpolation() based on tf_ops from official PointNet2 for the query network
- adjust the losses, modify the training() and evaluate() function correspondingly
"""
import sys
import time
import os, json
from os import makedirs
from os.path import exists, join
import numpy as np
import tensorflow as tf
from sklearn.metrics import confusion_matrix
from helper_tool import DataProcessing as DP, log_out
import helper_tf_util
# custom tf ops based on PointNet++ (https://github.com/charlesq34/pointnet2)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(BASE_DIR, 'tf_ops/3d_interpolation'))
from tf_interpolate import three_nn, three_interpolate
class SqnNet:
"""SQNetwork class based RandLA-Net's encoder and its query network
- __init__(): set the config, flat_inputs(all those batched data), inputs, logits, loss, optimizer, results and log using TF summarywriter.
- inference(): implement the network logic with encoder-decoder structure, need the follow function as core components:
- dilated_res_block(): the dilated residual block
- building_block(): build 1 simple block
- relative_pos_encoding(): relative position encoding for the LocSE
- random_sample(): RS
- nearest_interpolation(): nearest interpolation with inverse weighted distance
        - gather_neighbour(): gather nearest neighbours
- att_pooling(): attentive pooling
- three_nearest_interpolation(): three nearest interpolation for each weak point in the query network
- train(): training with an optimizer by running session for ops (following tensorflow 1.x pattern)
- evaluate(): evaluate on the val/test set.
"""
def __init__(self, dataset, config):
# obtain the dataset iterator's next element under the hood
flat_inputs = dataset.flat_inputs
self.config = config
# Path of the result folder
if self.config.saving:
if self.config.saving_path is None:
# self.saving_path = time.strftime('{}/Log_%Y-%m-%d_%H-%M-%S'.format(config.results_dir), time.gmtime())
self.saving_path = time.strftime('{}/Log_weak_{}_%Y-%m-%d_%H-%M-%S'.format(self.config.results_dir,dataset.weak_label_ratio), time.gmtime())
else:
self.saving_path = self.config.saving_path
makedirs(self.saving_path) if not exists(self.saving_path) else None
# use inputs(a dict) variable to map the flat_inputs
with tf.variable_scope('inputs'):
self.inputs = dict()
num_layers = self.config.num_layers
# correspond to the flat_inputs defined in get_tf_mapping2() in main_S3DIS_SQN.py
# HACK: for encoder, it needs the original points, so add it to the first element of this array.
            self.inputs['original_xyz'] = flat_inputs[4 * num_layers] # original (not sub-sampled) xyz coordinates, (B,N,3)
self.inputs['xyz'] = (self.inputs['original_xyz'],) + flat_inputs[:num_layers] # xyz_original plus xyz(points) of sub_pc at all the sub_sampling stages, containing num_layers items
self.inputs['neigh_idx'] = flat_inputs[num_layers: 2 * num_layers] # neighbour id, containing num_layers items
self.inputs['sub_idx'] = flat_inputs[2 * num_layers:3 * num_layers] # sub_sampled idx, containing num_layers items
self.inputs['interp_idx'] = flat_inputs[3 * num_layers:4 * num_layers] # interpolation idx (nearest idx in the sub_pc for all raw pts), containing num_layers items
self.inputs['features'] = flat_inputs[4 * num_layers + 1] # features containing xyz and feature, (B,N,3+C)
self.inputs['labels'] = flat_inputs[4 * num_layers + 2]
self.inputs['weak_label_masks'] = flat_inputs[4 * num_layers + 3]
self.inputs['input_inds'] = flat_inputs[4 * num_layers + 4] # input_inds for each batch 's point in the sub_pc
self.inputs['cloud_inds'] = flat_inputs[4 * num_layers + 5] # cloud_inds for each batch
self.points = self.inputs['original_xyz'] # (B,N,3)
self.labels = self.inputs['labels'] # (B,N)
self.weak_label_masks = self.inputs['weak_label_masks'] # weak label mask for weakly semseg, (B,N)
self.is_training = tf.placeholder(tf.bool, shape=())
self.training_step = 1
self.training_epoch = 0
self.correct_prediction = 0
self.accuracy = 0
self.mIou_list = [0]
self.class_weights = DP.get_class_weights(dataset.name)
if self.config.saving:
# put the training log to the resutls dir
self.Log_file = open(os.path.join(self.saving_path,'log_train_' + dataset.name + str(dataset.val_split) + '.txt'), 'a')
else:
self.Log_file = open('log_train_' + dataset.name + str(dataset.val_split) + '.txt', 'a')
with tf.variable_scope('layers'):
self.logits, self.weak_labels = self.inference(self.inputs, self.is_training) # (n, num_classes), (n,)
#####################################################################
# Ignore the invalid point (unlabeled) when calculating the loss #
#####################################################################
with tf.variable_scope('loss'):
self.logits = tf.reshape(self.logits, [-1, config.num_classes]) # (n, num_classes)
self.weak_labels = tf.reshape(self.weak_labels, [-1]) # (n,)
# TODO: which to use, WCE, CE or smooth label
# self.loss = self.get_loss_Sqn(self.logits, self.weak_labels)
self.loss = self.get_loss(self.logits, self.weak_labels, self.class_weights)
with tf.variable_scope('optimizer'):
self.learning_rate = tf.Variable(config.learning_rate, trainable=False, name='learning_rate')
self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
self.extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.variable_scope('results'):
# self.correct_prediction = tf.nn.in_top_k(valid_logits, valid_labels, 1)
self.correct_prediction = tf.nn.in_top_k(self.logits, self.weak_labels, 1)
self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
self.prob_logits = tf.nn.softmax(self.logits) # (n,C)
tf.summary.scalar('learning_rate', self.learning_rate)
tf.summary.scalar('loss', self.loss)
tf.summary.scalar('accuracy', self.accuracy)
my_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
self.saver = tf.train.Saver(my_vars, max_to_keep=100)
c_proto = tf.ConfigProto()
c_proto.gpu_options.allow_growth = True
self.sess = tf.Session(config=c_proto)
self.merged = tf.summary.merge_all()
# self.train_writer = tf.summary.FileWriter(config.train_sum_dir, self.sess.graph)
if hasattr(self, 'saving_path'):
self.train_writer = tf.summary.FileWriter(self.saving_path, self.sess.graph)
self.sess.run(tf.global_variables_initializer())
def inference(self, inputs, is_training):
"""similar to pytorch's forward() function where the SQN model architecture is implemented by an encoder-query structure
Args:
inputs ([type]): a dict containing all kinds of required inputs
is_training (bool): training or not
Returns:
tensor: logits for segmentation scores
"""
d_out = self.config.d_out # [16, 64, 128, 256], note the channels of LFA will be doubled.
feature = inputs['features'] # (B,N,6)
# feature = tf.layers.dense(feature, 8, activation=None, name='fc0') # (B,N,8)
# feature = tf.nn.leaky_relu(tf.layers.batch_normalization(feature, -1, 0.99, 1e-6, training=is_training))
feature = tf.expand_dims(feature, axis=2) # expand 1 more dim to use Conv2D ops, (B,N,1,8)
# ###########################Encoder############################
f_encoder_list = [] # in the end, collect num_layers + 1 items for a group of hierarchical point feature embeddings
for i in range(self.config.num_layers):
f_encoder_i = self.dilated_res_block(feature, inputs['xyz'][i], inputs['neigh_idx'][i], d_out[i],
'Encoder_layer_' + str(i), is_training) # similar to LAO for local feature learning
f_sampled_i = self.random_sample(f_encoder_i, inputs['sub_idx'][i]) # down-sampled the input using the idx
feature = f_sampled_i
if i == 0:
f_encoder_list.append(f_encoder_i)
f_encoder_list.append(f_sampled_i) # (B,N,1,32), (B,N/4,1,32), (B,N/16,1,128), (B,N/64,1,256), (B,N/256,1,512)
# ###########################Encoder############################
# ###########################Query Network############################
# obtain weakly points and labels for a batch using weak_label_masks
# method2 using the gather_nd
selected_idx = tf.where(tf.equal(self.weak_label_masks,1)) # (n,2)
weak_points = tf.gather_nd(self.points, selected_idx)
weak_points_labels=tf.gather_nd(self.labels, selected_idx)# (n,)
# or use method1 using boolean_mask
# weak_points = tf.boolean_mask(self.points,tf.cast(self.weak_label_masks,tf.bool)) # (n,3), e.g., one batch has 26 weak pts
# weakly_points_labels = tf.boolean_mask(self.labels,tf.cast(self.weak_label_masks,tf.bool)) # (n,)
# obtain batch indices to denote which batch is for every weakly point
batch_inds = selected_idx[:,0]
# query features for weak points
f_query_feature_list = []
for i in range(self.config.num_layers):
xyz_current = inputs['xyz'][i+1] # (B,N/4,3), index i plus 1 because the first element is the point_original
features_current = f_encoder_list[i+1] # (B,N/4,1,32), index plus 1 because the first one is the input of encoder
# if training, shape (n,1,3), otherwise (B,N,3) (main reason here is to avoid GPU OOM issue)
xyz_query = tf.cond(is_training,
lambda: tf.reshape(weak_points, (tf.shape(weak_points)[0],1,3)), # (n,1,3)
lambda: self.points)
xyz_support = tf.cond(is_training,
lambda: tf.gather(xyz_current, batch_inds, axis=0), # (B,m,3)->(n,m,3) as each weak pt might be from diff. batch
lambda: xyz_current)
features_support = tf.cond(is_training,
lambda: tf.gather(tf.squeeze(features_current,axis=2), batch_inds, axis=0), # (B,m,C)->(n,m,C)
lambda: tf.squeeze(features_current,axis=2))
# if training (n,1,C) else (B, N, C) where n is based on (B,N) and the weak_label_mask
f_query_feature_i = self.three_nearest_interpolation(xyz_query, xyz_support, features_support) # (B,N,C)
f_query_feature_list.append(f_query_feature_i)
        # concat all features, (n, 1116, 1); the trick here is that n acts as the batch dim, 1116 as the channel dim and 1 as the num_pt dim
FC_LIST =[256, 128, 64]
if self.config.concat_type == '1234':
features_combined = tf.concat(f_query_feature_list[:], axis=-1) # (n,1,928)
elif self.config.concat_type == '123':
features_combined = tf.concat(f_query_feature_list[:3], axis=-1) # (n,1,x)
elif self.config.concat_type == '234':
features_combined = tf.concat(f_query_feature_list[1:], axis=-1) # (n,1,x)
elif self.config.concat_type == '12':
features_combined = tf.concat(f_query_feature_list[:2], axis=-1) # (n,1,x)
FC_LIST =[128, 64]
elif self.config.concat_type == '1':
features_combined = f_query_feature_list[0] # (n,1,x)
FC_LIST =[16]
else:
raise NotImplementedError("error")
# obtain classification scores using FCs, (n, 1, 928)-> ...-->(n, 1, num_classes) for training
# or obtain classification scores using FCs, (B, N, 928)-> ...-->(B, N, num_classes) for validation
f_current =features_combined
for i in range(len(FC_LIST)):
f_layer_fc_i = helper_tf_util.conv1d(f_current, FC_LIST[i], 1, f'fc{i+1}', 1, 'VALID', True, is_training)
# add a dropout to its last layer
if i == len(FC_LIST)-1:
f_layer_drop = helper_tf_util.dropout(f_layer_fc_i, keep_prob=0.5, is_training=is_training, scope='dp1')
f_current = f_layer_fc_i
logits = helper_tf_util.conv1d(f_current, self.config.num_classes, 1, f'fc{len(FC_LIST)+1}', 1, 'VALID', False, is_training, activation_fn=None)
# ###########################Query Network############################
# if training, logits's shape is like (n,1,C), if validation, shape like (B, N, C)
logits=tf.cond(is_training,
lambda: tf.squeeze(logits, [1]), # (n, num_classes)
lambda: tf.reshape(logits,[-1, tf.shape(logits)[-1]])) # (B*N, num_classes)
return logits, weak_points_labels # (n,num_classes), (n,)
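    # Sketch of the three-nearest interpolation used above (my reading of the
    # PointNet++ three_nn/three_interpolate ops, stated here as an assumption):
    # for each query point q, take its 3 nearest support points, weight their
    # features by inverse distance and normalise the weights to sum to 1, i.e.
    #   f(q) = sum_i (1/d_i) * f_i / sum_j (1/d_j), over the 3 nearest neighbours.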
def train(self, dataset):
# write current config to the log
dict_config = json.dumps(dict((name, getattr(self.config, name)) for name in dir(self.config) if not name.startswith('__') and not name.startswith('lr')))
log_out('****START TRAINING with {}****\n'.format(json.dumps(dict_config)), self.Log_file)
log_out('****EPOCH {}****'.format(self.training_epoch), self.Log_file)
# use session to start the dataset iterator
self.sess.run(dataset.train_init_op) # similar to sess.run(dataset.flat_inputs)??
while self.training_epoch < self.config.max_epoch:
t_start = time.time()
try:
ops = [self.train_op, # training optmizer
self.extra_update_ops,
self.merged, # tensorboard summary
self.loss,
self.logits,
self.labels,
self.weak_label_masks,
self.weak_labels,
self.accuracy]
# logits, weak_labels = self.sess.run([self.logits, self.weak_labels], {self.is_training: True})
# BUG: OOM issue reporting error OOM when allocating tensor with shape[40960,10240,32] for queired features concatenation step for the semantic query network(~line 450) , 1 batch contains about 400 weakly points
_, _, summary, l_out, probs, labels, _, _, acc = self.sess.run(ops, {self.is_training: True})
self.train_writer.add_summary(summary, self.training_step)
t_end = time.time()
if self.training_step % 50 == 0:
message = 'Step {:08d} L_out={:5.3f} Acc={:4.2f} ''---{:8.2f} ms/batch'
log_out(message.format(self.training_step, l_out, acc, 1000 * (t_end - t_start)), self.Log_file)
self.training_step += 1
except tf.errors.OutOfRangeError:
                # the iterator yields a fixed number of steps per epoch (e.g. 500); once it is exhausted this exception fires and triggers the end-of-epoch handling below
m_iou = self.evaluate(dataset)
if m_iou > np.max(self.mIou_list):
# Save the best model
snapshot_directory = join(self.saving_path, 'snapshots')
makedirs(snapshot_directory) if not exists(snapshot_directory) else None
self.saver.save(self.sess, snapshot_directory + '/snap', global_step=self.training_step)
self.mIou_list.append(m_iou)
log_out('Best m_IoU is: {:5.3f}'.format(max(self.mIou_list)), self.Log_file)
self.training_epoch += 1
self.sess.run(dataset.train_init_op)
# Update learning rate
op = self.learning_rate.assign(tf.multiply(self.learning_rate,
self.config.lr_decays[self.training_epoch]))
self.sess.run(op)
log_out('****EPOCH {}****'.format(self.training_epoch), self.Log_file)
except tf.errors.InvalidArgumentError as e:
print('Caught a NaN error :')
print(e.error_code)
print(e.message)
print(e.op)
print(e.op.name)
print([t.name for t in e.op.inputs])
print([t.name for t in e.op.outputs])
a = 1 / 0
print('finished')
self.sess.close()
def evaluate(self, dataset):
"""For Sqn model, all test sub-sampled points will be used for evaluations. Note: only test on sub-sampled points rather raw pts.
For each validation step:
obtain current input's preds w. shape (B,N,13) and gt_labels (B,N) by running session (i.e., calculate logits using trained model)
convert preds to hard labels w. shape (B,N)
compute confusion_matrix, then compute {gt_class,positive_classes,true_positive_classes} for current input, add to their list
use {gt_class,positive_classes,true_positive_classes} for the whole validation set to compute mIoU and save logs
"""
# Initialise iterator with validation data
self.sess.run(dataset.val_init_op)
gt_classes = [0 for _ in range(self.config.num_classes)]
positive_classes = [0 for _ in range(self.config.num_classes)]
true_positive_classes = [0 for _ in range(self.config.num_classes)]
val_total_correct = 0
val_total_seen = 0
for step_id in range(self.config.val_steps):
if step_id % 50 == 0:
print(str(step_id) + ' / ' + str(self.config.val_steps))
try:
ops = (self.prob_logits, self.weak_labels, self.accuracy)
stacked_prob, weak_lbls, acc = self.sess.run(ops, {self.is_training: False})
pred = np.argmax(stacked_prob, 1)
# if not self.config.ignored_label_inds:
# pred_valid = pred
# labels_valid = labels
# else:
# invalid_idx = np.where(labels == self.config.ignored_label_inds)[0]
# labels_valid = np.delete(labels, invalid_idx)
# labels_valid = labels_valid - 1
# pred_valid = np.delete(pred, invalid_idx)
pred_valid = pred
labels_valid = weak_lbls
correct = np.sum(pred_valid == labels_valid)
val_total_correct += correct
val_total_seen += len(labels_valid)
conf_matrix = confusion_matrix(labels_valid, pred_valid, np.arange(0, self.config.num_classes, 1))
                gt_classes += np.sum(conf_matrix, axis=1)
import cv2
from ketisdk.utils.proc_utils import ProcUtils
import os
import math
from scipy import optimize
import matplotlib.pyplot as plt
import numpy as np
class ArrayUtils():
def crop_oriented_rect_polar(self, im, center, angle, rx, ry):
xc, yc = center
# top, left, right, bottom = xc-rx, yc-ry, xc+rx, yc+ry
dx,dy = ProcUtils().rotateXY_float(-rx, ry, angle)
pt0 = (int(xc+dx), int(yc+dy))
dx,dy = ProcUtils().rotateXY_float(-rx, -ry, angle)
pt1 = (int(xc+dx), int(yc+dy))
dx,dy = ProcUtils().rotateXY_float(rx, -ry, angle)
pt2 = (int(xc+dx), int(yc+dy))
dx,dy = ProcUtils().rotateXY_float(rx, ry, angle)
pt3 = (int(xc+dx), int(yc+dy))
return self.crop_oriented_rect(im, (pt0, pt1, pt2, pt3))
def crop_oriented_rect(self, im, oriented_box):
# points for test.jpg
pt0, pt1, pt2, pt3 = oriented_box
cnt = np.array([[list(pt0)], [list(pt1)], [list(pt2)], [list(pt3)]])
rect = cv2.minAreaRect(cnt)
# the order of the box points: bottom left, top left, top right,
# bottom right
box = cv2.boxPoints(rect)
box = np.int0(box)
# cv2.drawContours(img, [box], 0, (0, 0, 255), 2)
# get width and height of the detected rectangle
width = int(rect[1][0])
height = int(rect[1][1])
src_pts = box.astype("float32")
# coordinate of the points in box points after the rectangle has been
# straightened
dst_pts = np.array([[0, height - 1],
[0, 0],
[width - 1, 0],
[width - 1, height - 1]], dtype="float32")
# the perspective transformation matrix
M = cv2.getPerspectiveTransform(src_pts, dst_pts)
# directly warp the rotated rectangle to get the straightened rectangle
warped = cv2.warpPerspective(im, M, (width, height))
return warped
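    # Hypothetical usage sketch (corner/center values are illustrative, not from the source):
    #   patch = ArrayUtils().crop_oriented_rect(im, ((10, 40), (10, 10), (60, 10), (60, 40)))
    # or, from a center, a rotation angle and half-extents rx/ry:
    #   patch = ArrayUtils().crop_oriented_rect_polar(im, center=(35, 25), angle=30, rx=25, ry=15)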
def append_array(self, anArray, container=None, axis=0):
""" append `anArray` into `container`
"""
if container is None:
con_array = anArray
else:
con_array = np.concatenate((container, anArray), axis=axis)
return con_array
def concat_fixsize(self, im1, im2, data_type='uint8', axis=0, inter=cv2.INTER_CUBIC):
""" concatenate 2 ndarray, if sizes are different, scale array2 equal to array1
"""
im1, im2 = np.copy(im1), np.copy(im2)
isColor1 = (len(im1.shape) == 3)
isColor2 = (len(im2.shape) == 3)
not_same_color = isColor1 ^ isColor2
dtype1 = im1.dtype.name
dtype2 = im2.dtype.name
if dtype1 != dtype2:
range0 = (0, 255)
if dtype1 != data_type:
range1 = (np.iinfo(dtype1).min, np.iinfo(dtype1).max)
im1 = self.reval(im1, range1 + range0, data_type=data_type)
if dtype2 != data_type:
range2 = (np.iinfo(dtype2).min, np.iinfo(dtype2).max)
im2 = self.reval(im2, range2 + range0, data_type=data_type)
if not_same_color:
if not isColor1: im1 = cv2.cvtColor(im1, cv2.COLOR_GRAY2BGR)
if not isColor2: im2 = cv2.cvtColor(im2, cv2.COLOR_GRAY2BGR)
h1, w1 = im1.shape[:2]
h2, w2 = im2.shape[:2]
if axis == 0 and w1 != w2:
h, w = int(1. * h2 / w2 * w1), w1
im2 = cv2.resize(im2, (w, h), interpolation=inter)
if axis == 1 and h1 != h2:
h, w = h1, int(1. * w2 / h2 * h1)
im2 = cv2.resize(im2, (w, h), interpolation=inter)
return np.concatenate((im1, im2), axis=axis)
def save_array(self, array, folds=None, filename=None, ext='.png'):
if filename is None: filename = ProcUtils().get_current_time_str()
filepath = filename + ext
if folds is not None:
folder = ''
for fold in folds: folder = os.path.join(folder, fold)
if not os.path.exists(folder): os.makedirs(folder)
filepath = os.path.join(folder, filepath)
cv2.imwrite(filepath, array)
return filename
def save_array_v2(self, array, fold=None, filename=None, ext='.png'):
if filename is None: filename = ProcUtils().get_current_time_str()
filepath = filename + ext
if fold is not None:
if not os.path.exists(fold): os.makedirs(fold)
cv2.imwrite(filepath, array)
return filename
def save_array_v3(self, array, filepath=None):
if filepath is None: filepath = ProcUtils().get_time_name() + '.png'
fold, _ = os.path.split(filepath)
if not os.path.exists(fold): os.makedirs(fold)
cv2.imwrite(filepath, array)
def get_mat_normals(self, mat_in, grad_weight=True):
mat = mat_in.astype(np.float)
h, w = mat.shape[:2]
X, Y = np.meshgrid(np.arange(0, w), np.arange(0, h))
M00 = self.matto3Dmat(mat, Y, X, 0, 0)
Mm1m1 = self.matto3Dmat(mat, Y, X, -1, -1)
M0m1 = self.matto3Dmat(mat, Y, X, 0, -1)
M1m1 = self.matto3Dmat(mat, Y, X, 1, -1)
M10 = self.matto3Dmat(mat, Y, X, 1, 0)
M11 = self.matto3Dmat(mat, Y, X, 1, 1)
M01 = self.matto3Dmat(mat, Y, X, 0, 1)
Mm11 = self.matto3Dmat(mat, Y, X, -1, 1)
Mm10 = self.matto3Dmat(mat, Y, X, -1, 0)
v = np.zeros((h - 2, w - 2, 3, 8), np.float)
v[:, :, :, 0] = self.get_3point_normals(M00, Mm1m1, M0m1)
v[:, :, :, 1] = self.get_3point_normals(M00, M0m1, M1m1)
v[:, :, :, 2] = self.get_3point_normals(M00, M1m1, M10)
v[:, :, :, 3] = self.get_3point_normals(M00, M10, M11)
v[:, :, :, 4] = self.get_3point_normals(M00, M11, M01)
v[:, :, :, 5] = self.get_3point_normals(M00, M01, Mm11)
v[:, :, :, 6] = self.get_3point_normals(M00, Mm11, Mm10)
v[:, :, :, 7] = self.get_3point_normals(M00, Mm10, Mm1m1)
v_mean = np.mean(v, axis=3)
v_norm = np.linalg.norm(v_mean, axis=2)
v_norm = self.repmat(v_norm, (1, 1, 3))
v_norm = np.divide(v_mean, v_norm)
v = np.zeros((h, w, 3), np.float)
v[1:-1, 1:-1, :] = v_norm
# weighted mean
if grad_weight:
grad_x = self.get_gradient(mat, axis=1)
grad_y = self.get_gradient(mat, axis=0)
grad = np.abs(grad_x[1:-1, 1:-1]) + np.abs(grad_y[1:-1, 1:-1]) + 0.00001
weight = grad / np.sum(grad)
weight = self.repmat(weight, (1, 1, 3))
v_mean = np.sum(np.multiply(v_norm, weight), axis=(0, 1))
else:
v_mean = np.mean(v_norm, axis=(0, 1))
v_mean /= np.linalg.norm(v_mean)
return v, v_mean
def matto3Dmat(self, mat_in, Y_in, X_in, ry, rx):
mat = np.copy(mat_in)
        X = np.copy(X_in)
# -*- coding: utf-8 -*-
"""
This module enables parameterizing the contributivity measurements to be performed.
"""
from __future__ import print_function
import bisect
import datetime
from itertools import combinations
from math import factorial
from timeit import default_timer as timer
import numpy as np
from loguru import logger
from scipy.stats import norm
from sklearn.linear_model import LinearRegression
from . import multi_partner_learning, constants
class KrigingModel:
def __init__(self, degre, covariance_func):
self.X = np.array([[]])
self.Y = np.array([[]])
self.cov_f = covariance_func
self.degre = degre
self.beta = np.array([[]])
self.H = np.array([[]])
self.K = np.array([[]])
self.invK = np.array([[]])
def fit(self, X, Y):
self.X = X
self.Y = Y
K = np.zeros((len(X), len(X)))
H = np.zeros((len(X), self.degre + 1))
for i, d in enumerate(X):
for j, b in enumerate(X):
K[i, j] = self.cov_f(d, b)
for j in range(self.degre + 1):
H[i, j] = np.sum(d) ** j
self.H = H
        self.K = K
self.invK = np.linalg.inv(K)
Ht_invK_H = H.transpose().dot(self.invK).dot(H)
self.beta = np.linalg.inv(Ht_invK_H).dot(H.transpose()).dot(self.invK).dot(self.Y)
def predict(self, x):
gx = []
for i in range(self.degre + 1):
gx.append(np.sum(x) ** i)
gx = np.array(gx)
cx = []
for i in range(len(self.X)):
cx.append([self.cov_f(self.X[i], x)])
cx = np.array(cx)
pred = gx.transpose().dot(self.beta) + cx.transpose().dot(self.invK).dot(
self.Y - self.H.dot(self.beta)
)
return pred
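# --- Hypothetical usage sketch of KrigingModel (not part of the original module) ---
# The squared-exponential covariance and the toy 1-D design points are illustrative
# assumptions; this only shows the fit()/predict() calling convention. The demo
# function is never called at import time.
def _demo_kriging():
    def gauss_cov(x1, x2, length_scale=2.0):
        return np.exp(-np.sum((np.asarray(x1) - np.asarray(x2)) ** 2) / length_scale ** 2)
    X = [np.array([0.0]), np.array([1.0]), np.array([3.0])]
    Y = np.array([[0.0], [1.0], [9.0]])
    km = KrigingModel(degre=1, covariance_func=gauss_cov)
    km.fit(X, Y)
    return km.predict(np.array([2.0]))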
class Contributivity:
def __init__(self, scenario, name=""):
self.name = name
self.scenario = scenario
nb_partners = len(self.scenario.partners_list)
self.contributivity_scores = np.zeros(nb_partners)
self.scores_std = np.zeros(nb_partners)
self.normalized_scores = np.zeros(nb_partners)
self.computation_time_sec = 0.0
self.first_charac_fct_calls_count = 0
self.charac_fct_values = {(): 0}
self.increments_values = [{} for _ in self.scenario.partners_list]
def __str__(self):
computation_time_sec = str(datetime.timedelta(seconds=self.computation_time_sec))
output = "\n" + self.name + "\n"
output += "Computation time: " + computation_time_sec + "\n"
output += (
"Number of characteristic function computed: "
+ str(self.first_charac_fct_calls_count)
+ "\n"
)
output += f"Contributivity scores: {np.round(self.contributivity_scores, 3)}\n"
output += f"Std of the contributivity scores: {np.round(self.scores_std, 3)}\n"
output += f"Normalized contributivity scores: {np.round(self.normalized_scores, 3)}\n"
return output
def not_twice_characteristic(self, subset):
if len(subset) > 0:
subset = np.sort(subset)
if tuple(subset) not in self.charac_fct_values:
# Characteristic_func(permut) has not been computed yet...
# ... so we compute, store, and return characteristic_func(permut)
self.first_charac_fct_calls_count += 1
small_partners_list = np.array([self.scenario.partners_list[i] for i in subset])
if len(small_partners_list) > 1:
mpl = self.scenario._multi_partner_learning_approach(self.scenario,
partners_list=small_partners_list,
is_early_stopping=True,
save_folder=None,
**self.scenario.mpl_kwargs
)
else:
mpl = multi_partner_learning.SinglePartnerLearning(self.scenario,
partner=small_partners_list[0],
is_early_stopping=True,
save_folder=None,
**self.scenario.mpl_kwargs
)
mpl.fit()
self.charac_fct_values[tuple(subset)] = mpl.history.score
# we add the new increments
for i in range(len(self.scenario.partners_list)):
if i in subset:
subset_without_i = np.delete(subset, np.argwhere(subset == i))
if (
tuple(subset_without_i) in self.charac_fct_values
): # we store the new known increments
self.increments_values[i][tuple(subset_without_i)] = (
self.charac_fct_values[tuple(subset)]
- self.charac_fct_values[tuple(subset_without_i)]
)
else:
subset_with_i = np.sort(np.append(subset, i))
if (
tuple(subset_with_i) in self.charac_fct_values
): # we store the new known increments
self.increments_values[i][tuple(subset)] = (
self.charac_fct_values[tuple(subset_with_i)]
- self.charac_fct_values[tuple(subset)]
)
# else we will Return the characteristic_func(permut) that was already computed
return self.charac_fct_values[tuple(subset)]
# %% Generalization of Shapley Value computation
def compute_SV(self):
start = timer()
logger.info("# Launching computation of Shapley Value of all partners")
# Initialize list of all players (partners) indexes
partners_count = len(self.scenario.partners_list)
partners_idx = np.arange(partners_count)
# Define all possible coalitions of players
coalitions = [
list(j) for i in range(len(partners_idx)) for j in combinations(partners_idx, i + 1)
]
# For each coalition, obtain value of characteristic function...
# ... i.e.: train and evaluate model on partners part of the given coalition
characteristic_function = []
for coalition in coalitions:
characteristic_function.append(self.not_twice_characteristic(coalition))
# Compute Shapley Value for each partner
# We are using this python implementation: https://github.com/susobhang70/shapley_value
# It requires coalitions to be ordered - see README of https://github.com/susobhang70/shapley_value
list_shapley_value = shapley_value(partners_count, characteristic_function)
# Return SV of each partner
self.name = "Shapley"
self.contributivity_scores = np.array(list_shapley_value)
self.scores_std = np.zeros(len(list_shapley_value))
self.normalized_scores = list_shapley_value / np.sum(list_shapley_value)
end = timer()
self.computation_time_sec = end - start
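    # Worked example of the Shapley formula for n = 2 partners (illustrative numbers,
    # with the empty-coalition value taken as 0 as assumed elsewhere in this class):
    #   with v({1}) = 0.60, v({2}) = 0.70 and v({1,2}) = 0.80,
    #   SV_1 = 1/2*(v({1}) - 0) + 1/2*(v({1,2}) - v({2})) = 0.5*0.60 + 0.5*0.10 = 0.35
    #   SV_2 = 1/2*(v({2}) - 0) + 1/2*(v({1,2}) - v({1})) = 0.5*0.70 + 0.5*0.20 = 0.45
    #   and SV_1 + SV_2 = v({1,2}) = 0.80, as expected from the efficiency property.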
# %% compute independent raw scores
def compute_independent_scores(self):
start = timer()
logger.info(
"# Launching computation of perf. scores of models trained independently on each partner"
)
# Initialize a list of performance scores
performance_scores = []
# Train models independently on each partner and append perf. score to list of perf. scores
for i in range(len(self.scenario.partners_list)):
performance_scores.append(self.not_twice_characteristic(np.array([i])))
self.name = "Independent scores raw"
self.contributivity_scores = np.array(performance_scores)
self.scores_std = np.zeros(len(performance_scores))
self.normalized_scores = performance_scores / np.sum(performance_scores)
end = timer()
self.computation_time_sec = end - start
# %% compute Shapley values with the truncated Monte-carlo method
def truncated_MC(self, sv_accuracy=0.01, alpha=0.9, truncation=0.05):
"""Return the vector of approximated Shapley value corresponding to a list of partner and
a characteristic function using the truncated monte-carlo method."""
start = timer()
n = len(self.scenario.partners_list)
# Characteristic function on all partners
characteristic_all_partners = self.not_twice_characteristic(np.arange(n))
if n == 1:
self.name = "<NAME>"
self.contributivity_scores = np.array([characteristic_all_partners])
self.scores_std = np.array([0])
self.normalized_scores = self.contributivity_scores / np.sum(self.contributivity_scores)
end = timer()
self.computation_time_sec = end - start
else:
contributions = np.array([[]])
permutation = np.zeros(n) # Store the current permutation
t = 0
q = norm.ppf((1 - alpha) / 2, loc=0, scale=1)
v_max = 0
# Check if the length of the confidence interval
# is below the value of sv_accuracy*characteristic_all_partners
while (
t < 100 or t < q ** 2 * v_max / sv_accuracy ** 2
):
t += 1
if t == 1:
contributions = np.array([np.zeros(n)])
else:
contributions = np.vstack((contributions, np.zeros(n)))
permutation = np.random.permutation(n) # Store the current permutation
char_partnerlists = np.zeros(
n + 1
) # Store the characteristic function on each ensemble built with the first elements of the permutation
char_partnerlists[-1] = characteristic_all_partners
for j in range(n):
# here we suppose the characteristic function is 0 for the empty set
if abs(characteristic_all_partners - char_partnerlists[j]) < truncation:
char_partnerlists[j + 1] = char_partnerlists[j]
else:
char_partnerlists[j + 1] = self.not_twice_characteristic(
permutation[: j + 1]
)
contributions[-1][permutation[j]] = (
char_partnerlists[j + 1] - char_partnerlists[j]
)
v_max = np.max(np.var(contributions, axis=0))
sv = np.mean(contributions, axis=0)
self.name = "TMC Shapley"
self.contributivity_scores = sv
self.scores_std = np.std(contributions, axis=0) / np.sqrt(t - 1)
self.normalized_scores = self.contributivity_scores / np.sum(self.contributivity_scores)
end = timer()
self.computation_time_sec = end - start
# %% compute Shapley values with the truncated Monte-carlo method with a small bias correction
def interpol_TMC(self, sv_accuracy=0.01, alpha=0.9, truncation=0.05):
"""Return the vector of approximated Shapley value corresponding to a list of partner and a characteristic
function using the interpolated truncated monte-carlo method."""
start = timer()
n = len(self.scenario.partners_list)
# Characteristic function on all partners
characteristic_all_partners = self.not_twice_characteristic(np.arange(n))
if n == 1:
self.name = "ITMCS"
self.contributivity_scores = np.array([characteristic_all_partners])
self.scores_std = np.array([0])
self.normalized_scores = self.contributivity_scores / np.sum(self.contributivity_scores)
end = timer()
self.computation_time_sec = end - start
else:
contributions = np.array([[]])
permutation = np.zeros(n) # Store the current permutation
t = 0
q = norm.ppf((1 - alpha) / 2, loc=0, scale=1)
v_max = 0
while (
t < 100 or t < q ** 2 * v_max / (sv_accuracy) ** 2
): # Check if the length of the confidence interval
# is below the value of sv_accuracy*characteristic_all_partners
t += 1
if t == 1:
contributions = np.array([np.zeros(n)])
else:
contributions = np.vstack((contributions, np.zeros(n)))
permutation = np.random.permutation(n) # Store the current permutation
char_partnerlists = np.zeros(
n + 1
) # Store the characteristic function on each ensemble built with the first elements of the permutation
char_partnerlists[-1] = characteristic_all_partners
first = True
for j in range(n):
# here we suppose the characteristic function is 0 for the empty set
if abs(characteristic_all_partners - char_partnerlists[j]) < truncation:
if first:
size_of_rest = 0
for i in range(j, n):
size_of_rest += len(self.scenario.partners_list[i].y_train)
a = (characteristic_all_partners - char_partnerlists[j]) / size_of_rest
first = False
size_of_S = len(self.scenario.partners_list[j].y_train)
char_partnerlists[j + 1] = char_partnerlists[j] + a * size_of_S
else:
char_partnerlists[j + 1] = self.not_twice_characteristic(
permutation[: j + 1]
)
contributions[-1][permutation[j]] = (
char_partnerlists[j + 1] - char_partnerlists[j]
)
v_max = np.max(np.var(contributions, axis=0))
sv = np.mean(contributions, axis=0)
self.name = "ITMCS"
self.contributivity_scores = sv
self.scores_std = np.std(contributions, axis=0) / np.sqrt(t - 1)
self.normalized_scores = self.contributivity_scores / np.sum(self.contributivity_scores)
end = timer()
self.computation_time_sec = end - start
# # %% compute Shapley values with the importance sampling method
def IS_lin(self, sv_accuracy=0.01, alpha=0.95):
"""Return the vector of approximated Shapley value corresponding to a list of partner and \
a characteristic function using the importance sampling method and a linear interpolation model."""
start = timer()
n = len(self.scenario.partners_list)
# Characteristic function on all partners
characteristic_all_partners = self.not_twice_characteristic(np.arange(n))
if n == 1:
self.name = "<NAME>"
self.contributivity_scores = np.array([characteristic_all_partners])
self.scores_std = np.array([0])
self.normalized_scores = self.contributivity_scores / np.sum(self.contributivity_scores)
end = timer()
self.computation_time_sec = end - start
else:
# definition of the original density
def prob(subset):
lS = len(subset)
return factorial(n - 1 - lS) * factorial(lS) / factorial(n)
# definition of the approximation of the increment
# compute the last and the first increments in performance \
# (they are needed to compute the approximated increments)
characteristic_no_partner = 0
last_increments = []
first_increments = []
for k in range(n):
last_increments.append(
characteristic_all_partners
- self.not_twice_characteristic(np.delete(np.arange(n), k))
)
first_increments.append(
self.not_twice_characteristic(np.array([k]))
- characteristic_no_partner
)
# ## definition of the number of data in all datasets
size_of_I = 0
for partner in self.scenario.partners_list:
size_of_I += len(partner.y_train)
def approx_increment(subset, k):
assert k not in subset, "" + str(k) + "is not in " + str(subset) + ""
small_partners_list = np.array([self.scenario.partners_list[i] for i in subset])
# compute the size of subset : ||subset||
size_of_S = 0
for partner in small_partners_list:
size_of_S += len(partner.y_train)
beta = size_of_S / size_of_I
return (1 - beta) * first_increments[k] + beta * last_increments[k]
            # ## compute the renormalization constant of the importance density for all datasets
renorms = []
for k in range(n):
list_k = np.delete(np.arange(n), k)
renorm = 0
for length_combination in range(len(list_k) + 1):
for subset in combinations(
list_k, length_combination
): # could be avoided as
# prob(np.array(subset))*np.abs(approx_increment(np.array(subset),j))
# is constant in the combination
renorm += prob(np.array(subset)) * np.abs(
approx_increment(np.array(subset), k)
)
renorms.append(renorm)
# sampling
t = 0
q = -norm.ppf((1 - alpha) / 2, loc=0, scale=1)
v_max = 0
while (
t < 100 or t < 4 * q ** 2 * v_max / (sv_accuracy) ** 2
): # Check if the length of the confidence interval is below the value of
# sv_accuracy*characteristic_all_partners
t += 1
if t == 1:
contributions = np.array([np.zeros(n)])
else:
contributions = np.vstack((contributions, np.zeros(n)))
for k in range(n):
# generate the new subset (for the increment) with the inverse method
u = np.random.uniform(0, 1, 1)[0]
cumSum = 0
list_k = np.delete(np.arange(n), k)
for length_combination in range(len(list_k) + 1):
for subset in combinations(list_k, length_combination):
cumSum += prob(np.array(subset)) * np.abs(
approx_increment(np.array(subset), k)
)
if cumSum / renorms[k] > u:
S = np.array(subset)
break
if cumSum / renorms[k] > u:
break
# compute the increment
SUk = np.append(S, k)
increment = self.not_twice_characteristic(
SUk
) - self.not_twice_characteristic(S)
# computed the weight p/g
contributions[t - 1][k] = (
increment * renorms[k] / np.abs(approx_increment(np.array(S), k))
)
v_max = np.max(np.var(contributions, axis=0))
shap = np.mean(contributions, axis=0)
self.name = "<NAME>"
self.contributivity_scores = shap
self.scores_std = np.std(contributions, axis=0) / np.sqrt(t - 1)
self.normalized_scores = self.contributivity_scores / np.sum(self.contributivity_scores)
end = timer()
self.computation_time_sec = end - start
# # %% compute Shapley values with the regression importance sampling method
def IS_reg(self, sv_accuracy=0.01, alpha=0.95):
"""Return the vector of approximated Shapley value corresponding
to a list of partner and a characteristic function using the
importance sampling method and a regression model."""
start = timer()
n = len(self.scenario.partners_list)
if n < 4:
self.compute_SV()
self.name = "IS_reg Shapley values"
else:
# definition of the original density
def prob(subset):
lS = len(subset)
return factorial(n - 1 - lS) * factorial(lS) / factorial(n)
# definition of the approximation of the increment
# compute some increments
permutation = np.random.permutation(n)
for j in range(n):
self.not_twice_characteristic(permutation[: j + 1])
permutation = np.flip(permutation)
for j in range(n):
self.not_twice_characteristic(permutation[: j + 1])
for k in range(n):
permutation = np.append(permutation[-1], permutation[:-1])
for j in range(n):
self.not_twice_characteristic(permutation[: j + 1])
# do the regressions
# make the datasets
def makedata(subset):
# compute the size of subset : ||subset||
small_partners_list = np.array([self.scenario.partners_list[i] for i in subset])
size_of_S = 0
for partner in small_partners_list:
size_of_S += len(partner.y_train)
data = [size_of_S, size_of_S ** 2]
return data
datasets = []
outputs = []
for k in range(n):
x = []
y = []
for subset, incr in self.increments_values[k].items():
x.append(makedata(subset))
y.append(incr)
datasets.append(x)
outputs.append(y)
# fit the regressions
models = []
for k in range(n):
model_k = LinearRegression()
model_k.fit(datasets[k], outputs[k])
models.append(model_k)
# define the approximation
def approx_increment(subset, k):
return models[k].predict([makedata(subset)])[0]
            # compute the renormalization constant of the importance density for all datasets
renorms = []
for k in range(n):
list_k = np.delete(np.arange(n), k)
renorm = 0
for length_combination in range(len(list_k) + 1):
for subset in combinations(
list_k, length_combination
): # could be avoided as
# prob(np.array(subset))*np.abs(approx_increment(np.array(subset),j))
# is constant in the combination
renorm += prob(np.array(subset)) * np.abs(
approx_increment(np.array(subset), k)
)
renorms.append(renorm)
# sampling
t = 0
q = -norm.ppf((1 - alpha) / 2, loc=0, scale=1)
v_max = 0
while (
t < 100 or t < 4 * q ** 2 * v_max / (sv_accuracy) ** 2
): # Check if the length of the confidence interval is below the value of
# sv_accuracy*characteristic_all_partners
t += 1
if t == 1:
contributions = np.array([np.zeros(n)])
else:
contributions = np.vstack((contributions, np.zeros(n)))
for k in range(n):
u = np.random.uniform(0, 1, 1)[0]
cumSum = 0
list_k = np.delete(np.arange(n), k)
for length_combination in range(len(list_k) + 1):
for subset in combinations(
list_k, length_combination
): # could be avoided as
# prob(np.array(subset))*np.abs(approx_increment(np.array(subset),j))
# is constant in the combination
cumSum += prob(np.array(subset)) * np.abs(
approx_increment(np.array(subset), k)
)
if cumSum / renorms[k] > u:
S = np.array(subset)
break
if cumSum / renorms[k] > u:
break
SUk = np.append(S, k)
increment = self.not_twice_characteristic(
SUk
) - self.not_twice_characteristic(S)
contributions[t - 1][k] = (
increment * renorms[k] / np.abs(approx_increment(np.array(S), k))
)
v_max = np.max(np.var(contributions, axis=0))
shap = np.mean(contributions, axis=0)
self.name = "<NAME>"
self.contributivity_scores = shap
self.scores_std = np.std(contributions, axis=0) / np.sqrt(t - 1)
self.normalized_scores = self.contributivity_scores / np.sum(self.contributivity_scores)
end = timer()
self.computation_time_sec = end - start
# # %% compute Shapley values with the Kriging adaptive importance sampling method
def AIS_Kriging(self, sv_accuracy=0.01, alpha=0.95, update=50):
"""Return the vector of approximated Shapley value corresponding to a list of partner
and a characteristic function using the importance sampling method and a Kriging model."""
start = timer()
n = len(self.scenario.partners_list)
# definition of the original density
def prob(subset):
lS = len(subset)
return factorial(n - 1 - lS) * factorial(lS) / factorial(n)
# definition of the approximation of the increment
# compute some increments to fuel the Kriging
S = np.arange(n)
self.not_twice_characteristic(S)
for k1 in range(n):
for k2 in range(n):
S = np.array([k1])
self.not_twice_characteristic(S)
S = np.delete(np.arange(n), [k1])
self.not_twice_characteristic(S)
if k1 != k2:
S = np.array([k1, k2])
self.not_twice_characteristic(S)
S = np.delete(np.arange(n), [k1, k2])
self.not_twice_characteristic(S)
# ## do the regressions
def make_coordinate(subset, k):
assert k not in subset
# compute the size of subset : ||subset||
coordinate = np.zeros(n)
small_partners_list = np.array([self.scenario.partners_list[i] for i in subset])
for partner, i in zip(small_partners_list, subset):
coordinate[i] = len(partner.y_train)
coordinate = np.delete(coordinate, k)
return coordinate
def dist(x1, x2):
return np.sqrt(np.sum((x1 - x2) ** 2))
# make the covariance functions
phi = np.zeros(n)
cov = []
for k in range(n):
phi[k] = np.median(make_coordinate(np.delete(np.arange(n), k), k))
def covk(x1, x2):
return np.exp(-dist(x1, x2) ** 2 / phi[k] ** 2)
cov.append(covk)
def make_models():
# make the datasets
datasets = []
outputs = []
for k in range(n):
x = []
y = []
for subset, incr in self.increments_values[k].items():
x.append(make_coordinate(subset, k))
y.append(incr)
datasets.append(x)
outputs.append(y)
# fit the kriging
models = []
for k in range(n):
model_k = KrigingModel(2, cov[k])
model_k.fit(datasets[k], outputs[k])
models.append(model_k)
all_models.append(models)
# define the approximation
def approx_increment(subset, k, j):
return all_models[j][k].predict(make_coordinate(subset, k))[0]
# sampling
t = 0
q = -norm.ppf((1 - alpha) / 2, loc=0, scale=1)
v_max = 0
all_renorms = []
all_models = []
Subsets = [] # created like this to avoid pointer issue
# Check if the length of the confidence interval is below the value of sv_accuracy*characteristic_all_partners
while (
t < 100 or t < 4 * q ** 2 * v_max / (sv_accuracy) ** 2
):
if t == 0:
contributions = np.array([np.zeros(n)])
else:
contributions = np.vstack((contributions, np.zeros(n)))
subsets = []
if t % update == 0: # renew the importance density g
j = t // update
make_models()
                # ## compute the renormalization constant of the new importance density for all datasets
renorms = []
for k in range(n):
list_k = np.delete(np.arange(n), k)
renorm = 0
for length_combination in range(len(list_k) + 1):
for subset in combinations(
list_k, length_combination
): # could be avoided as prob(np.array(subset))*np.abs(approx_increment(np.array(subset),j))
# is constant in the combination
renorm += prob(np.array(subset)) * np.abs(
approx_increment(np.array(subset), k, j)
)
renorms.append(renorm)
all_renorms.append(renorms)
# generate the new increments(subset)
for k in range(n):
u = np.random.uniform(0, 1, 1)[0]
cumSum = 0
list_k = np.delete(np.arange(n), k)
for length_combination in range(len(list_k) + 1):
for subset in combinations(
list_k, length_combination
): # could be avoided as prob(np.array(subset))*np.abs(approx_increment(np.array(subset),j))
# is constant in the combination
cumSum += prob(np.array(subset)) * np.abs(
approx_increment(np.array(subset), k, j)
)
if cumSum / all_renorms[j][k] > u:
S = np.array(subset)
subsets.append(S)
break
if cumSum / all_renorms[j][k] > u:
break
SUk = np.append(S, k)
increment = self.not_twice_characteristic(
SUk
) - self.not_twice_characteristic(S)
contributions[t - 1][k] = (
increment * all_renorms[j][k] / np.abs(approx_increment(S, k, j))
)
Subsets.append(subsets)
shap = np.mean(contributions, axis=0)
# calcul des variances
v_max = np.max(np.var(contributions, axis=0))
t += 1
shap = np.mean(contributions, axis=0)
self.name = "<NAME>"
self.contributivity_scores = shap
self.scores_std = np.std(contributions, axis=0) / np.sqrt(t - 1)
self.normalized_scores = self.contributivity_scores / np.sum(self.contributivity_scores)
end = timer()
self.computation_time_sec = end - start
# # %% compute Shapley values with the stratified sampling method
def Stratified_MC(self, sv_accuracy=0.01, alpha=0.95):
"""Return the vector of approximated Shapley values using the stratified monte-carlo method."""
start = timer()
N = len(self.scenario.partners_list)
characteristic_all_partners = self.not_twice_characteristic(
np.arange(N)
) # Characteristic function on all partners
if N == 1:
self.name = "<NAME>"
self.contributivity_scores = np.array([characteristic_all_partners])
self.scores_std = np.array([0])
self.normalized_scores = self.contributivity_scores / np.sum(self.contributivity_scores)
end = timer()
self.computation_time_sec = end - start
else:
# initialization
gamma = 0.2
beta = 0.0075
t = 0
sigma2 = np.zeros((N, N))
mu = np.zeros((N, N))
e = 0.0
v_max = 0
continuer = []
contributions = []
for k in range(N):
contributions.append(list())
continuer.append(list())
for k in range(N):
for strata in range(N):
contributions[k].append(list())
continuer[k].append(True)
# sampling
while np.any(continuer) or (1 - alpha) < v_max / (
sv_accuracy ** 2
): # Check if the length of the confidence interval is below the value of sv_accuracy
t += 1
e = (
1
+ 1 / (1 + np.exp(gamma / beta))
- 1 / (1 + np.exp(-(t - gamma * N) / (beta * N)))
                ) # e is used in the allocation to each stratum; here we take the formula advised in the literature
for k in range(N):
# select the strata to add an increment
                    if np.sum(sigma2[k])
#!/usr/bin/env python
# coding: utf-8
"""
File: scrutiny_plot.py
Author: <NAME> <<EMAIL>>
Description: This script generate a data popularity plot based on dbs and
phedex data on hdfs, Based on https://github.com/dmwm/CMSPopularity
"""
import os
import sys
import argparse
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
import pandas as pd
import numpy as np
from pyspark import SparkContext
from pyspark.sql import SparkSession
from pyspark.sql.functions import (
input_file_name,
regexp_extract,
concat,
col,
when,
lit,
last,
max as _max,
min as _min,
datediff,
countDistinct,
avg,
unix_timestamp,
from_unixtime,
)
from pyspark.sql.types import (
StringType,
StructField,
StructType,
LongType,
IntegerType,
DoubleType,
)
from pyspark.sql import Window
import matplotlib
matplotlib.use("Agg")
from matplotlib import pyplot
import seaborn as sns
class OptionParser:
def __init__(self):
"User based option parser"
desc = """
This app create a data popularity plot (scrutiny plot)
based on phedex and dbs hdfs data.
"""
self.parser = argparse.ArgumentParser("Scrutiny Plot", usage=desc)
self.parser.add_argument(
"end_date",
help="Date in yyyyMMdd format.",
nargs="?",
type=str,
default=datetime.strftime(datetime.now() - relativedelta(days=1), "%Y%m%d"),
)
self.parser.add_argument(
"--outputFolder", help="Output folder path", default="./output"
)
self.parser.add_argument(
"--outputFormat", help="Output format (png or pdf)", default="pdf"
)
self.parser.add_argument(
"--allTiers",
action="store_true",
help="By default the plot only takes into account T1 and T2 sites.",
default="False",
)
def fill_nulls(df):
"""
This script tries to fill the gid column, replacing the -1 values.
1. if gid is -1 is replaced with None.
2. if all columns in the row are null, then drop the row.
3. If the gid is none then replace it with the last gid
for that dataset in the same site.
"""
df_na = df.na.replace(-1, None, ["gid"]).na.drop(how="all")
ff = df_na.withColumn(
"gid",
when(col("gid").isNotNull(), col("gid")).otherwise(
last("gid", True).over(
Window.partitionBy("site", "dataset")
.orderBy("date")
.rowsBetween(-sys.maxsize, 0)
)
),
)
return ff
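# Illustrative behaviour of fill_nulls() (made-up rows): within one (site, dataset)
# partition ordered by date, gid values [2, -1, -1, 5, -1] become [2, 2, 2, 5, 5],
# since -1 is first mapped to null and then forward-filled over the window above.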
def merge_phedex(start_date, end_date, spark_session, base="hdfs:///cms/phedex"):
"""
Merge the phedex datasets for the given timerange to generate a dataframe with:
site,dateset,min_date,max_date,min_rdate,max_rdate,min_size,max_size,days
"""
_start = datetime.strptime(start_date, "%Y%m%d")
_end = datetime.strptime(end_date, "%Y%m%d")
_n_days = (_end - _start).days + 1
_dates = [
datetime.strftime(_start + timedelta(days=d), "%Y/%m/%d")
for d in range(0, _n_days)
]
_phedex_paths = ["{}/{}/part*".format(base, d) for d in _dates]
sc = spark_session.sparkContext
FileSystem = sc._gateway.jvm.org.apache.hadoop.fs.FileSystem
URI = sc._gateway.jvm.java.net.URI
Path = sc._gateway.jvm.org.apache.hadoop.fs.Path
fs = FileSystem.get(URI("hdfs:///"), sc._jsc.hadoopConfiguration())
l = [url for url in _phedex_paths if fs.globStatus(Path(url))]
schema = StructType(
[
StructField("date", StringType()),
StructField("site", StringType()),
StructField("dataset", StringType()),
StructField("size", LongType()),
StructField("rdate", StringType()),
StructField("gid", IntegerType()),
]
)
_agg_fields = ["date", "size"]
_agg_func = [_min, _max]
# if some column will be used as date, we can add
# .option('dateFormat','yyyyMMdd')
_df = (
spark_session.read.option("basePath", base)
.option("mode", "FAILFAST")
.option("nullValue", "null")
.option("emptyValue", "null")
.csv(l, schema=schema)
)
_df = fill_nulls(_df)
_grouped = (
_df.groupby("site", "dataset", "rdate", "gid").agg(
avg("size"),
countDistinct("date"),
*[fn(c) for c in _agg_fields for fn in _agg_func]
)
).withColumnRenamed("count(DISTINCT date)", "days")
_grouped = _grouped.selectExpr(
*[
"`{}` as {}".format(c, c.replace("(", "_").replace(")", ""))
if "(" in c
else c
for c in _grouped.columns
]
)
_grouped = _grouped.withColumn("avg_size", col("avg_size").astype(LongType()))
return _grouped
# ## The weight of the dataset in the given period is a weighted average of the size of the replicas in that period
#
def weigthed_size(min_date, max_date, begin, end):
"""
    A vectorized approach to calculate the weight of a size in a given period.
    @param min_date column with the first day the replica is present.
    @param max_date column with the last day the replica is present.
@param begin first day of the period (as lit column).
@param end last day of the period (as lit column).
"""
_start = when(min_date >= begin, min_date).otherwise(begin)
_end = when((max_date < end), max_date).otherwise(end)
delta = datediff(
from_unixtime(unix_timestamp(_end, "yyyyMMdd")),
from_unixtime(unix_timestamp(_start, "yyyyMMdd")),
) + lit(1)
delta = when((max_date < begin) | (min_date > end), lit(0)).otherwise(delta)
period = datediff(
from_unixtime(unix_timestamp(end, "yyyyMMdd")),
from_unixtime(unix_timestamp(begin, "yyyyMMdd")),
) + lit(1)
x = delta.cast(DoubleType()) / period.cast(DoubleType())
return x
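# Worked example (illustrative dates): for a replica with min_date=20200101 and
# max_date=20200110, evaluated over the period begin=20200101 .. end=20200131,
# the overlap is 10 days of a 31-day period, so the weight is 10/31 ~= 0.32 and the
# replica contributes avg_size * 10/31 to the weighted size for that period.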
def generate_scrutiny_plot(
end_date,
output_format="pdf",
output_folder="./output",
eventsInputFolder="hdfs:///cms/dbs_events",
basePath_dbs="hdfs:///cms/dbs_condor/dataset",
onlyT1_T2=True,
):
"""
param onlyT1_T2: Take into account only replicas in T1 and T2 sites.
"""
start_date_d = datetime.strptime(end_date, "%Y%m%d") - relativedelta(
months=12, days=-1
)
start_date = datetime.strftime(start_date_d, "%Y%m%d")
midterm = datetime.strftime(start_date_d + relativedelta(months=6), "%Y%m%d")
lastQuarter = datetime.strftime(start_date_d + relativedelta(months=9), "%Y%m%d")
dbsInput = "{}/{{{},{}}}/*/*/part-*".format(
basePath_dbs, start_date[:4], end_date[:4]
)
sc = SparkContext(appName="scrutinyPlotReplicated")
spark = SparkSession.builder.config(conf=sc._conf).getOrCreate()
phedex_df = merge_phedex(start_date, end_date, spark)
if onlyT1_T2:
phedex_df = phedex_df.filter(
col("site").startswith("T1_") | col("site").startswith("T2_")
)
# ## Calculate the effective average size of each dataset in the given periods
# size of each dataset in each of the time periods
phedex_df = phedex_df.withColumn(
"weight_6Month",
weigthed_size(
phedex_df.min_date, phedex_df.max_date, lit(midterm), lit(end_date)
),
)
phedex_df = phedex_df.withColumn(
"weighted_size_6Month", col("avg_size") * col("weight_6Month")
)
phedex_df = phedex_df.withColumn(
"weight_3Month",
weigthed_size(
phedex_df.min_date, phedex_df.max_date, lit(lastQuarter), lit(end_date)
),
)
phedex_df = phedex_df.withColumn(
"weighted_size_3Month", col("avg_size") * col("weight_3Month")
)
phedex_df = phedex_df.withColumn(
"weight_12Month",
weigthed_size(
phedex_df.min_date, phedex_df.max_date, lit(start_date), lit(end_date)
),
)
phedex_df = phedex_df.withColumn(
"weighted_size_12Month", col("avg_size") * col("weight_3Month")
)
phedex_df = phedex_df.withColumn("min_date", col("rdate"))
_df_dsSzDur = (
phedex_df.groupby("dataset")
.agg(
{
"min_date": "min",
"max_date": "max",
"weighted_size_3Month": "sum",
"weighted_size_6Month": "sum",
"weighted_size_12Month": "sum",
}
)
.toPandas()
)
del phedex_df
# # Read dbs_condor dataset
#
    # This dataset, stored in HDFS, is the basis for determining how the datasets are used.
dbs_df = (
spark.read.option("basePath", basePath_dbs)
.csv(dbsInput, header=True)
.select("dataset", "sum_evts")
.withColumn("filename", input_file_name())
)
# ## Filter the dataset
#
    # We are only interested in records with datasets. There should be no records with a dataset and without events (but currently there are).
    # Are there records with a dataset but without events (empty sum_evts in the original files)?
    # - By default, spark takes an empty string as null.
    # - In the current version they are rendered as the "null" string instead of a null value (this will change in other versions).
dbs_df = dbs_df.filter('dataset != "null" AND sum_evts !="null" AND sum_evts != ""')
zero = dbs_df.filter('sum_evts = "0.0"')
dbs_df = dbs_df.subtract(zero)
dbs_df = dbs_df.withColumn("events", dbs_df.sum_evts.cast("double") * 1000)
dbs_df = dbs_df.withColumn(
"days",
concat(
regexp_extract(dbs_df.filename, ".*/([0-9]{4})/([0-9]{2})/([0-9]{2})", 1),
regexp_extract(dbs_df.filename, ".*/([0-9]{4})/([0-9]{2})/([0-9]{2})", 2),
regexp_extract(dbs_df.filename, ".*/([0-9]{4})/([0-9]{2})/([0-9]{2})", 3),
),
)
dbs_df = dbs_df.filter("days between {} AND {}".format(start_date, end_date))
# # Use of each dataset per day
_df_agg = (
dbs_df.groupBy("dataset", "days").sum("events").alias("sum_events").toPandas()
)
_plain = _df_agg.rename(columns={"days": "day", "sum(events)": "sum_events"})
del dbs_df
del _df_agg
_plain[_plain.sum_events == 0].head()
_events_hadoop = spark.read.option("basePath", eventsInputFolder).csv(
"{}/part*.csv".format(eventsInputFolder), header=True
)
_events = _events_hadoop.select("dataset", "nevents")
df_dsSzDur = pd.merge(_df_dsSzDur, _events.toPandas(), on="dataset")
df_dsSzDur = df_dsSzDur.rename(
columns={
"sum(weighted_size_12Month)": "size12month",
"sum(weighted_size_3Month)": "size3month",
"sum(weighted_size_6Month)": "size6month",
"max(max_date)": "end",
"min(min_date)": "begin",
"nevents": "nEvents",
}
)
# ## Join the datasets
#
    # An inner join to keep only the used datasets.
_merged = pd.merge(df_dsSzDur, _plain, on="dataset", sort=True)
# Rate of the events used over the number of events in the file
_merged["rate"] = _merged.sum_events / _merged.nEvents.astype(float)
# ## Create the desired datasets.
#
    # The datasets sixMnts, threeMnts and twelveMnts contain only data for datasets that were used at least once in the given period.
_merged.day = _merged.day.astype("str")
full = _merged
sixMnts = full[full.day >= midterm][["dataset", "size6month", "day", "rate"]]
threeMnts = full[(full.day >= lastQuarter)][
["dataset", "size3month", "day", "rate"]
]
twelveMnts = full[["dataset", "size12month", "day", "rate"]][
np.logical_not( | np.isnan(full.rate) | numpy.isnan |
#!/usr/bin/env python
'''
Test the xml input
'''
import os
from siconos.tests_setup import working_dir
try:
import pytest
xfail = pytest.mark.xfail
except:
import py.test
xfail = py.test.mark.xfail
from siconos.fromXml import buildModelXML
import siconos.kernel as SK
import numpy as np
def test_xml1():
''' the BouncingBall '''
bouncingBall = buildModelXML(os.path.join(working_dir, 'data/BBallTS.xml'))
# --- Get the simulation ---
s = bouncingBall.simulation()
dsN = SK.dynamicalSystems(bouncingBall.nonSmoothDynamicalSystem().topology().dSG(0))[0].number()
ball = bouncingBall.nonSmoothDynamicalSystem().dynamicalSystem(dsN)
N = 2000 # Number of time steps
# saved in a matrix dataPlot
outputSize = 4
dataPlot = np.zeros((N + 1, outputSize))
q = ball.q()
v = ball.velocity()
p = ball.p(1)
dataPlot[0, 0] = bouncingBall.t0()
dataPlot[0, 1] = q[0]
dataPlot[0, 2] = v[0]
dataPlot[0, 3] = p[0]
print("====> Start computation ...")
# --- Time loop ---
k = 1
while s.hasNextEvent():
s.computeOneStep()
# --- Get values to be plotted ---
dataPlot[k, 0] = s.nextTime()
dataPlot[k, 1] = q[0]
dataPlot[k, 2] = v[0]
dataPlot[k, 3] = p[0]
s.nextStep()
k += 1
print("End of computation - Number of iterations done: {:}".format(k))
print("====> Output file writing ...")
dataPlot.resize(k, outputSize)
np.savetxt("BBallTS.dat", dataPlot)
# Comparison with a reference file
dataPlotRef = SK.getMatrix(SK.SimpleMatrix(os.path.join(working_dir, 'data/BBallTSXML.ref')))
if np.linalg.norm(dataPlot - dataPlotRef, ord=np.inf) > 1e-12:
print(dataPlot - dataPlotRef)
print("ERROR: The result is rather different from the reference file.")
def test_xml2():
''' BallInBowl '''
# --- buildModelXML loading from xml file ---
bouncingBall = buildModelXML(os.path.join(working_dir,
'data/BallInBowl.xml'))
# --- Get the simulation ---
s = bouncingBall.simulation()
k = 0
T = bouncingBall.finalT()
t0 = bouncingBall.t0()
h = s.timeStep()
    N = int(np.ceil((T - t0) / h))
# --- Get the values to be plotted ---
# . saved in a matrix dataPlot
dataPlot = np.zeros((N + 1, 6))
print("Prepare data for plotting ... ")
# For the initial time step:
# time
dataPlot[k, 0] = bouncingBall.t0()
# state q for the first dynamical system (ball)
dsN = SK.dynamicalSystems(bouncingBall.nonSmoothDynamicalSystem().topology().dSG(0))[0].number()
ball = bouncingBall.nonSmoothDynamicalSystem().dynamicalSystem(dsN)
q = ball.q()
v = ball.velocity()
p = ball.p(1)
dataPlot[k, 1] = q[0]
dataPlot[k, 2] = v[0]
dataPlot[k, 3] = q[1]
dataPlot[k, 4] = v[1]
dataPlot[k, 5] = p[0]
# --- Compute elapsed time ---
print("Computation ... ")
# --- Time loop ---
while s.hasNextEvent():
# solve ...
s.computeOneStep()
# --- Get values to be plotted ---
# time
dataPlot[k, 0] = s.nextTime()
# Ball: state q
dataPlot[k, 1] = q[0]
# Ball: velocity
dataPlot[k, 2] = v[0]
# Ground: q
dataPlot[k, 3] = q[1]
# Ground: velocity
dataPlot[k, 4] = v[1]
# Reaction
dataPlot[k, 5] = p[0]
# dataPlot[k, 6] = osi.computeResidu()
# transfer of state i+1 into state i and time incrementation
s.nextStep()
k += 1
# Number of time iterations
print("Number of iterations done: ")
# dataPlot (ascii) output
# ioMatrix::write(dataPlot,"noDim")
np.savetxt("BallInBowl.dat", dataPlot)
def test_xml3():
''' DryFriction '''
# --- buildModelXML loading from xml file ---
oscillator = buildModelXML(os.path.join(working_dir,
'data/DryFriction.xml'))
# --- Get the simulation ---
s = oscillator.simulation()
k = 0
T = oscillator.finalT()
t0 = oscillator.t0()
h = s.timeStep()
    N = int(np.ceil((T - t0) / h))
# --- Get the values to be plotted ---
# . saved in a matrix dataPlot
dataPlot = np.zeros((N + 1, 5))
print("Prepare data for plotting ... ")
# For the initial time step:
# time
dataPlot[k, 0] = t0
# state q for the first dynamical system (ball)
dsN = SK.dynamicalSystems(oscillator.nonSmoothDynamicalSystem().topology().dSG(0))[0].number()
oscillo = oscillator.nonSmoothDynamicalSystem().dynamicalSystem(dsN)
inter = SK.interactions(oscillator.nonSmoothDynamicalSystem().topology().indexSet(0))[0]
dataPlot[k, 1] = oscillo.q()[0]
# velocity for the oscillo
dataPlot[k, 2] = oscillo.velocity()[0]
dataPlot[k, 3] = inter.lambda_(1)[0]
dataPlot[k, 4] = inter.lambda_(1)[1]
# --- Compute elapsed time ---
print("Computation ... ")
# --- Time loop ---
while k < N:
# get current time step
k += 1
# print( " Pas " << k
# solve ...
s.computeOneStep()
# --- Get values to be plotted ---
# time
dataPlot[k, 0] = s.nextTime()
# Oscillo: state q
dataPlot[k, 1] = oscillo.q()[0]
# Oscillo: velocity
dataPlot[k, 2] = oscillo.velocity()[0]
dataPlot[k, 3] = inter.lambda_(1)[0]
dataPlot[k, 4] = inter.lambda_(1)[1]
# transfer of state i+1 into state i and time incrementation
s.nextStep()
# Number of time iterations
print("Number of iterations done: {:}".format(k))
# dataPlot (ascii) output
np.savetxt("DryFriction.dat", dataPlot)
@xfail
def test_xml4():
''' CamFollower '''
# --- buildModelXML loading from xml file ---
CamFollower = buildModelXML(os.path.join(working_dir,
'data/CamFollower_TIDS.xml'))
# --- Get and initialize the simulation ---
S = CamFollower.simulation()
k = 0
T = CamFollower.finalT()
t0 = CamFollower.t0()
h = S.timeStep()
    N = int(np.ceil((T - t0) / h))
# --- Get the values to be plotted ---
# . saved in a matrix dataPlot
dataPlot = np.zeros((N + 1, 8))
print("Prepare data for plotting ... ")
# For the initial time step:
# time
dataPlot[k, 0] = t0
# state q for the Follower
dsN = CamFollower.nonSmoothDynamicalSystem().topology().dSG(0).dynamicalSystems()[0].number()
Follower = CamFollower.nonSmoothDynamicalSystem().dynamicalSystem(dsN)
inter = CamFollower.nonSmoothDynamicalSystem().topology().dSG(0).interactions()[0]
# Position of the Follower
dataPlot[k, 1] = Follower.q()[0]
# Velocity for the Follower
dataPlot[k, 2] = Follower.velocity()[0]
# Reaction
dataPlot[k, 3] = inter.lambda_(1)[0]
# External Forcing
dataPlot[k, 4] = Follower.fExt()[0]
# State of the Cam
rpm = 358
CamEqForce = CamState(t0, rpm, CamPosition, CamVelocity, CamAcceleration)
# Position of the Cam
dataPlot[k, 5] = CamPosition
# Velocity of the Cam
dataPlot[k, 6] = CamVelocity
# Acceleration of the Cam
dataPlot[k, 7] = CamPosition + Follower.q()[0]
print("Computation ... ")
# --- Time loop ---
while k < N:
# get current time step
k += 1
S.computeOneStep()
# --- Get values to be plotted ---
dataPlot[k, 0] = S.nextTime()
# dataPlot[k, 1] = Follower.q()[0]
# dataPlot[k, 2] = ball.velocity()[0]
dataPlot[k, 1] = Follower.q()[0]
dataPlot[k, 2] = Follower.velocity()[0]
dataPlot[k, 3] = inter.lambda_(1)[0]
dataPlot[k, 4] = Follower.fExt()[0]
CamEqForce = CamState(S.nextTime(), rpm, CamPosition, CamVelocity, CamAcceleration)
dataPlot[k, 5] = CamPosition
dataPlot[k, 6] = CamVelocity
dataPlot[k, 7] = CamPosition + Follower.q()[0]
# transfer of state i+1 into state i and time incrementation
S.nextStep()
# Number of time iterations
print("Number of iterations done: {:}".format(k))
# dataPlot (ascii) output
np.savetxt("CamFollower.dat", dataPlot)
def test_xml5():
''' Bouncing Ball ED '''
# --- buildModelXML loading from xml file ---
bouncingBall = buildModelXML(os.path.join(working_dir, 'data/BBallED.xml'))
# --- Get and initialize the simulation ---
s = bouncingBall.simulation()
dsN = SK.dynamicalSystems(bouncingBall.nonSmoothDynamicalSystem().topology().dSG(0))[0].number()
ball = bouncingBall.nonSmoothDynamicalSystem().dynamicalSystem(dsN)
# --- Get the values to be plotted ---
# . saved in a matrix dataPlot
N = 12368 # Number of saved points: depends on the number of events ...
outputSize = 5
dataPlot = | np.zeros((N + 1, outputSize)) | numpy.zeros |
"""Classes for managing a set of potentials. The potentials exert
forces which are a function of position.
"""
from __future__ import absolute_import
import numpy as N
class PotentialManager(list):
"""A container type for a list of potentials."""
def __init__(self, seq=None):
if seq is None:
return list.__init__(self)
else:
seq = self._makeiterablecheck(seq)
return list.__init__(self, seq)
def __repr__(self):
return self.__class__.__name__ + '(' + list.__repr__(self) + ')'
def _check(self, item):
if not isinstance(item, Potential):
            raise TypeError('All managed potentials must be an instance of (a subclass of) Potential.Potential')
def _makeiterablecheck(self, item):
def check(item):
for val in item:
self._check(val)
yield val
return check(item)
def __setitem__(self, key, value):
if hasattr(value, '__iter__'):
value = self._makeiterablecheck(value)
else:
            self._check(value)
return list.__setitem__(self, key, value)
def __setslice__(self, i,j, seq):
self.__setitem__(slice(i,j), seq)
def append(self, value):
self._check(value)
return list.append(self, value)
def extend(self, iterable):
return list.extend(self, self._makeiterablecheck(iterable))
def insert(self, index, item):
self._check(item)
return list.insert(self, index, item)
def __add__(self, other):
ans = self.__class__(self)
ans.extend(other)
return ans
    def __iadd__(self, other):
        self.extend(other)
        return self
def addForces(self, F, r, **kwargs):
"""Calculate the force due to all managed Potentials, at the
points specified by 'r' and add it to the forces stored in
        'F'. Keyword arguments are forwarded to each potential's force().
"""
for p in self:
p.addForce(F, r, **kwargs)
continue
return
pass
class Potential(object):
"""Base class for a potential."""
def __call__(self, r, **kwargs):
        raise NotImplementedError('Subclasses must implement __call__(r, **kwargs) to return the potential')
def force(self, r, **kwargs):
        raise NotImplementedError('Subclasses must implement force(r, **kwargs) to return the force')
def addForce(self, F, r, **kwargs):
F += self.force(r, **kwargs)
return
pass
class PeriodicBCPotential(Potential):
"""For cases with periodic boundary conditions. Subclasses are
responsible for setting the origin attribute."""
def minim(self, r, lattice):
"""Calculate the minimum image displacement from the origin."""
dr = r - self.origin.reshape((r.ndim-1)*(1,)+(3,))
for dim in range(3):
ind = N.where(dr[:,dim] > +0.5 * lattice.size[dim])
dr[ind, dim] -= lattice.size[dim]
ind = N.where(dr[:,dim] < -0.5 * lattice.size[dim])
dr[ind, dim] += lattice.size[dim]
continue
return dr
pass
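# Hedged usage sketch (added for illustration; not in the original module). It shows what
# PeriodicBCPotential.minim computes: the minimum-image displacement, i.e. each component
# of r - origin wrapped into [-size/2, +size/2). The lattice argument is assumed to be any
# object exposing a .size sequence of box lengths.
def _minimum_image_demo():
    class _Lattice(object):
        size = N.array([10.0, 10.0, 10.0])
    class _AtOrigin(PeriodicBCPotential):
        origin = N.zeros(3)
    dr = _AtOrigin().minim(N.array([[9.5, 0.2, 4.9]]), _Lattice())
    # dr == [[-0.5, 0.2, 4.9]]: the x displacement is wrapped to the nearer periodic image
    return dr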
class HarmonicPotential(PeriodicBCPotential):
"""Spherical, harmonic potential."""
def __init__(self, k, origin):
"""Potential is located at origin with spring constant k."""
self.k = k
self.origin = origin
return
def __call__(self, r, **kwargs):
# return the potential at the positions r
lattice = kwargs['lattice']
dr = self.minim(r, lattice)
return 0.5* self.k * N.sum(dr**2, axis=-1)
def force(self, r, **kwargs):
lattice = kwargs['lattice']
dr = self.minim(r, lattice)
return -self.k * dr
pass
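# Hedged usage sketch (illustrative only): for a displacement dr from the potential's
# origin, __call__ returns 0.5*k*|dr|^2 and force returns -k*dr, i.e. a Hookean restoring
# force toward the origin (after minimum-image wrapping).
def _harmonic_demo():
    class _Lattice(object):
        size = N.array([10.0, 10.0, 10.0])
    pot = HarmonicPotential(k=2.0, origin=N.zeros(3))
    r = N.array([[1.0, 0.0, 0.0]])
    energy = pot(r, lattice=_Lattice())       # -> [1.0]  (0.5 * 2.0 * 1.0**2)
    force = pot.force(r, lattice=_Lattice())  # -> [[-2.0, 0.0, 0.0]]
    return energy, force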
class HarmonicSlotPotential(PeriodicBCPotential):
def __init__(self, k, origin, normal):
self.k = k
self.origin = origin
self.normal = normal / N.sqrt(N.sum(normal**2))
return
    def __call__(self, r, **kwargs):
        lattice = kwargs['lattice']
        dr = self.minim(r, lattice)
        rdotn = N.sum(dr * self.normal.reshape((r.ndim-1)*(1,)+(3,)), axis=-1)
        return 0.5 * self.k * rdotn**2
    def force(self, r, **kwargs):
        lattice = kwargs['lattice']
        dr = self.minim(r, lattice)
        rdotn = N.sum(dr * self.normal.reshape((r.ndim-1)*(1,)+(3,)), axis=-1)
        return -self.k * self.normal.reshape((r.ndim-1)*(1,)+(3,)) * \
            rdotn[..., N.newaxis]
    pass
class WallLJPotential(Potential):
"""A Lennard-Jones type potential for keeping swimmers away from walls.
This is rule B from my thesis."""
def __init__(self, epsilon, sigma, walldim=2):
self.epsilon = epsilon
self.sigma = sigma
self.rStar = sigma * 2**(1./6.)
self.walldim = walldim
return
def __call__(self, r, **kwargs):
lattice = kwargs['lattice']
        z = r[..., self.walldim] - 0.5
U = N.zeros(z.shape)
lowers = N.where(z < self.rStar)
sigma_z_6 = (self.sigma/z[lowers])**6
U[lowers] = (4*self.epsilon)*(sigma_z_6**2 - sigma_z_6) + self.epsilon
del lowers
        uppers = N.where(z > lattice.size[self.walldim]-self.rStar)
        sigma_z_6 = (self.sigma/(lattice.size[self.walldim] - z[uppers]))**6
U[uppers] = (4*self.epsilon)*(sigma_z_6**2 - sigma_z_6) + self.epsilon
return U
def force(self, r, **kwargs):
lattice = kwargs['lattice']
z = r[..., self.walldim] - 0.5
F = N.zeros(r.shape)
Fz = F[...,self.walldim]
lowers = N.where(z < self.rStar)
dz = z[lowers]
sigma_z_6 = (self.sigma/dz)**6
Fz[lowers] = (24*self.epsilon)*(2*sigma_z_6**2 - sigma_z_6) / \
dz
del lowers
uppers = | N.where(z > lattice.size[self.walldim]-self.rStar) | numpy.where |
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
from typing import NoReturn
import numpy as np
import pandas as pd
from IMLearn.metrics import loss_functions
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
pio.renderers.default = "browser"
def load_data(filename: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
df = pd.read_csv(filename)
zip_dict = {}
def add_to_dict(x):
if x['zipcode'] not in zip_dict:
zip_dict[x['zipcode']] = [x['price']]
else:
zip_dict[x['zipcode']].append(x['price'])
df.apply(add_to_dict, axis=1)
for key in zip_dict:
zip_dict[key] = sum(zip_dict[key]) // len(zip_dict[key])
zips = df['zipcode'].copy()
for i in range(len(zips)):
if zips[i] > 0:
zips[i] = zip_dict[zips[i]]
df.insert(0, 'zipcodes', zips)
df = df.drop(columns=['id', 'date', 'lat', 'long', 'sqft_living15', 'sqft_lot15', 'zipcode'])
df['yr_renovated'] = df.apply(
lambda x: x['yr_built'] if x['yr_renovated'] == 0 else x['yr_renovated'], axis=1)
df = df.dropna()
prices = df['price'].copy()
df = df.drop(columns='price')
return df, prices
def feature_evaluation(X: pd.DataFrame, y: pd.Series, output_path: str = ".") -> NoReturn:
"""
Create scatter plot between each feature and the response.
- Plot title specifies feature name
- Plot title specifies Pearson Correlation between feature and response
- Plot saved under given folder with file name including feature name
Parameters
----------
X : DataFrame of shape (n_samples, n_features)
Design matrix of regression problem
y : array-like of shape (n_samples, )
Response vector to evaluate against
output_path: str (default ".")
Path to folder in which plots are saved
"""
y_std = y.std()
corr_lst = []
for column in X:
df = X[[column]].copy()
df.insert(1, 'y', y)
c_std = X[column].std()
corr = (df.cov() / (y_std * c_std))[column][1]
corr_lst.append((column, corr))
fig = go.Figure(layout=dict(title=f'The correlation between {column} and prices is {corr}'))
fig.add_trace(go.Scatter(x=X[column], y=y, name=column, mode="markers"))
fig.update_yaxes(title_text='House Prices')
fig.update_xaxes(title_text=column)
fig.write_image(output_path + f'\\{column}.jpg')
# print(corr_lst)
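# Hedged cross-check (added for illustration; not part of the original exercise): the loop
# above computes the Pearson correlation as cov(feature, y) / (std(feature) * std(y)).
# The same number can be obtained directly with numpy as a sanity check (ddof=1 matches
# the pandas defaults used above).
def _pearson_check(feature, y):
    return float(np.cov(feature, y)[0, 1] / (np.std(feature, ddof=1) * np.std(y, ddof=1)))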
if __name__ == '__main__':
np.random.seed(0)
# Question 1 - Load and preprocessing of housing prices dataset
X, y = load_data("G:\My Drive\school\year two\semester B\iml\IML.HUJI\datasets\house_prices.csv")
# Question 2 - Feature evaluation with respect to response
feature_evaluation(X, y, 'G:\My Drive\school\year two\semester B\iml\exercises\ex2')
# Question 3 - Split samples into training- and testing sets.
x_train, y_train, x_test, y_test = split_train_test(X, y)
# Question 4 - Fit model over increasing percentages of the overall training data
# For every percentage p in 10%, 11%, ..., 100%, repeat the following 10 times:
# 1) Sample p% of the overall training data
# 2) Fit linear model (including intercept) over sampled set
# 3) Test fitted model over test set
# 4) Store average and variance of loss over test set
# Then plot average loss as function of training size with error ribbon of size (mean-2*std, mean+2*std)
linreg = LinearRegression()
loss_lst = []
std_lst = []
for p in range(10, 101):
curr_loss = []
for i in range(10):
sample_x = x_train.sample(frac=p/100, random_state=i)
sample_y = y_train.sample(frac=p/100, random_state=i)
linreg.fit(sample_x.to_numpy(), sample_y.to_numpy())
curr_loss.append(linreg.loss(x_test.to_numpy(), y_test.to_numpy()))
mean = np.mean(curr_loss)
std = | np.std(curr_loss) | numpy.std |
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
import numpy as np
from mo.front.common.partial_infer.elemental import copy_shape_infer
from mo.front.common.partial_infer.eltwise import eltwise_infer
from mo.middle.passes.fusing.resnet_optimization import stride_optimization
from mo.ops.convolution import Convolution
from mo.ops.pooling import Pooling
from mo.utils.ir_engine.compare_graphs import compare_graphs
from mo.utils.unittest.graph import build_graph
max_elt_lambda = lambda node: eltwise_infer(node, lambda a, b: np.maximum(a, b))
nodes_attributes = {
# Placeholders
'placeholder_1': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
# Concat1 operation
'eltwise_1': {'type': 'Maximum', 'kind': 'op', 'op': 'Maximum', 'infer': max_elt_lambda},
'eltwise_1_data': {'name': 'eltwise_1_data', 'value': None, 'shape': None, 'kind': 'data'},
# Convolutions
'conv_1': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NCHW',
'output_spatial_shape': None, 'output_shape': None, 'bias_term': True, 'group': 1,
'spatial_dims': np.array([2, 3]),
'channel_dims': np.array([1]), 'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
'dilation': np.array([1, 1, 1, 1]),
'batch_dims': np.array([0]), 'infer': Convolution.infer,
'kernel_spatial_idx': np.array([2, 3], dtype=np.int64), 'input_feature_channel': 1,
'output_feature_channel': 0, },
'conv_1_w': {'value': None, 'shape': None, 'kind': 'data',
'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
'conv_1_b': {'value': None, 'shape': None, 'kind': 'data'},
'conv_1_data': {'value': None, 'shape': None, 'kind': 'data'},
'conv_2': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NCHW',
'output_spatial_shape': None, 'output_shape': None, 'bias_term': True, 'group': 1,
'spatial_dims': np.array([2, 3]),
'channel_dims': np.array([1]), 'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
'dilation': np.array([1, 1, 1, 1]),
'batch_dims': np.array([0]), 'infer': Convolution.infer,
'kernel_spatial_idx': np.array([2, 3], dtype=np.int64), 'input_feature_channel': 1,
'output_feature_channel': 0, },
'conv_2_w': {'value': None, 'shape': None, 'kind': 'data',
'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
'conv_2_b': {'value': None, 'shape': None, 'kind': 'data'},
'conv_2_data': {'value': None, 'shape': None, 'kind': 'data'},
'conv_3': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NCHW',
'output_spatial_shape': None, 'output_shape': None, 'bias_term': True, 'group': 1,
'spatial_dims': np.array([2, 3]),
'channel_dims': np.array([1]), 'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
'dilation': np.array([1, 1, 1, 1]),
'batch_dims': np.array([0]), 'infer': Convolution.infer,
'kernel_spatial_idx': np.array([2, 3], dtype=np.int64), 'input_feature_channel': 1,
'output_feature_channel': 0, },
'conv_3_w': {'value': None, 'shape': None, 'kind': 'data',
'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
'conv_3_b': {'value': None, 'shape': None, 'kind': 'data'},
'conv_3_data': {'value': None, 'shape': None, 'kind': 'data'},
'conv_4': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NCHW',
'output_spatial_shape': None, 'output_shape': None, 'bias_term': True, 'group': 1,
'spatial_dims': np.array([2, 3]),
'channel_dims': np.array([1]), 'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
'dilation': np.array([1, 1, 1, 1]),
'batch_dims': np.array([0]), 'infer': Convolution.infer,
'kernel_spatial_idx': np.array([2, 3], dtype=np.int64), 'input_feature_channel': 1,
'output_feature_channel': 0, },
'conv_4_w': {'value': None, 'shape': None, 'kind': 'data',
'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
'conv_4_b': {'value': None, 'shape': None, 'kind': 'data'},
'conv_4_data': {'value': None, 'shape': None, 'kind': 'data'},
'conv_5': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NCHW',
'output_spatial_shape': None, 'output_shape': None, 'bias_term': True, 'group': 1,
'spatial_dims': np.array([2, 3]),
'channel_dims': np.array([1]), 'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
'dilation': np.array([1, 1, 1, 1]),
'batch_dims': np.array([0]), 'infer': Convolution.infer,
'kernel_spatial_idx': np.array([2, 3], dtype=np.int64), 'input_feature_channel': 1,
'output_feature_channel': 0, },
'conv_5_w': {'value': None, 'shape': None, 'kind': 'data',
'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
'conv_5_b': {'value': None, 'shape': None, 'kind': 'data'},
'conv_5_data': {'value': None, 'shape': None, 'kind': 'data'},
# ReLU
'relu_1': {'shape': None, 'type': 'ReLU', 'kind': 'op', 'op': 'ReLU', 'infer': copy_shape_infer},
'relu_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
'relu_2': {'shape': None, 'type': 'ReLU', 'kind': 'op', 'op': 'ReLU', 'infer': copy_shape_infer},
'relu_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
'relu_3': {'shape': None, 'type': 'ReLU', 'kind': 'op', 'op': 'ReLU', 'infer': copy_shape_infer},
'relu_3_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
# Pooling
'pool_1': {'type': 'Pooling', 'kind': 'op', 'op': 'Pooling',
'spatial_dims': np.array([2, 3]),
'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
'infer': Pooling.infer},
'pool_1_data': {'value': None, 'shape': None, 'kind': 'data'},
}
# In the descriptions of the unit tests below the following syntax is used: Operation(NxM,XxY), where NxM is the kernel size and XxY is the stride
class ResnetOptimizationTests(unittest.TestCase):
# Pl->Conv(1x1,1x1)->Conv(1x1,2x2) => Pl->Conv(1x1,2x2)->Conv(1x1,1x1)
def test_resnet_optimization_1(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'conv_1'),
('conv_1_w', 'conv_1'),
('conv_1_b', 'conv_1'),
('conv_1', 'conv_1_data'),
('conv_1_data', 'conv_2'),
('conv_2_w', 'conv_2'),
('conv_2_b', 'conv_2'),
('conv_2', 'conv_2_data'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
'conv_1_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_1': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 1, 1]),
'output': np.array([3]), },
'conv_1_data': {'shape': np.array([1, 3, 224, 224])},
'conv_2_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_2': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3]), },
'conv_2_data': {'shape': np.array([1, 3, 112, 112])},
},
nodes_with_edges_only=True)
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'conv_1'),
('conv_1_w', 'conv_1'),
('conv_1_b', 'conv_1'),
('conv_1', 'conv_1_data'),
('conv_1_data', 'conv_2'),
('conv_2_w', 'conv_2'),
('conv_2_b', 'conv_2'),
('conv_2', 'conv_2_data'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
'conv_1_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_1': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3]), },
'conv_1_data': {'shape': np.array([1, 3, 112, 112])},
'conv_2_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_2': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 1, 1]),
'output': np.array([3]), },
'conv_2_data': {'shape': np.array([1, 3, 112, 112])},
},
nodes_with_edges_only=True)
graph.graph['layout'] = 'NCHW'
graph_ref.graph['layout'] = 'NCHW'
stride_optimization(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'conv_2_data', check_op_attrs=True)
self.assertTrue(flag, resp)
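    # Illustrative note (added): the optimization preserves output shapes because, for a
    # 1x1 kernel, only the product of the strides along the chain matters:
    # 224 / (1*2) == 224 / (2*1) == 112, so moving the 2x2 stride from conv_2 up to
    # conv_1 yields the same 1x3x112x112 output while conv_2 now runs with stride 1x1.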
# Pl->Conv(3x3,2x2)->Conv(1x1,2x2) => Pl->Conv(3x3,4x4)->Conv(1x1,1x1)
def test_resnet_optimization_2(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'conv_1'),
('conv_1_w', 'conv_1'),
('conv_1_b', 'conv_1'),
('conv_1', 'conv_1_data'),
('conv_1_data', 'conv_2'),
('conv_2_w', 'conv_2'),
('conv_2_b', 'conv_2'),
('conv_2', 'conv_2_data'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
'conv_1_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_1': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3]), },
'conv_1_data': {'shape': np.array([1, 3, 112, 112])},
'conv_2_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_2': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3]), },
'conv_2_data': {'shape': np.array([1, 3, 56, 56])},
},
nodes_with_edges_only=True)
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'conv_1'),
('conv_1_w', 'conv_1'),
('conv_1_b', 'conv_1'),
('conv_1', 'conv_1_data'),
('conv_1_data', 'conv_2'),
('conv_2_w', 'conv_2'),
('conv_2_b', 'conv_2'),
('conv_2', 'conv_2_data'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
'conv_1_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_1': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 4, 4]),
'output': np.array([3]), },
'conv_1_data': {'shape': np.array([1, 3, 56, 56])},
'conv_2_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_2': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 1, 1]),
'output': np.array([3]), },
'conv_2_data': {'shape': np.array([1, 3, 56, 56])},
},
nodes_with_edges_only=True)
graph.graph['layout'] = 'NCHW'
graph_ref.graph['layout'] = 'NCHW'
stride_optimization(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'conv_2_data', check_op_attrs=True)
self.assertTrue(flag, resp)
# Pl->Conv(3x3,2x2)->Conv(3x3,2x2) => Same
def test_resnet_optimization_3(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'conv_1'),
('conv_1_w', 'conv_1'),
('conv_1_b', 'conv_1'),
('conv_1', 'conv_1_data'),
('conv_1_data', 'conv_2'),
('conv_2_w', 'conv_2'),
('conv_2_b', 'conv_2'),
('conv_2', 'conv_2_data'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
'conv_1_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])},
'conv_1': {'kernel_spatial': np.array([3, 3]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3]), },
'conv_1_data': {'shape': np.array([1, 3, 112, 112])},
'conv_2_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])},
'conv_2': {'kernel_spatial': np.array([3, 3]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3]), },
'conv_2_data': {'shape': np.array([1, 3, 56, 56])},
},
nodes_with_edges_only=True)
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'conv_1'),
('conv_1_w', 'conv_1'),
('conv_1_b', 'conv_1'),
('conv_1', 'conv_1_data'),
('conv_1_data', 'conv_2'),
('conv_2_w', 'conv_2'),
('conv_2_b', 'conv_2'),
('conv_2', 'conv_2_data'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
'conv_1_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])},
'conv_1': {'kernel_spatial': np.array([3, 3]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3]), },
'conv_1_data': {'shape': np.array([1, 3, 112, 112])},
'conv_2_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])},
'conv_2': {'kernel_spatial': np.array([3, 3]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3]), },
'conv_2_data': {'shape': np.array([1, 3, 56, 56])},
},
nodes_with_edges_only=True)
graph.graph['layout'] = 'NCHW'
graph_ref.graph['layout'] = 'NCHW'
stride_optimization(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'conv_2_data', check_op_attrs=True)
self.assertTrue(flag, resp)
# Pl--->Conv(3x3,2x2)->ReLU--->Eltwise-->Conv(1x1,2x2) => Pl--->Conv(3x3,4x4)->ReLU--->Eltwise-->Conv(1x1,1x1)
# `-->Conv(3x3,2x2)->ReLU---` `-->Conv(3x3,4x4)->ReLU---`
def test_resnet_optimization_4(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'conv_1'),
('conv_1_w', 'conv_1'),
('conv_1_b', 'conv_1'),
('conv_1', 'conv_1_data'),
('conv_1_data', 'relu_1'),
('relu_1', 'relu_1_data'),
('placeholder_1_data', 'conv_2'),
('conv_2_w', 'conv_2'),
('conv_2_b', 'conv_2'),
('conv_2', 'conv_2_data'),
('conv_2_data', 'relu_2'),
('relu_2', 'relu_2_data'),
('relu_1_data', 'eltwise_1'),
('relu_2_data', 'eltwise_1'),
('eltwise_1', 'eltwise_1_data'),
('eltwise_1_data', 'conv_3'),
('conv_3_w', 'conv_3'),
('conv_3_b', 'conv_3'),
('conv_3', 'conv_3_data'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
'conv_1_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])},
'conv_1': {'kernel_spatial': np.array([3, 3]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3]), },
'conv_1_data': {'shape': np.array([1, 3, 112, 112])},
'relu_1_data': {'shape': np.array([1, 3, 112, 112])},
'conv_2_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])},
'conv_2': {'kernel_spatial': np.array([3, 3]),
'stride': np.array([1, 1, 2, 2]),
'output': | np.array([3]) | numpy.array |
import numpy as np
import unittest
from rbf.pde import geometry as geo
from rbf.pde.domain import Domain, circle, sphere
class Test(unittest.TestCase):
def test_intersection_count_2d(self):
vert, smp = circle()
pnt1 = np.random.normal(0.0, 2.0, (1000, 2))
pnt2 = np.random.normal(0.0, 2.0, (1000, 2))
out1 = geo.intersection_count(pnt1, pnt2, vert, smp)
dom = Domain(vert, smp)
out2 = dom.intersection_count(pnt1, pnt2)
dom.build_rtree()
out3 = dom.intersection_count(pnt1, pnt2)
self.assertTrue(np.all(out1 == out2))
self.assertTrue(np.all(out1 == out3))
def test_intersection_count_3d(self):
vert, smp = sphere()
pnt1 = | np.random.normal(0.0, 2.0, (1000, 3)) | numpy.random.normal |
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from numpy import array, mean, unique, vstack
from os.path import join
mpl.rcParams.update({'font.size': 18})
def my_plot(vector, xlabel_str=None, ylabel_str=None, title_str=None,
output_file=None):
plt.plot(vector)
if xlabel_str is not None:
plt.xlabel(xlabel_str)
if ylabel_str is not None:
plt.ylabel(ylabel_str)
if title_str is not None:
plt.title(title_str)
if output_file is not None:
plt.savefig(output_file)
def imshow_(x, **kwargs):
if x.ndim == 2:
im = plt.imshow(x, interpolation="nearest", **kwargs)
elif x.ndim == 1:
im = plt.imshow(x[:,None].T, interpolation="nearest", **kwargs)
plt.yticks([])
plt.axis("tight")
return im
def viz_sequence_predictions(nb_classes, split, y_pred, y_true, output_file):
# # Output all truth/prediction pairs
plt.figure(split, figsize=(20, 10))
n_test = len(y_true)
P_test_ = array(y_pred) / float(nb_classes - 1)
y_test_ = array(y_true) / float(nb_classes - 1)
values = []
for i in range(len(y_true)):
P_tmp = vstack([y_test_[i][:], P_test_[i][:]])
plt.subplot(n_test, 1, i + 1)
im = imshow_(P_tmp, vmin=0, vmax=1, cmap=plt.cm.jet)
plt.xticks([])
plt.yticks([])
acc = mean(y_true[i] == y_pred[i]) * 100
plt.ylabel("{:.01f}".format(acc))
values.append(unique(P_tmp.ravel()))
print("Visualized predictions")
plt.savefig(output_file)
plt.clf()
def plot_label_seq(label_seq, nb_classes, y_label=None, actions=None,
cmap='rainbow', output_file=None, title=None,
legend=None, figsize=None):
if figsize is None:
figsize = (20, 2)
# Output all truth/prediction pairs
actions_in_seq = | unique(label_seq) | numpy.unique |
import numpy as np
from knn_robustness.utils import kth_max, kth_min
from knn_robustness.utils import top_k_min_indices
from knn_robustness.utils import KnnPredictor
class ErgodicVerifier:
def __init__(self, X_train, y_train, n_neighbors):
self._X_train = X_train
self._y_train = y_train
self._n_neighbors = n_neighbors
self._predictor = KnnPredictor(X_train, y_train, n_neighbors)
def predict_batch(self, X_eval):
return self._predictor.predict_batch(X_eval)
def predict_individual(self, x_eval):
return self._predictor.predict_individual(x_eval)
def __call__(self, x_eval):
X_pos, X_neg = self._partition(x_eval)
bounds = np.empty(X_neg.shape[0])
for j, x_neg in enumerate(X_neg):
bounds[j] = kth_max(
np.maximum(
np.sum(
| np.multiply(2 * x_eval - X_pos - x_neg, X_pos - x_neg) | numpy.multiply |
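            # Illustrative note (added; the expression above is truncated in this dump). Up to
            # a normalizing factor, it assembles the signed distance from x_eval to the
            # perpendicular bisector of a same-class point x_pos and an other-class point x_neg:
            #     d(x) = (||x - x_neg||^2 - ||x - x_pos||^2) / (2 * ||x_pos - x_neg||)
            #          = (2x - x_pos - x_neg) . (x_pos - x_neg) / (2 * ||x_pos - x_neg||)
            # A k-NN prediction cannot change until a perturbation crosses enough of these
            # bisectors, which is why kth_max / kth_min over such distances gives a bound.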
# -*- coding: utf-8 -*-
import numpy as np
from scipy.stats import f #fisher
from . import dv, zero_finding
import lmfit
LinAlgError = np.linalg.LinAlgError
from .base_functions import (_fold_exp,
_coh_gaussian,
_fold_exp_and_coh)
import scipy.linalg as linalg
posv = linalg.get_lapack_funcs(('posv'))
def direct_solve(a, b):
c, x, info = posv(a, b, lower=False,
overwrite_a=True,
overwrite_b=False)
return x
alpha = 0.001
def solve_mat(A, b_mat, method='ridge'):
"""
Returns the solution for the least squares problem |Ax - b_i|^2.
"""
if method == 'fast':
#return linalg.solve(A.T.dot(A), A.T.dot(b_mat), sym_pos=True)
return direct_solve(A.T.dot(A), A.T.dot(b_mat))
elif method == 'ridge':
X = | np.dot(A.T, A) | numpy.dot |
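        # Illustrative note (added; the 'ridge' branch is truncated in this dump). A common
        # completion, assumed here rather than taken from the original source, solves the
        # regularized normal equations (A^T A + alpha*I) x = A^T b for every column of b_mat:
        #   X = np.dot(A.T, A)
        #   X.flat[::X.shape[0] + 1] += alpha      # add alpha to the diagonal in place
        #   return np.linalg.solve(X, np.dot(A.T, b_mat))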
import numpy as np
from gym.spaces import Box
from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv, _assert_task_is_set
class SawyerFaucetCloseEnvV2(SawyerXYZEnv):
def __init__(self, view, train, random_init_obj_pos):
hand_low = (-0.5, 0.40, -0.15)
hand_high = (0.5, 1, 0.5)
self.train = train
self.train_positions = dict(
obj_low = (-0.3, 0.80, 0.0),
obj_high = (-0.1, 0.85, 0.0),
)
self.test_positions = dict(
obj_low = (-0.5, 0.70, 0.0),
obj_high = (-0.1, 0.90, 0.0),
)
if self.train:
obj_low = self.train_positions['obj_low']
obj_high = self.train_positions['obj_high']
else:
obj_low = self.test_positions['obj_low']
obj_high = self.test_positions['obj_high']
self._handle_length = 0.175
self._target_radius = 0.07
super().__init__(
self.model_name,
view=view,
hand_low=hand_low,
hand_high=hand_high,
random_init_obj_pos=random_init_obj_pos,
)
self.init_config = {
'obj_init_pos': np.array([0, 0.8, 0.0]),
'hand_init_pos': np.array([0.1, .4, .2])
}
self.hand_init_pos = self.init_config['hand_init_pos']
self.obj_init_pos = self.init_config['obj_init_pos']
goal_low = self.hand_low
goal_high = self.hand_high
self._random_reset_space = Box(
np.array(obj_low),
np.array(obj_high),
)
self.goal_space = Box(np.array(goal_low), np.array(goal_high))
@property
def model_name(self):
return full_v2_path_for('sawyer_xyz/sawyer_faucet.xml')
@_assert_task_is_set
def evaluate_state(self, obs, action):
(reward, tcp_to_obj, _, target_to_obj, object_grasped,
in_place) = self.compute_reward(action, obs)
info = {
'success': float(target_to_obj <= 0.07),
'near_object': float(tcp_to_obj <= 0.01),
'grasp_success': 1.,
'grasp_reward': object_grasped,
'in_place_reward': in_place,
'obj_to_target': target_to_obj,
'unscaled_reward': reward,
}
return reward, info
@property
def _target_site_config(self):
return [('goal_close', self._target_pos),
('goal_open', np.array([10., 10., 10.]))]
def _get_quat_objects(self):
return self.sim.data.get_body_xquat('faucetBase')
def _get_pos_objects(self):
return self._get_site_pos('handleStartClose') + np.array(
[0., 0., -0.01])
def reset_model(self, seed=None):
self._reset_hand()
if seed is not None:
| np.random.seed(seed=seed) | numpy.random.seed |
'''
A short script to plot the outputs of the SSP Mutual Information sampling
Is currently assuming that the data is stored in the format:
/<path-to>/<test-function-name>/<selection-agent>
where <selection-agent> is one of {gp-mi,ssp-mi}
'''
import numpy as np
import numpy.matlib as matlib
import pandas as pd
import pytry
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.use('pgf')
mpl.rcParams.update({
'pgf.texsystem': 'pdflatex',
'font.family': 'serif',
'text.usetex': True,
'pgf.rcfonts': False,
'pdf.fonttype': 42,
'ps.fonttype': 42,
'figure.autolayout': True
})
from argparse import ArgumentParser
import best
def get_data(data_frame):
regret = np.vstack(data_frame['regret'].values)
avg_regret = np.vstack(data_frame['avg_regret'].values)
time = | np.vstack(data_frame['times'].values) | numpy.vstack |
import argparse
import os
import os.path as osp
import numpy as np
import math
import itertools
import copy
import pickle
from sys import exit
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Sequential, Linear, ReLU, Sigmoid, Tanh, Dropout, LeakyReLU
from torch.autograd import Variable
from torch.distributions import normal, kl
from sklearn import preprocessing
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import KFold
from torch_geometric.data import Data, InMemoryDataset, DataLoader
from torch_geometric.nn import NNConv, BatchNorm, EdgePooling, TopKPooling, global_add_pool
from torch_geometric.utils import get_laplacian, to_dense_adj
import matplotlib.pyplot as plt
from data_utils import MRDataset, create_edge_index_attribute, swap, cross_val_indices, MRDataset2
from model import Generator, Discriminator
from plot import plot, plot_matrix
torch.manual_seed(0) # To get the same results across experiments
if torch.cuda.is_available():
device = torch.device('cuda')
print('running on GPU')
else:
device = torch.device("cpu")
print('running on CPU')
# Parser
parser = argparse.ArgumentParser()
parser.add_argument('--lr_g', type=float, default=0.01, help='Generator learning rate')
parser.add_argument('--lr_d', type=float, default=0.0002, help='Discriminator learning rate')
parser.add_argument('--loss', type=str, default='BCE', help='Which loss to use for training',
choices=['BCE', 'LS'])
parser.add_argument('--batch', type=int, default=1, help='Batch Size')
parser.add_argument('--epoch', type=int, default=500, help='How many epochs to train')
parser.add_argument('--folds', type=int, default=3, help='How many folds for CV')
parser.add_argument('--tr_st', type=str, default='same', help='Training strategy',
choices=['same', 'turns', 'idle'])
parser.add_argument('--id_e', type=int, default=2, help='If training strategy is idle, for how many epochs')
parser.add_argument('--exp', type=int, default=0, help='Which experiment are you running')
parser.add_argument('--tp_c', type=float, default=0.0, help='Coefficient of topology loss')
parser.add_argument('--g_c', type=float, default=2.0, help='Coefficient of adversarial loss')
parser.add_argument('--i_c', type=float, default=2.0, help='Coefficient of identity loss')
parser.add_argument('--kl_c', type=float, default=0.001, help='Coefficient of KL loss')
parser.add_argument('--decay', type=float, default=0.0, help='Weight Decay')
opt = parser.parse_args()
# Datasets
h_data = MRDataset2("../data", "lh", subs=989)
# Parameters
batch_size = opt.batch
lr_G = opt.lr_g
lr_D = opt.lr_d
num_epochs = opt.epoch
folds = opt.folds
connectomes = 1
train_generator = 1
# Coefficients for loss
i_coeff = opt.i_c
g_coeff = opt.g_c
kl_coeff = opt.kl_c
tp_coeff = opt.tp_c
if opt.tr_st != 'idle':
opt.id_e = 0
# Training
loss_dict = {"BCE": torch.nn.BCELoss().to(device),
"LS": torch.nn.MSELoss().to(device)}
adversarial_loss = loss_dict[opt.loss.upper()]
identity_loss = torch.nn.L1Loss().to(device) # Will be used in training
msel = torch.nn.MSELoss().to(device)
mael = torch.nn.L1Loss().to(device) # Not to be used in training (Measure generator success)
counter_g, counter_d = 0, 0
tp = torch.nn.MSELoss().to(device) # Used for node strength
train_ind, val_ind = cross_val_indices(folds, len(h_data))
# Saving the losses for the future
gen_mae_losses_tr = None
disc_real_losses_tr = None
disc_fake_losses_tr = None
gen_mae_losses_val = None
disc_real_losses_val = None
disc_fake_losses_val = None
gen_mae_losses_tr2 = None
disc_real_losses_tr2 = None
disc_fake_losses_tr2 = None
gen_mae_losses_val2 = None
disc_real_losses_val2 = None
disc_fake_losses_val2 = None
k1_train_s = None
k2_train_s = None
k1_val_s = None
k2_val_s = None
tp1_train_s = None
tp2_train_s = None
tp1_val_s = None
tp2_val_s = None
gan1_train_s = None
gan2_train_s = None
gan1_val_s = None
gan2_val_s = None
# Cross Validation
for fold in range(folds):
train_set, val_set = h_data[list(train_ind[fold])], h_data[list(val_ind[fold])]
h_data_train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
h_data_test_loader = DataLoader(val_set, batch_size=batch_size, shuffle=True)
val_step = len(h_data_test_loader)
for data in h_data_train_loader: # Determine the maximum number of samples in a batch
data_size = data.x.size(0)
break
# Create generators and discriminators
generator = Generator().to(device)
generator2 = Generator().to(device)
discriminator = Discriminator().to(device)
discriminator2 = Discriminator().to(device)
optimizer_G = torch.optim.AdamW(generator.parameters(), lr=lr_G, betas=(0.5, 0.999), weight_decay=opt.decay)
optimizer_D = torch.optim.AdamW(discriminator.parameters(), lr=lr_D, betas=(0.5, 0.999), weight_decay=opt.decay)
optimizer_G2 = torch.optim.AdamW(generator2.parameters(), lr=lr_G, betas=(0.5, 0.999), weight_decay=opt.decay)
optimizer_D2 = torch.optim.AdamW(discriminator2.parameters(), lr=lr_D, betas=(0.5, 0.999), weight_decay=opt.decay)
total_step = len(h_data_train_loader)
real_label = torch.ones((data_size, 1)).to(device)
fake_label = torch.zeros((data_size, 1)).to(device)
# Will be used for reporting
real_losses, fake_losses, mse_losses, mae_losses = list(), list(), list(), list()
real_losses_val, fake_losses_val, mse_losses_val, mae_losses_val = list(), list(), list(), list()
real_losses2, fake_losses2, mse_losses2, mae_losses2 = list(), list(), list(), list()
real_losses_val2, fake_losses_val2, mse_losses_val2, mae_losses_val2 = list(), list(), list(), list()
k1_losses, k2_losses, k1_losses_val, k2_losses_val = list(), list(), list(), list()
tp_losses_1_tr, tp_losses_1_val, tp_losses_2_tr, tp_losses_2_val = list(), list(), list(), list()
gan_losses_1_tr, gan_losses_1_val, gan_losses_2_tr, gan_losses_2_val = list(), list(), list(), list()
for epoch in range(num_epochs):
# Reporting
r, f, d, g, mse_l, mae_l = 0, 0, 0, 0, 0, 0
r_val, f_val, d_val, g_val, mse_l_val, mae_l_val = 0, 0, 0, 0, 0, 0
k1_train, k2_train, k1_val, k2_val = 0.0, 0.0, 0.0, 0.0
r2, f2, d2, g2, mse_l2, mae_l2 = 0, 0, 0, 0, 0, 0
r_val2, f_val2, d_val2, g_val2, mse_l_val2, mae_l_val2 = 0, 0, 0, 0, 0, 0
tp1_tr, tp1_val, tp2_tr, tp2_val = 0.0, 0.0, 0.0, 0.0
gan1_tr, gan1_val, gan2_tr, gan2_val = 0.0, 0.0, 0.0, 0.0
# Train
generator.train()
discriminator.train()
generator2.train()
discriminator2.train()
for i, data in enumerate(h_data_train_loader):
data = data.to(device)
optimizer_D.zero_grad()
# Train the discriminator
# Create fake data
fake_y = generator(data).detach()
edge_i, edge_a, _, _ = create_edge_index_attribute(fake_y)
fake_data = Data(x=fake_y, edge_attr=edge_a, edge_index=edge_i).to(device)
swapped_data = Data(x=data.y, edge_attr=data.y_edge_attr, edge_index=data.y_edge_index).to(device)
# data: Real source and target
# fake_data: Real source and generated target
real_loss = adversarial_loss(discriminator(swapped_data, data), real_label[:data.x.size(0), :])
fake_loss = adversarial_loss(discriminator(fake_data, data), fake_label[:data.x.size(0), :])
loss_D = torch.mean(real_loss + fake_loss) / 2
r += real_loss.item()
f += fake_loss.item()
d += loss_D.item()
# Depending on the chosen training method, we might update the parameters of the discriminator
if (epoch % 2 == 1 and opt.tr_st == "turns") or opt.tr_st == "same" or counter_d >= opt.id_e:
loss_D.backward(retain_graph=True)
optimizer_D.step()
# Train the generator
optimizer_G.zero_grad()
# Adversarial Loss
fake_data.x = generator(data)
gan_loss = torch.mean(adversarial_loss(discriminator(fake_data, data), real_label[:data.x.size(0), :]))
gan1_tr += gan_loss.item()
# KL Loss
kl_loss = kl.kl_divergence(normal.Normal(fake_data.x.mean(dim=1), fake_data.x.std(dim=1)),
normal.Normal(data.y.mean(dim=1), data.y.std(dim=1))).sum()
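            # Note (added): this matches the per-node mean/std of the generated connectivity
            # rows to those of the ground truth via the closed form for two Gaussians,
            # KL(N(m1,s1) || N(m2,s2)) = log(s2/s1) + (s1^2 + (m1-m2)^2) / (2*s2^2) - 1/2,
            # summed over nodes; torch.distributions.kl supplies the formula.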
# Topology Loss
tp_loss = tp(fake_data.x.sum(dim=-1), data.y.sum(dim=-1))
tp1_tr += tp_loss.item()
# Identity Loss is included in the end
loss_G = i_coeff * identity_loss(generator(swapped_data), data.y) + g_coeff * gan_loss + kl_coeff * kl_loss + tp_coeff * tp_loss
g += loss_G.item()
if (epoch % 2 == 0 and opt.tr_st == "turns") or opt.tr_st == "same" or counter_g < opt.id_e:
loss_G.backward(retain_graph=True)
optimizer_G.step()
k1_train += kl_loss.item()
mse_l += msel(generator(data), data.y).item()
mae_l += mael(generator(data), data.y).item()
            # Training of the second GAN (generated t1 -> t2)
optimizer_D2.zero_grad()
# Train the discriminator2
# Create fake data for t2 from fake data for t1
fake_data.x = fake_data.x.detach()
fake_y2 = generator2(fake_data).detach()
edge_i, edge_a, _, _ = create_edge_index_attribute(fake_y2)
fake_data2 = Data(x=fake_y2, edge_attr=edge_a, edge_index=edge_i).to(device)
swapped_data2 = Data(x=data.y2, edge_attr=data.y2_edge_attr, edge_index=data.y2_edge_index).to(device)
# fake_data: Data generated for t1
# fake_data2: Data generated for t2 using generated data for t1
# swapped_data2: Real t2 data
real_loss = adversarial_loss(discriminator2(swapped_data2, fake_data), real_label[:data.x.size(0), :])
fake_loss = adversarial_loss(discriminator2(fake_data2, fake_data), fake_label[:data.x.size(0), :])
loss_D = torch.mean(real_loss + fake_loss) / 2
r2 += real_loss.item()
f2 += fake_loss.item()
d2 += loss_D.item()
if (epoch % 2 == 1 and opt.tr_st == "turns") or opt.tr_st == "same" or counter_d >= opt.id_e:
loss_D.backward(retain_graph=True)
optimizer_D2.step()
# Train generator2
optimizer_G2.zero_grad()
# Adversarial Loss
fake_data2.x = generator2(fake_data)
gan_loss = torch.mean(adversarial_loss(discriminator2(fake_data2, fake_data), real_label[:data.x.size(0), :]))
gan2_tr += gan_loss.item()
# Topology Loss
tp_loss = tp(fake_data2.x.sum(dim=-1), data.y2.sum(dim=-1))
tp2_tr += tp_loss.item()
# KL Loss
kl_loss = kl.kl_divergence(normal.Normal(fake_data2.x.mean(dim=1), fake_data2.x.std(dim=1)),
normal.Normal(data.y2.mean(dim=1), data.y2.std(dim=1))).sum()
# Identity Loss
loss_G = i_coeff * identity_loss(generator(swapped_data2), data.y2) + g_coeff * gan_loss + kl_coeff * kl_loss + tp_coeff * tp_loss
g2 += loss_G.item()
if (epoch % 2 == 0 and opt.tr_st == "turns") or opt.tr_st == "same" or counter_g < opt.id_e:
loss_G.backward(retain_graph=True)
optimizer_G2.step()
k2_train += kl_loss.item()
mse_l2 += msel(generator2(fake_data), data.y2).item()
mae_l2 += mael(generator2(fake_data), data.y2).item()
# Validate
generator.eval()
discriminator.eval()
generator2.eval()
discriminator2.eval()
for i, data in enumerate(h_data_test_loader):
data = data.to(device)
# Train the discriminator
# Create fake data
fake_y = generator(data).detach()
edge_i, edge_a, _, _ = create_edge_index_attribute(fake_y)
fake_data = Data(x=fake_y, edge_attr=edge_a, edge_index=edge_i).to(device)
swapped_data = Data(x=data.y, edge_attr=data.y_edge_attr, edge_index=data.y_edge_index).to(device)
# data: Real source and target
# fake_data: Real source and generated target
real_loss = adversarial_loss(discriminator(swapped_data, data), real_label[:data.x.size(0), :])
fake_loss = adversarial_loss(discriminator(fake_data, data), fake_label[:data.x.size(0), :])
loss_D = torch.mean(real_loss + fake_loss) / 2
r_val += real_loss.item()
f_val += fake_loss.item()
d_val += loss_D.item()
# Adversarial Loss
fake_data.x = generator(data)
gan_loss = torch.mean(adversarial_loss(discriminator(fake_data, data), real_label[:data.x.size(0), :]))
gan1_val += gan_loss.item()
# Topology Loss
tp_loss = tp(fake_data.x.sum(dim=-1), data.y.sum(dim=-1))
tp1_val += tp_loss.item()
kl_loss = kl.kl_divergence(normal.Normal(fake_data.x.mean(dim=1), fake_data.x.std(dim=1)),
normal.Normal(data.y.mean(dim=1), data.y.std(dim=1))).sum()
# Identity Loss
            loss_G = i_coeff * identity_loss(generator(swapped_data), data.y) + g_coeff * gan_loss + kl_coeff * kl_loss
g_val += loss_G.item()
mse_l_val += msel(generator(data), data.y).item()
mae_l_val += mael(generator(data), data.y).item()
k1_val += kl_loss.item()
# Second GAN
# Create fake data for t2 from fake data for t1
fake_data.x = fake_data.x.detach()
fake_y2 = generator2(fake_data)
edge_i, edge_a, _, _ = create_edge_index_attribute(fake_y2)
fake_data2 = Data(x=fake_y2, edge_attr=edge_a, edge_index=edge_i).to(device)
swapped_data2 = Data(x=data.y2, edge_attr=data.y2_edge_attr, edge_index=data.y2_edge_index).to(device)
# fake_data: Data generated for t1
# fake_data2: Data generated for t2 using generated data for t1
# swapped_data2: Real t2 data
real_loss = adversarial_loss(discriminator2(swapped_data2, fake_data), real_label[:data.x.size(0), :])
fake_loss = adversarial_loss(discriminator2(fake_data2, fake_data), fake_label[:data.x.size(0), :])
loss_D = torch.mean(real_loss + fake_loss) / 2
r_val2 += real_loss.item()
f_val2 += fake_loss.item()
d_val2 += loss_D.item()
# Adversarial Loss
fake_data2.x = generator2(fake_data)
gan_loss = torch.mean(adversarial_loss(discriminator2(fake_data2, fake_data), real_label[:data.x.size(0), :]))
gan2_val += gan_loss.item()
# Topology Loss
tp_loss = tp(fake_data2.x.sum(dim=-1), data.y2.sum(dim=-1))
tp2_val += tp_loss.item()
# KL Loss
kl_loss = kl.kl_divergence(normal.Normal(fake_data2.x.mean(dim=1), fake_data2.x.std(dim=1)),
normal.Normal(data.y2.mean(dim=1), data.y2.std(dim=1))).sum()
k2_val += kl_loss.item()
# Identity Loss
loss_G = i_coeff * identity_loss(generator(swapped_data2), data.y2) + g_coeff * gan_loss + kl_coeff * kl_loss
g_val2 += loss_G.item()
mse_l_val2 += msel(generator2(fake_data), data.y2).item()
mae_l_val2 += mael(generator2(fake_data), data.y2).item()
if opt.tr_st == 'idle':
counter_g += 1
counter_d += 1
if counter_g == 2 * opt.id_e:
counter_g = 0
counter_d = 0
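        # Note (added): under the 'idle' strategy the generators update only while
        # counter_g < id_e and the discriminators only once counter_d >= id_e (see the
        # guards in the training loop), so each side trains for id_e epochs while the
        # other idles; the counters reset every 2*id_e epochs to repeat the alternation.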
print(f'Epoch [{epoch + 1}/{num_epochs}]')
print(f'[Train]: D Loss: {d / total_step:.5f}, G Loss: {g / total_step:.5f} R Loss: {r / total_step:.5f}, F Loss: {f / total_step:.5f}, MSE: {mse_l / total_step:.5f}, MAE: {mae_l / total_step:.5f}')
print(f'[Val]: D Loss: {d_val / val_step:.5f}, G Loss: {g_val / val_step:.5f} R Loss: {r_val / val_step:.5f}, F Loss: {f_val / val_step:.5f}, MSE: {mse_l_val / val_step:.5f}, MAE: {mae_l_val / val_step:.5f}')
print(f'[Train]: D2 Loss: {d2 / total_step:.5f}, G2 Loss: {g2 / total_step:.5f} R2 Loss: {r2 / total_step:.5f}, F2 Loss: {f2 / total_step:.5f}, MSE: {mse_l2 / total_step:.5f}, MAE: {mae_l2 / total_step:.5f}')
print(f'[Val]: D2 Loss: {d_val2 / val_step:.5f}, G2 Loss: {g_val2 / val_step:.5f} R2 Loss: {r_val2 / val_step:.5f}, F2 Loss: {f_val2 / val_step:.5f}, MSE: {mse_l_val2 / val_step:.5f}, MAE: {mae_l_val2 / val_step:.5f}')
real_losses.append(r / total_step)
fake_losses.append(f / total_step)
mse_losses.append(mse_l / total_step)
mae_losses.append(mae_l / total_step)
real_losses_val.append(r_val / val_step)
fake_losses_val.append(f_val / val_step)
mse_losses_val.append(mse_l_val / val_step)
mae_losses_val.append(mae_l_val / val_step)
real_losses2.append(r2 / total_step)
fake_losses2.append(f2 / total_step)
mse_losses2.append(mse_l2 / total_step)
mae_losses2.append(mae_l2 / total_step)
real_losses_val2.append(r_val2 / val_step)
fake_losses_val2.append(f_val2 / val_step)
mse_losses_val2.append(mse_l_val2 / val_step)
mae_losses_val2.append(mae_l_val2 / val_step)
k1_losses.append(k1_train / total_step)
k2_losses.append(k2_train / total_step)
k1_losses_val.append(k1_val / val_step)
k2_losses_val.append(k2_val / val_step)
tp_losses_1_tr.append(tp1_tr / total_step)
tp_losses_1_val.append(tp1_val / val_step)
tp_losses_2_tr.append(tp2_tr / total_step)
tp_losses_2_val.append(tp2_val / val_step)
gan_losses_1_tr.append(gan1_tr / total_step)
gan_losses_1_val.append(gan1_val / val_step)
gan_losses_2_tr.append(gan2_tr / total_step)
gan_losses_2_val.append(gan2_val / val_step)
# Plot losses
plot("BCE", "DiscriminatorRealLossTrainSet" + str(fold) + "_exp" + str(opt.exp), real_losses)
plot("BCE", "DiscriminatorRealLossValSet" + str(fold) + "_exp" + str(opt.exp), real_losses_val)
plot("BCE", "DiscriminatorFakeLossTrainSet" + str(fold) + "_exp" + str(opt.exp), fake_losses)
plot("BCE", "DiscriminatorFakeLossValSet" + str(fold) + "_exp" + str(opt.exp), fake_losses_val)
plot("MSE", "GeneratorMSELossTrainSet" + str(fold) + "_exp" + str(opt.exp), mse_losses)
plot("MSE", "GeneratorMSELossValSet" + str(fold) + "_exp" + str(opt.exp), mse_losses_val)
plot("MAE", "GeneratorMAELossTrainSet" + str(fold) + "_exp" + str(opt.exp), mae_losses)
plot("MAE", "GeneratorMAELossValSet" + str(fold) + "_exp" + str(opt.exp), mae_losses_val)
plot("BCE", "Discriminator2RealLossTrainSet" + str(fold) + "_exp" + str(opt.exp), real_losses2)
plot("BCE", "Discriminator2RealLossValSet" + str(fold) + "_exp" + str(opt.exp), real_losses_val2)
plot("BCE", "Discriminator2FakeLossTrainSet" + str(fold) + "_exp" + str(opt.exp), fake_losses2)
plot("BCE", "Discriminator2FakeLossValSet" + str(fold) + "_exp" + str(opt.exp), fake_losses_val2)
plot("MSE", "Generator2MSELossTrainSet" + str(fold) + "_exp" + str(opt.exp), mse_losses2)
plot("MSE", "Generator2MSELossValSet" + str(fold) + "_exp" + str(opt.exp), mse_losses_val2)
plot("MAE", "Generator2MAELossTrainSet" + str(fold) + "_exp" + str(opt.exp), mae_losses2)
plot("MAE", "Generator2MAELossValSet" + str(fold) + "_exp" + str(opt.exp), mae_losses_val2)
plot("KL Loss", "KL_Loss_1_TrainSet" + str(fold) + "_exp" + str(opt.exp), k1_losses)
plot("KL Loss", "KL_Loss_1_ValSet" + str(fold) + "_exp" + str(opt.exp), k1_losses_val)
plot("KL Loss", "KL_Loss_2_TrainSet" + str(fold) + "_exp" + str(opt.exp), k2_losses)
plot("KL Loss", "KL_Loss_2_ValSet" + str(fold) + "_exp" + str(opt.exp), k2_losses_val)
plot("TP Loss", "TP_Loss_1_TrainSet" + str(fold) + "_exp" + str(opt.exp), tp_losses_1_tr)
plot("TP Loss", "TP_Loss_1_ValSet" + str(fold) + "_exp" + str(opt.exp), tp_losses_1_val)
plot("TP Loss", "TP_Loss_2_TrainSet" + str(fold) + "_exp" + str(opt.exp), tp_losses_2_tr)
plot("TP Loss", "TP_Loss_2_ValSet" + str(fold) + "_exp" + str(opt.exp), tp_losses_2_val)
plot("BCE", "GAN_Loss_1_TrainSet" + str(fold) + "_exp" + str(opt.exp), gan_losses_1_tr)
plot("BCE", "GAN_Loss_1_ValSet" + str(fold) + "_exp" + str(opt.exp), gan_losses_1_val)
plot("BCE", "GAN_Loss_2_TrainSet" + str(fold) + "_exp" + str(opt.exp), gan_losses_2_tr)
plot("BCE", "GAN_Loss_2_ValSet" + str(fold) + "_exp" + str(opt.exp), gan_losses_2_val)
# Save the losses
if gen_mae_losses_tr is None:
gen_mae_losses_tr = mae_losses
disc_real_losses_tr = real_losses
disc_fake_losses_tr = fake_losses
gen_mae_losses_val = mae_losses_val
disc_real_losses_val = real_losses_val
disc_fake_losses_val = fake_losses_val
gen_mae_losses_tr2 = mae_losses2
disc_real_losses_tr2 = real_losses2
disc_fake_losses_tr2 = fake_losses2
gen_mae_losses_val2 = mae_losses_val2
disc_real_losses_val2 = real_losses_val2
disc_fake_losses_val2 = fake_losses_val2
k1_train_s = k1_losses
k2_train_s = k2_losses
k1_val_s = k1_losses_val
k2_val_s = k2_losses_val
tp1_train_s = tp_losses_1_tr
tp2_train_s = tp_losses_2_tr
tp1_val_s = tp_losses_1_val
tp2_val_s = tp_losses_2_val
gan1_train_s = gan_losses_1_tr
gan2_train_s = gan_losses_2_tr
gan1_val_s = gan_losses_1_val
gan2_val_s = gan_losses_2_val
else:
gen_mae_losses_tr = np.vstack([gen_mae_losses_tr, mae_losses])
disc_real_losses_tr = np.vstack([disc_real_losses_tr, real_losses])
disc_fake_losses_tr = np.vstack([disc_fake_losses_tr, fake_losses])
gen_mae_losses_val = np.vstack([gen_mae_losses_val, mae_losses_val])
disc_real_losses_val = np.vstack([disc_real_losses_val, real_losses_val])
disc_fake_losses_val = np.vstack([disc_fake_losses_val, fake_losses_val])
gen_mae_losses_tr2 = np.vstack([gen_mae_losses_tr2, mae_losses2])
disc_real_losses_tr2 = np.vstack([disc_real_losses_tr2, real_losses2])
disc_fake_losses_tr2 = np.vstack([disc_fake_losses_tr2, fake_losses2])
        gen_mae_losses_val2 = np.vstack([gen_mae_losses_val2, mae_losses_val2])
# -*- coding: utf-8 -*-
"""
Aquí vamos a meter todo lo relativo a la representación de las variables
de decisión así como la población (Punto, Poblacion, funciones de mutación,
funciones de cruce, funciones de selección)
"""
import numpy as np
from Estadisticas import Estadisticas
class Punto(np.ndarray):
    '''Inherits from np.ndarray and represents a single solution.
    Mutation is defined on points.
    We always treat the point itself as the genotype.
    '''
def __new__(cls, dimensiones, initValue = None, rango = None, \
operadores = None, crowded_distance = None, generacion = 1, dist_fenotipo = None, **kwargs):
        '''To inherit from np.ndarray we must use __new__ instead of __init__'''
obj = np.ndarray.__new__(cls, dimensiones, **kwargs)
obj.gen = generacion
obj.vals = None
obj.rest = None
obj.rgo = rango
obj.crwd = crowded_distance
obj.np = 0
obj.Sp = []
        '''operadores is a dictionary of evolutionary operators'''
if not operadores is None:
Punto._mutar = operadores['mutador']
Punto._fenotipo = operadores['fenotipo']
if not dist_fenotipo is None:
Punto.dist_fenotipo = dist_fenotipo
obj.setPunto(vector = initValue)
return obj
def setPunto(self, vector = None):
if vector is None:
self[:] = 0
else:
for i in range(len(self)):
self[i] = vector[i]
def copy(self, **kwargs):
        '''Return a new point that is a copy of this one'''
p = Punto(dimensiones = len(self), **kwargs)
p.gen = self.gen
p.vals = self.vals
p.rest = self.rest
p.rgo = self.rgo
p.crwd = self.crwd
p.np = self.np
p.Sp = self.Sp[:]
p.setPunto(vector = self)
return p
def fenotipo(self):
        '''For now we use a real-valued representation: phenotype = genotype'''
return self.__class__._fenotipo(self)
def rand(self, problema):
if problema.parametros.get('tipo_var', 'real') == 'real':
self[:] = (problema.lims[:, 1] - problema.lims[:, 0]) * np.random.rand(problema.dims) + problema.lims[:, 0]
else:
for i in range(problema.dims):
self[i] = np.random.choice(problema.lims[i])
def evaluado_en(self, problema):
        '''Evaluate the point with the objective functions provided by the problem'''
if self.vals is None:
self.vals = problema.evaluador(self)
return self.vals
def violacion_restricciones(self, problema):
        '''Compute the constraint-violation level of the point'''
if self.rest is None:
self.rest = problema.violacion_restricciones(self)
return self.rest
def mutar(self, problema):
        '''Ask the point to mutate itself'''
self.__class__._mutar(self, problema)
class Poblacion(list):
    '''The population is a list of Punto objects representing the solutions.
    Crossover and selection are defined on populations.'''
def __init__(self, size, operadores, generacion = 0, stats = None):
self.size = size
self.gen = generacion
if stats is None:
self.stats = Estadisticas('Estadisticas')
else:
self.stats = stats
        self.stats.nuevo_Contador('gens') # Current generation
if not operadores is None:
self.__class__._selector = operadores['selector']
self.__class__._cruzador = operadores['cruzador']
self.__class__._seleccionador = operadores['seleccionador']
def select_with(self, nomCaracteristica, valor):
        '''Select the points whose attribute nomCaracteristica equals valor'''
resultado = []
for p in self:
if p.__getattribute__(nomCaracteristica) == valor:
resultado.append(p)
return resultado
def selector(self, problema):
        '''Select individuals for crossover'''
return self.__class__._selector(self, problema)
def cruzador(self, padre, madre, problema):
        '''Cross two points'''
return self.__class__._cruzador(padre, madre, problema)
def seleccionador(self, subpoblacion, problema):
        '''Select from the population and drop the individuals that are no longer needed'''
return self.__class__._seleccionador(self, subpoblacion, problema)
def union(self, pop):
for p in pop:
self.append(p)
def borrar(self, conjunto):
for p in conjunto:
if p in self:
self.remove(p)
def fast_non_dominated_sort(self, problema):
        '''We follow the algorithm described in "A fast and elitist multiobjective GA: NSGA-II"'''
        #TODO: This procedure could be improved so that the rank of the whole population does not have to be recomputed
frentes = [[]]
for p in self:
p.Sp, p.np = [], 0
for q in self:
dominio = problema.dominadoC(p, q)
                if dominio == 1: # p dominates q
                    p.Sp.append(q)
                elif dominio == -1: # q dominates p
p.np += 1
if p.np == 0:
p.rgo = 1
frentes[0].append(p)
i = 0
while True:
siguienteFrente = []
for p in frentes[i]:
for q in p.Sp:
q.np -= 1
if q.np == 0:
q.rgo = i + 2
siguienteFrente.append(q)
if siguienteFrente == []:
break
frentes.append(siguienteFrente[:])
i += 1
def __contains__(self, item):
for p in self:
if p is item:
return True
return False
def crowding_distance_assignment(I, problema):
    '''We follow the algorithm described in "A fast and elitist multiobjective GA: NSGA-II"'''
I.sort(reverse = True, key = lambda x: x[0])
extremos = [I[0], I[-1]]
for p in I:
p.crwd = 0
for p in extremos:
p.crwd = float('inf')
    #TODO: I have not found a way to do this with numpy
objetivos = []
for p in I:
parcial = [p]
parcial.extend(p.evaluado_en(problema))
objetivos.append(parcial[:])
# objetivos[i] = [p_i, f1(p_i), f2(p_i), ..., fn(p_i)]
for i in range(1, len(problema.objetivos) + 1):
objetivos.sort(key=lambda x: x[i])
fmax = max(objetivos, key=lambda x: x[i])[i]
fmin = min(objetivos, key=lambda x: x[i])[i]
for j in range(1, len(objetivos) - 1):
objetivos[j][0].crwd += (objetivos[j+1][i] - objetivos[j-1][i]) / (fmax - fmin)
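# --- Illustrative sketch (added for clarity, not part of the original module) ---
# crowding_distance_assignment above accumulates, per objective, the normalized gap
# between each point's two sorted neighbours. The helper below reproduces that
# arithmetic on a plain (n_points, n_objectives) array so the formula can be
# inspected without Punto/problema objects; the name and shapes are assumptions.
def _crowding_distance_demo(objective_values):
    F = np.asarray(objective_values, dtype=float)
    n, m = F.shape
    dist = np.zeros(n)
    for j in range(m):
        orden = np.argsort(F[:, j])
        fmin, fmax = F[orden[0], j], F[orden[-1], j]
        dist[orden[0]] = dist[orden[-1]] = float('inf')  # boundary points are always kept
        if fmax == fmin:
            continue
        # interior points: gap between the two neighbours, normalized by the objective range
        dist[orden[1:-1]] += (F[orden[2:], j] - F[orden[:-2], j]) / (fmax - fmin)
    return dist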
############################################
# PHENOTYPES
# They always have the same signature:
#    def name(punto)
# They return:
#    the phenotype corresponding to the point
############################################
def real(punto):
    '''Real-valued representation: phenotype = genotype'''
return punto
def binario(punto):
    '''Binary representation'''
fenotipo = []
for i in range(len(punto.dist_fenotipo)):
li = np.sum(punto.dist_fenotipo[:i])
ui = np.sum(punto.dist_fenotipo[:i + 1])
fenotipo.append(punto[li:ui])
return fenotipo
############################################
# MUTATION OPERATORS
# They always have the same signature:
#    def name(punto, problema)
############################################
def mutador1(punto, problema):
    '''Mutate each component with a probability tied to the dimension of the space
    (pm defaults to 1/dims); a mutated component may take any value within its bounds'''
p = problema.parametros.get('pm', 1 / problema.dims)
mascara = np.random.rand(problema.dims) < p
punto[mascara] = (problema.lims[mascara, 1] - problema.lims[mascara, 0]) \
* np.random.rand(mascara.sum()) + problema.lims[mascara, 0]
def mutadorb(punto, problema):
    '''State mutator for discrete variables: a component is chosen and forced
    to change to one of its other admissible states'''
p = problema.parametros.get('pm', 1 / problema.dims)
mascara = np.random.rand(problema.dims) < p
for i in range(len(problema.lims)):
if not mascara[i]:
continue
nvalor = np.random.choice(problema.lims[i])
while nvalor == punto[i]:
nvalor = np.random.choice(problema.lims[i])
punto[i] = nvalor
def mutador_init(punto, problema):
    '''Re-initialise the point to an arbitrary point of the decision space'''
punto.rand(problema)
def mutacion_aleatoria(punto, problema):
    '''Each component is varied uniformly within the maximum range it is allowed'''
copy_lims = problema.lims.copy()
copy_lims[:,0] = np.abs(copy_lims[:,0] - punto)
copy_lims[:,1] = np.abs(copy_lims[:,1] - punto)
    deltas = np.min(copy_lims, axis = 1) # maximum variation allowed in each component
    u = np.random.rand(problema.dims) * 2 - 1 # the variation applied to each component
punto.setPunto(vector = punto + u * deltas)
def mutacion_polinomial(punto, problema):
    '''Polynomial mutation'''
p = problema.parametros.get('pm', 1 / problema.dims)
eta = problema.parametros.get('mp', 2)
for i in range(problema.dims):
if np.random.rand() >= p:
            # Do not mutate this gene
continue
u = np.random.rand()
if u <= .5:
delta = np.power(2 * u, 1 / (eta + 1)) - 1
punto[i] += delta * (punto[i] - problema.lims[i,0])
else:
delta = 1 - np.power(2 * (1 - u), 1 / (eta + 1))
punto[i] += delta * (problema.lims[i,1] - punto[i])
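# --- Illustrative sketch (added for clarity, not part of the original module) ---
# The perturbation used by mutacion_polinomial above, isolated from the
# Punto/problema machinery: u <= 0.5 yields a negative delta (towards the lower
# bound), u > 0.5 a positive one (towards the upper bound), and a larger eta
# concentrates the deltas around zero.
def _polynomial_delta_demo(u, eta):
    if u <= .5:
        return np.power(2 * u, 1 / (eta + 1)) - 1
    return 1 - np.power(2 * (1 - u), 1 / (eta + 1))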
############################################
# CROSSOVER GENERATORS
# They always have the same signature:
#    def name(selectionForCrossover, basicCrossover)
# They return a function with the signature
#    def $(poblacion, problema)
# which in turn returns:
#    2 children
############################################
def generar_cruzador(selector, cruzador):
def funcion(poblacion, problema):
p1 = selector(poblacion, problema)
p2 = selector(poblacion, problema)
return cruzador(p1, p2, problema)
return funcion
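# Usage sketch (illustration only; 'torneo_binario' is a placeholder name, not a
# selector defined in this file): generar_cruzador composes a parent selector with
# a basic crossover so callers only ever invoke cruzador(poblacion, problema):
#     cruzador = generar_cruzador(torneo_binario, blended_crossover)
#     hijo, hija = cruzador(poblacion, problema)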
############################################
# BASIC CROSSOVER OPERATORS
# They always have the same signature:
#    def name(padre, madre, problema)
# They return:
#    two solution points
############################################
'''Basic crossover operators'''
def line_recombination(padre, madre, problema):
pass
def intermediate_recombination(padre, madre, problema):
pass
def trivial(padre, madre, problema):
    '''Trivial recombiner: simply returns the father and the mother'''
return padre, madre
def blended_crossover(padre, madre, problema):
    '''Blended crossover BLX-alpha (uniform compound recombination)'''
hijo = Punto(dimensiones = problema.dims, **problema.extras)
hija = Punto(dimensiones = problema.dims, **problema.extras)
alpha = problema.parametros.get('blx', .5)
for i in range(problema.dims):
alpha = problema.parametros.get('blx', .5)
entrar = True
while entrar:
            factor = np.random.rand()
import logging
import numpy as np
from yass.geometry import order_channels_by_distance
from yass.templates.util import main_channels, amplitudes
# FIXME: keeping this just because make_training data is still using it
# use template processor instead, remove as soon as make_training_data
# is refactored
def crop_and_align_templates(big_templates, R, neighbors, geom,
crop_spatially=True):
"""Crop (spatially) and align (temporally) templates
Parameters
----------
Returns
-------
"""
logger = logging.getLogger(__name__)
logger.debug('crop and align input shape %s', big_templates.shape)
# copy templates to avoid modifying the original ones
big_templates = np.copy(big_templates)
n_templates, _, _ = big_templates.shape
    # main channels and amplitudes for each template
main_ch = main_channels(big_templates)
amps = amplitudes(big_templates)
# get a template on a main channel and align them
K_big = np.argmax(amps)
templates_mainc = np.zeros((n_templates, big_templates.shape[1]))
t_rec = big_templates[K_big, :, main_ch[K_big]]
t_rec = t_rec/np.sqrt(np.sum(np.square(t_rec)))
for k in range(n_templates):
t1 = big_templates[k, :, main_ch[k]]
t1 = t1/np.sqrt(np.sum(np.square(t1)))
shift = align_templates(t1, t_rec)
logger.debug('Template %i will be shifted by %i', k, shift)
if shift > 0:
templates_mainc[k, :(big_templates.shape[1]-shift)] = t1[shift:]
big_templates[k, :(big_templates.shape[1]-shift)
] = big_templates[k, shift:]
elif shift < 0:
templates_mainc[k, (-shift):] = t1[:(big_templates.shape[1]+shift)]
big_templates[k,
(-shift):] = big_templates[k,
:(big_templates.shape[1]
+ shift)]
else:
templates_mainc[k] = t1
    # determine the temporal center of the templates and crop around it
R2 = int(R/2)
center = np.argmax(np.convolve(
        np.sum(np.square(templates_mainc), 0), np.ones(2*R2+1)
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for the wind_components.ResolveWindComponents plugin."""
import unittest
import iris
import numpy as np
from iris.coord_systems import OSGB
from iris.coords import DimCoord
from iris.tests import IrisTest
from improver.synthetic_data.set_up_test_cubes import set_up_variable_cube
from improver.wind_calculations.wind_components import ResolveWindComponents
RAD_TO_DEG = 180.0 / np.pi
def set_up_cube(data_2d, name, unit):
"""Set up a 2D test cube of wind direction or speed"""
cube = set_up_variable_cube(
data_2d.astype(np.float32), name=name, units=unit, spatial_grid="equalarea"
)
cube.coord("projection_x_coordinate").points = np.linspace(
150000, 250000, data_2d.shape[1]
)
cube.coord("projection_y_coordinate").points = np.linspace(
0, 600000, data_2d.shape[0]
)
for axis in ["x", "y"]:
cube.coord(axis=axis).units = "metres"
cube.coord(axis=axis).coord_system = OSGB()
cube.coord(axis=axis).bounds = None
return cube
def add_new_dimension(cube, npoints, name, unit):
"""Add a new dimension with npoints by copying cube data"""
cubelist = iris.cube.CubeList([])
for i in range(npoints):
newcube = cube.copy(cube.data)
newcube.add_aux_coord(DimCoord(i, name, unit))
cubelist.append(newcube)
merged_cube = cubelist.merge_cube()
return merged_cube
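# Usage sketch (illustration only): add_new_dimension(cube, 3, "realization", "1")
# stacks three copies of `cube` along a new scalar coordinate named "realization",
# which the merge turns into a leading dimension of length 3.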
class Test__repr__(IrisTest):
"""Tests the __repr__ method"""
def test_basic(self):
"""Tests the output string is as expected"""
result = str(ResolveWindComponents())
self.assertEqual(result, "<ResolveWindComponents>")
class Test_calc_true_north_offset(IrisTest):
"""Tests the calc_true_north_offset function"""
def setUp(self):
"""Set up a target cube with OSGB projection"""
wind_angle = np.zeros((3, 5), dtype=np.float32)
self.directions = set_up_cube(wind_angle, "wind_to_direction", "degrees")
self.plugin = ResolveWindComponents()
def test_basic(self):
"""Test function returns correct type"""
result = self.plugin.calc_true_north_offset(self.directions)
self.assertIsInstance(result, np.ndarray)
def test_values(self):
"""Test that for UK National Grid coordinates the angle adjustments
are sensible"""
expected_result = np.array(
[
[2.651483, 2.386892, 2.122119, 1.857182, 1.592121],
[2.921058, 2.629620, 2.337963, 2.046132, 1.754138],
[3.223816, 2.902300, 2.580523, 2.258494, 1.936247],
],
dtype=np.float32,
)
result = self.plugin.calc_true_north_offset(self.directions)
self.assertArrayAlmostEqual(RAD_TO_DEG * result, expected_result)
class Test_resolve_wind_components(IrisTest):
"""Tests the resolve_wind_components method"""
def setUp(self):
"""Set up some arrays to convert"""
self.plugin = ResolveWindComponents()
wind_speed = 10.0 * np.ones((4, 4), dtype=np.float32)
wind_angle = np.array(
[
[0.0, 30.0, 45.0, 60.0],
[90.0, 120.0, 135.0, 150.0],
[180.0, 210.0, 225.0, 240.0],
[270.0, 300.0, 315.0, 330.0],
],
dtype=np.float32,
)
self.wind_cube = set_up_cube(wind_speed, "wind_speed", "knots")
self.directions = set_up_cube(wind_angle, "wind_to_direction", "degrees")
self.adjustments = np.zeros((4, 4), dtype=np.float32)
def test_basic(self):
"""Test function returns correct type"""
uspeed, vspeed = self.plugin.resolve_wind_components(
self.wind_cube, self.directions, self.adjustments
)
self.assertIsInstance(uspeed, iris.cube.Cube)
self.assertIsInstance(vspeed, iris.cube.Cube)
def test_values(self):
"""Test correct values are returned for well-behaved angles"""
expected_uspeed = 5.0 * np.array(
[
[0.0, 1.0, np.sqrt(2.0), np.sqrt(3.0)],
[2.0, np.sqrt(3.0), np.sqrt(2.0), 1.0],
[0.0, -1.0, -np.sqrt(2.0), -np.sqrt(3.0)],
[-2.0, -np.sqrt(3.0), -np.sqrt(2.0), -1.0],
],
dtype=np.float32,
)
expected_vspeed = 5 * np.array(
[
[2.0, np.sqrt(3.0), np.sqrt(2.0), 1.0],
[0.0, -1.0, -np.sqrt(2.0), -np.sqrt(3.0)],
                [-2.0, -np.sqrt(3.0), -np.sqrt(2.0), -1.0],
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# 3rd party imports
import numpy as np
import xarray as xr
# Local imports
from ..pyrf import cotrans, resample, sph2cart, ts_vec_xyz
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright 2020-2021"
__license__ = "MIT"
__version__ = "2.3.7"
__status__ = "Prototype"
def _transformation_matrix(spin_axis, direction):
r_x, r_y, r_z = [spin_axis[:, i] for i in range(3)]
a = 1. / np.sqrt(r_y ** 2 + r_z ** 2)
out = np.zeros((len(a), 3, 3))
out[:, 0, :] = np.transpose(np.stack([a * (r_y ** 2 + r_z ** 2),
-a * r_x * r_y, -a * r_x * r_z]))
out[:, 1, :] = np.transpose(np.stack([0. * a, a * r_z, -a * r_y]))
out[:, 2, :] = np.transpose(np.stack([r_x, r_y, r_z]))
if direction == 1:
        out = np.transpose(out, [0, 2, 1])
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
# http://opensource.org/licenses/MIT>, at your option. This file may not be
# copied, modified, or distributed except according to those terms.
"""Convenient helper functions
Author: <NAME> <<EMAIL>>
"""
from __future__ import print_function
import sys
import os
import numpy as np
from PIL import Image
from scipy.io import savemat
import pycuda.gpuarray as gpuarray
import pycuda.driver as cuda
from pycuda.elementwise import ElementwiseKernel
# pylint: disable=bad-builtin
# pylint: disable=no-member
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer.
Snippet adapted from:
http://code.activestate.com/recipes/577058-query-yesno/
Args:
question (str): Question presented to the user.
default (str): Default answer; 'yes', 'no' or `None`. The latter
requires the user to provide an answer.
Returns:
str: Either "yes" or "no", depending on the user input.
"""
valid = {"yes": "yes", "y": "yes", "ye": "yes", "no": "no", "n": "no"}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while 1:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return default
elif choice in valid.keys():
return valid[choice]
else:
print("Please respond with 'yes' or 'no' (or 'y' or 'n').")
def create_dir(dir_name, skip_question=True, default="no",
question="Directory already exists and files may be " +
"overwritten. Proceed?"):
"""Convenience function for creating a directory.
If skip_question is `False`, the user will be ask to overwrite the
directory if it already exists.
Args:
dir_name (str): Name of directory.
skip_question (bool): If ``True``, directory will either be overwritten
or not overwritten without asking the user, depending on the value
of `default`.
default (str): Default answer if directory already exists ("yes" or
"no").
question (str): Question prompted to the user if directory already
exists.
"""
if not os.path.exists(dir_name):
os.makedirs(dir_name)
elif not skip_question and query_yes_no(question, default) == "no":
print("Abort.")
exit()
def imwrite(fname, data):
"""Write image to file.
First scales the image to the interval [0 255].
Args:
fname (str): Filename.
data (ndarray): 2D image array.
"""
data = data.squeeze()
if data.max() != 0:
data = np.clip(255*data/data.max(), 0, 255).astype('uint8')
else:
data = np.zeros(data.shape, np.uint8)
image = Image.fromarray(data)
image.save(fname)
def save_image(img, out_dir, name, image_format='png'):
"""Save image to file.
More convenient function than `imwrite` because it is easier to provide the
directory and the image format.
Args:
img (ndarray): 2D image array.
out_dir (str): Output directory.
name (str): Filename.
image_format (str): Image extension/format (default: 'png')
"""
imwrite(str(out_dir) + "/" + str(name) + "." + image_format, img)
def save_matlab(img, out_dir, name):
"""Save a matrix as .mat file
The file will be saved in `<out_dir>/mat/img_<name>.mat` for some reason.
Args:
img (ndarray): 2D image array.
out_dir (str): Output directory.
name (str): Filename.
"""
create_dir(out_dir + "/mat")
savemat(out_dir + "/mat/img_" + str(name) + ".mat",
dict({"img_" + str(name): img}), oned_as='row')
def display(string):
"""Display a string without a trailing newline like the print command does.
Args:
string (str): String to print.
"""
sys.stdout.write(string)
sys.stdout.flush()
def dotc(x, y=None):
"""Calculate complex dot product
If y is not provided, <x, x> is calculated instead.
Args:
x (ndarray): Vector.
y (ndarray): Vector.
Returns:
ndarray: Complex dot product.
"""
if y is None:
y = x
return float(np.vdot(x, y))
# return float(np.dot(x.flatten().real, y.flatten().real)) +\
# float(np.dot(x.flatten().imag, y.flatten().imag))
def dual_energy(u, alpha0, show_text, on_gpu=True):
"""Calculate and print dual energy
Args:
u (gpuarray): Array.
alpha0 (float): Regularization parameter.
show_text (bool): If `True`, prints dual energy.
Returns:
float: Dual energy.
"""
if on_gpu:
dual_en = (u.get()**2).sum()/2.0
else:
dual_en = (u**2).sum()/2.0
if show_text:
print("TGV2-L2-2D-PD: alpha0 = " + str(alpha0) + " dual energy = " +
str(dual_en))
return dual_en
def next_power_of_2(x):
"""Determine the next larger number which is a power of two.
Args:
x (float): Input number.
Returns:
float: Next larger number which is a power of two.
"""
return pow(2.0, np.ceil(np.log2(x))).astype(np.int32)
def enlarge_next_power_of_2(shape):
"""Determine the next larger shape which is a power of two.
Args:
shape (array or tuple): Array of dimensions.
Returns:
ndarray: Dimensions with are a power of two.
"""
new_shape = np.array(shape)
new_shape[:] = pow(2.0, np.ceil(np.log2(new_shape[:]))).astype(np.int32)
return new_shape
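# Illustrative helper (added for clarity, not part of the original module): both
# helpers round sizes up to the next power of two, which FFT-based steps prefer,
# e.g. 100 -> 128 and (100, 320) -> [128, 512].
def _power_of_2_demo():
    return next_power_of_2(100), enlarge_next_power_of_2((100, 320))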
###############################################################################
# FORWARD AND BACKWARD DIFFERENCES #
###############################################################################
def dyp(u):
"""Backward finite differences in y direction.
Args:
u (ndarray): 2D input array.
Returns:
ndarray: Finite difference.
"""
u = np.mat(u)
dy = np.vstack((u[1:, :], u[-1, :])) - u
return np.array(dy)
def dxp(u):
"""Backward finite differences in x direction.
Args:
u (ndarray): 2D input array.
Returns:
ndarray: Finite difference.
"""
u = np.mat(u)
dx = np.hstack((u[:, 1:], u[:, -1])) - u
return np.array(dx)
def dym(u):
"""Forward finite differences in y direction.
Args:
u (ndarray): 2D input array.
Returns:
ndarray: Finite difference.
"""
u = np.mat(u)
N = u.shape[1]
dy = np.vstack((u[0:-1, :], np.zeros((1, N), u.dtype))) - \
np.vstack((np.zeros((1, N), u.dtype), u[0:-1, :]))
return np.array(dy)
def dxm(u):
"""Forward finite differences in x direction.
Args:
u (ndarray): 2D input array.
Returns:
ndarray: Finite difference.
"""
u = np.mat(u)
N = u.shape[1]
dx = np.hstack((u[:, 0:-1], np.zeros((N, 1), u.dtype))) -\
np.hstack((np.zeros((N, 1), u.dtype), u[:, 0:-1]))
return np.array(dx)
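# --- Self-check sketch (added for clarity, not part of the original module) ---
# dxm/dym act as the negative adjoints of dxp/dyp, i.e. <dxp(u), v> = -<u, dxm(v)>,
# which is what makes the pairs usable as discrete gradient/divergence operators in
# TGV-type schemes. The array is kept square because dxm builds its zero padding
# from u.shape[1].
def _check_difference_adjointness(n=8, seed=0):
    rng = np.random.RandomState(seed)
    u, v = rng.rand(n, n), rng.rand(n, n)
    lhs = np.sum(dxp(u) * v) + np.sum(dyp(u) * v)
    rhs = -np.sum(u * dxm(v)) - np.sum(u * dym(v))
    return np.allclose(lhs, rhs)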
###############################################################################
# GPU TOOLS #
###############################################################################
def format_tuple(tup, join_char="."):
"""Formats a tuple of Version numbers for printing.
Example:
(4, 2, 0) turns into 4.2.0
Args:
tup (tuple): Version as tuple.
join_char (char): Character by which numbers are joined (default: ".")
Returns:
str: Joined version number.
"""
return str(join_char.join(map(str, tup)))
def gpu_info():
"""Show GPU information
"""
print("CUDA Version: " + format_tuple(cuda.get_version()))
print("CUDA Driver Version: " + str(cuda.get_driver_version()))
print("Number of CUDA devices: " + str(cuda.Device.count()))
for i in range(0, cuda.Device(0).count()):
print("Device number " + str(i))
print(" Name of CUDA device: " + str(cuda.Device(i).name()))
print(" Compute capability: " +
format_tuple(cuda.Device(i).compute_capability()))
print(" Total Memory: " +
str(cuda.Device(i).total_memory()/(1024.0**2)) + " MB")
print(" Maximum number of threads per block: " +
str(cuda.Device(i).max_threads_per_block))
print(" PCI Bus ID: " + str(cuda.Device(i).pci_bus_id()))
for (k, v) in cuda.Device(i).get_attributes().items():
print(" " + str(k) + ": " + str(v))
# Definition of a generic blocksize for the TGV update kernels
#: Generic blocksize in x
block_size_x = 16
#: Generic blocksize in y
block_size_y = 16
#: Generic block definition
block = (block_size_x, block_size_y, 1)
def get_grid(u, offset=0):
"""Computes grid size based on block_size_x, block_size_y and the array
size.
Args:
u (ndarray): Input array for which gridsize should be calculated.
Returns:
tuple: CUDA grid.
"""
grid = (int(np.ceil((u.shape[0+offset] + block[0] - 1)/block[0])),
int(np.ceil((u.shape[1+offset] + block[1] - 1)/block[1])))
return grid
def gpuarray_copy(u):
"""Copes a gpuarray object.
Args:
u (gpuarray): Input array.
Returns:
gpuarra: Deep copy of input array.
"""
v = gpuarray.zeros_like(u)
v.strides = u.strides
cuda.memcpy_dtod(v.gpudata, u.gpudata, u.nbytes)
return v
def dotc_gpu(x, y=None):
"""Calculate complex dot product on GPU.
If y is not provided, <x, x> is calculated instead.
Args:
x (ndarray): Vector.
y (ndarray): Vector.
Returns:
ndarray: Absolute of complex dot product.
"""
if y is None:
y = x
return np.abs(gpuarray.dot(x.ravel(), y.ravel().conj()).get())
add_scaled_vector_func = \
ElementwiseKernel("pycuda::complex<float> *out, \
pycuda::complex<float> *in1, \
pycuda::complex<float> scal, \
pycuda::complex<float> *in2",
"out[i] = in1[i] + scal * in2[i]",
"add_scaled_vector",
preamble="#include <pycuda-complex.hpp>")
def add_scaled_vector(out, inp1, scal, inp2):
"""Perform the following (single precision) calculation on the GPU:
``out = inp1 + scal * inp2``
Args:
inp1 (gpuarray): First input array.
inp2 (gpuarray): Second input array.
scal (float): Scaling parameter.
Returns:
gpuarray: Output array.
"""
add_scaled_vector_func(out, inp1, np.float32(scal), inp2)
add_scaled_vector_double_func = \
ElementwiseKernel("pycuda::complex<double> *out, \
pycuda::complex<double> *in1, \
pycuda::complex<double> scal, \
pycuda::complex<double> *in2",
"out[i] = in1[i] + scal * in2[i]",
"add_scaled_vector_double",
preamble="#include <pycuda-complex.hpp>")
def add_scaled_vector_double(out, inp1, scal, inp2):
"""Perform the following (double precision) calculation on the GPU:
``out = inp1 + scal * inp2``
Args:
inp1 (gpuarray): First input array.
inp2 (gpuarray): Second input array.
scal (float): Scaling parameter.
Returns:
gpuarray: Output array.
"""
add_scaled_vector_double_func(out, inp1, np.float64(scal), inp2)
add_scaled_vector_vector_func = \
ElementwiseKernel("pycuda::complex<float> *out, \
pycuda::complex<float> *in1, \
pycuda::complex<float> *scal, \
pycuda::complex<float> *in2",
"out[i] = in1[i] + scal[i] * in2[i]",
"add_scaled_vector_vector",
preamble="#include <pycuda-complex.hpp>")
def add_scaled_vector_vector(out, inp1, scal, inp2):
"""Perform the following (single precision) calculation on the GPU:
``out = inp1 + scal * inp2``
Args:
inp1 (gpuarray): First input array.
inp2 (gpuarray): Second input array.
scal (gpuarray): Scaling array.
Returns:
gpuarray: Output array.
"""
add_scaled_vector_vector_func(out, inp1, scal, inp2)
add_scaled_vector_vector_double_func = \
ElementwiseKernel("pycuda::complex<double> *out, \
pycuda::complex<double> *in1, \
pycuda::complex<double> *scal, \
pycuda::complex<double> *in2",
"out[i] = in1[i] + scal[i] * in2[i]",
"add_scaled_vector_vector_double",
preamble="#include <pycuda-complex.hpp>")
def add_scaled_vector_vector_double(out, inp1, scal, inp2):
"""Perform the following (double precision) calculation on the GPU:
``out = inp1 + scal * inp2``
Args:
inp1 (gpuarray): First input array.
inp2 (gpuarray): Second input array.
scal (gpuarray): Scaling array.
Returns:
gpuarray: Output array.
"""
add_scaled_vector_vector_double_func(out, inp1, scal, inp2)
sub_scaled_vector_func = \
ElementwiseKernel("pycuda::complex<float> *out, \
pycuda::complex<float> *in1, \
pycuda::complex<float> scal, \
pycuda::complex<float> *in2",
"out[i] = in1[i] - scal * in2[i]",
"sub_scaled_vector",
preamble="#include <pycuda-complex.hpp>")
def sub_scaled_vector(out, inp1, scal, inp2):
"""Perform the following (single precision) calculation on the GPU:
``out = inp1 - scal * inp2``
Args:
inp1 (gpuarray): First input array.
inp2 (gpuarray): Second input array.
scal (float): Scaling parameter.
Returns:
gpuarray: Output array.
"""
    sub_scaled_vector_func(out, inp1, np.float32(scal), inp2)
from __future__ import print_function
import argparse
import numpy as np
from numpy import argsort
from numpy.random import shuffle
from collections import defaultdict as ddict
from skge.param import Parameter, AdaGrad
import timeit
import pickle
import pdb
import logging
from sklearn.metrics import precision_recall_curve, auc, roc_auc_score
from skge import sample
from skge.util import to_tensor
import copy
import itertools
import sys
from skge.util import ccorr
from enum import Enum
from subgraphs import Subgraphs
import trident
SUBTYPE = Enum('SUBTYPE', 'SPO POS')
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger('EX-KG')
_cutoff = 30
_DEF_NBATCHES = 100
_DEF_POST_EPOCH = []
_DEF_LEARNING_RATE = 0.1
_DEF_SAMPLE_FUN = None
_DEF_MAX_EPOCHS = 1000
_DEF_MARGIN = 1.0
_FILE_GRADIENTS = 'gradients.txt'
_FILE_EMBEDDINGS = 'embeddings.txt'
_FILE_TAIL_PREDICTIONS_UNFILTERED = 'tail-predictions-unfiltered.txt'
_FILE_TAIL_PREDICTIONS_FILTERED = 'tail-predictions-filtered.txt'
_FILE_HEAD_PREDICTIONS_UNFILTERED = 'head-predictions-unfiltered.txt'
_FILE_HEAD_PREDICTIONS_FILTERED = 'head-predictions-filtered.txt'
_FILE_INFO = 'info.txt'
_SIM_RANK_C = 0.6
_SIM_RANK_K = 5
all_true_triples = []
trident_db = None
np.random.seed(42)
graph = ddict()
def num_incoming_neighbours(entity, graph):
all_lists = list(graph['incoming'][entity].values())
incoming_neighbours = list(itertools.chain(*all_lists))
return len(incoming_neighbours)
def num_outgoing_neighbours(entity, graph):
all_lists = list(graph['outgoing'][entity].values())
outgoing_neighbours = list(itertools.chain(*all_lists))
return len(outgoing_neighbours)
def num_outgoing_relations(entity, graph):
all_relations = list(graph['outgoing'][entity].keys())
return len(all_relations)
def num_incoming_relations(entity, graph):
all_relations = list(graph['incoming'][entity].keys())
return len(all_relations)
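# --- Illustrative sketch (added for clarity, not part of the original file) ---
# Structure assumed by the neighbour/relation helpers above (the real graph is
# built later in Experiment.make_graph): graph['outgoing'][entity] and
# graph['incoming'][entity] each map relation -> list of neighbouring entities.
def _toy_graph_demo():
    toy = {'outgoing': [ddict(list) for _ in range(2)],
           'incoming': [ddict(list) for _ in range(2)]}
    toy['outgoing'][0][7].append(1)  # triple (head=0, tail=1, relation=7)
    toy['incoming'][1][7].append(0)
    return num_outgoing_neighbours(0, toy), num_incoming_neighbours(1, toy)  # -> (1, 1)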
def _is_converge(s1, s2, eps=1e-4):
for i in s1.keys():
for j in s1[i].keys():
if abs(s1[i][j] - s2[i][j]) >= eps:
return False
return True
class Experiment(object):
def __init__(self):
self.parser = argparse.ArgumentParser(prog='Knowledge Graph experiment', conflict_handler='resolve')
self.parser.add_argument('--margin', type=float, help='Margin for loss function')
self.parser.add_argument('--init', type=str, default='nunif', help='Initialization method')
self.parser.add_argument('--lr', type=float, help='Learning rate')
self.parser.add_argument('--me', type=int, help='Maximum number of epochs')
self.parser.add_argument('--ne', type=int, help='Numer of negative examples', default=1)
self.parser.add_argument('--nb', type=int, help='Number of batches')
self.parser.add_argument('--fout', type=str, help='Path to store model and results', default=None)
self.parser.add_argument('--finfo', type=str, help='Path to store additional debug info', default=None)
self.parser.add_argument('--fgrad', type=str, help='Path to store gradient vector updates for each entity', default=None)
self.parser.add_argument('--fpagerank', type=str, help='Path of the page ranks of all entities (in form of python dictionary)', default=None)
self.parser.add_argument('--fembed', type=str, help='Path to store final embeddings for every entity and relation', default=None)
self.parser.add_argument('--fin', type=str, help='Path to input data', default=None)
self.parser.add_argument('--ftax', type=str, help='Path to the taxonomy file', default=None)
self.parser.add_argument('--fsub', type=str, help='Path to the subgraphs file', default=None)
self.parser.add_argument('--embed', type=str, help='Strategy to assign embeddings', default='kognac')
self.parser.add_argument('--test-all', type=int, help='Evaluate Test set after x epochs', default=10)
self.parser.add_argument('--no-pairwise', action='store_const', default=False, const=True)
self.parser.add_argument('--incr', type=int, help='Percentage of training data to consider in first step', default=100)
self.parser.add_argument('--mode', type=str, default='rank')
self.parser.add_argument('--sampler', type=str, default='random-mode')
self.parser.add_argument('--norm', type=str, default='l1', help=' Normalization (l1(default) or l2)')
self.parser.add_argument('--subcreate', dest="subcreate", help='Create subgraphs', action='store_true')
self.parser.add_argument('--subtest', dest = "subtest", help='Test with subgraphs', action='store_true')
self.parser.add_argument('--minsubsize', type=int, help='Minimum subgraph size', default=50)
self.parser.add_argument('--topk', type=int, help='Number of top subgraphs to check for evaluation', default=5)
self.parser.add_argument('--subalgo', type=str, help='Algo to use to create subgraphs', default="transe")
self.parser.add_argument('--subdistance', type=str, help='Distance function to evaluate subgraphs on', default="avg")
self.neval = -1
self.best_valid_score = -1.0
self.exectimes = []
self.subgraphs = Subgraphs()
self.avg_embeddings = []
self.var_embeddings = []
def make_subgraphs(self, subType, sorted_triples, mincard, trn_model, sub_algo):
similar_entities = []
current = np.zeros(self.args.ncomp, dtype=np.float64)
count = 0
prevo = -1
prevp = -1
subgraph_logfile="subgraphs-test.log"
file_data = ""
cntTriples = len(sorted_triples)
for i, triple in enumerate(sorted_triples):
sub = triple[0]
obj = triple[1]
rel = triple[2]
ent = -1
other_ent = -1
ER = None
if subType == SUBTYPE.POS:
ent = obj
other_ent = sub
else:
#print ("subtype = " , subType)
ent = sub
other_ent = obj
#if sub_algo == "hole":
# ER = ccorr(trn_model.R[rel], trn_model.E)
if ent != prevo or rel != prevp:
if count > mincard:
mean = current/count
self.avg_embeddings.append(mean)
columnsSquareDiff = np.zeros(self.args.ncomp, dtype=np.float64)
for se in similar_entities:
columnsSquareDiff += (trn_model.E[se] - mean) * (trn_model.E[se] - mean)
if count > 2:
columnsSquareDiff /= (count-1)
else:
columnsSquareDiff = mean
self.var_embeddings.append(columnsSquareDiff)
# add subgraph
self.subgraphs.add_subgraphs(subType, prevo, prevp, count, similar_entities)
for se in similar_entities:
file_data += str(se) + "\n"
#print(similar_entities)
#else:
# print("count = ", count , " for ", str(prevo) , " : " , str(prevp))
count = 0
prevo = ent
prevp = rel
current.fill(0.0)
similar_entities.clear()
count += 1
if sub_algo == "transe":
current += trn_model.E[other_ent]
elif sub_algo == "hole":
if subType == SUBTYPE.POS:
if not np.any(current):
current = trn_model.E[other_ent]
else:
current = np.dot(trn_model.E[other_ent], current)
else:
if not np.any(current):
current = trn_model.E[other_ent]
else:
current = np.dot(current,trn_model.E[other_ent])
#current += trn_model.E[other_ent]
similar_entities.append(other_ent)
# After looping over all triples, add remaining entities to a subgraph
if count > mincard:
            mean = current/count
            self.avg_embeddings.append(mean)
            columnsSquareDiff = np.zeros(self.args.ncomp, dtype=np.float64)
            for se in similar_entities:
                columnsSquareDiff += (trn_model.E[se] - mean) * (trn_model.E[se] - mean)
            if count > 2:
                columnsSquareDiff /= (count-1)
            else:
                columnsSquareDiff = mean
self.var_embeddings.append(columnsSquareDiff)
# add subgraph
self.subgraphs.add_subgraphs(subType, prevo, prevp, count, similar_entities)
print ("# of subgraphs : " , self.subgraphs.get_Nsubgraphs())
with open(subgraph_logfile, "w") as fout:
fout.write(file_data)
def run(self, *args, **kwargs):
# parse comandline arguments
self.args = self.parser.parse_args()
fi = self.args.finfo
self.file_info = None
if fi is not None:
self.file_info = open(fi, "w")
if self.args.mode == 'rank':
self.callback = self.ranking_callback
elif self.args.mode == 'lp':
self.callback = self.lp_callback
self.evaluator = LinkPredictionEval
else:
raise ValueError('Unknown experiment mode (%s)' % self.args.mode)
if self.args.subcreate:
self.subgraphs_create()
elif self.args.subtest:
self.subgraphs_test()
else:
self.train()
def subgraph_callback(self, trn_model, topk, sub_algo, sub_dist):
#TODO: use subgraphs to find ranks, scores
log.info("Computing SUBGRAPH positions and scores for TEST dataset...")
time_start = timeit.default_timer()
pos_test = self.ev_test.subgraph_positions(trn_model, self.subgraphs.subgraphs, sub_algo, sub_dist)
subgraph_ranking_scores(self.fresult, pos_test, 'TEST', topk)
time_end = timeit.default_timer()
log.info("Time spent in computing SUBGRAPH positions and scores for TEST dataset = %ds" % (time_end - time_start))
self.fresult.close()
def ranking_callback(self, trn, with_eval=False):
# print basic info
elapsed = timeit.default_timer() - trn.epoch_start
self.exectimes.append(elapsed)
if self.args.no_pairwise:
log.info("[%3d] time = %ds, loss = %f" % (trn.epoch, elapsed, trn.loss))
else:
log.info("[%3d] time = %ds, violations = %d" % (trn.epoch, elapsed, trn.nviolations))
self.fresult.write("[%3d] time = %ds, violations = %d\n" % (trn.epoch, elapsed, trn.nviolations))
# if we improved the validation error, store model and calc test error
if (trn.epoch % self.args.test_all == 0) or with_eval:
log.info("Computing positions and scores for VALIDATION dataset...")
time_start = timeit.default_timer()
plot = False
if trn.epoch == self.args.me:
#log.info("PLOT ME\n")
plot = True
pos_v, fpos_v = self.ev_valid.positions(trn.model)
fmrr_valid = ranking_scores(self.fresult, pos_v, fpos_v, trn.epoch, 'VALID')
time_end = timeit.default_timer()
log.info("At epoch %d , Time spent in computing positions and scores for VALIDATION dataset = %ds" % (trn.epoch, time_end - time_start))
self.fresult.write("At epoch %d , Time spent in computing positions and scores for VALIDATION dataset = %ds\n" % (trn.epoch, time_end - time_start))
log.debug("FMRR valid = %f, best = %f" % (fmrr_valid, self.best_valid_score))
if fmrr_valid > self.best_valid_score or plot:
self.best_valid_score = fmrr_valid
log.info("Computing positions and scores for TEST dataset...")
time_start = timeit.default_timer()
pos_t, fpos_t = self.ev_test.positions(trn.model, plot=plot, pagerankMap=self.pagerankMap)
ranking_scores(self.fresult, pos_t, fpos_t, trn.epoch, 'TEST')
time_end = timeit.default_timer()
log.info("At epoch %d, Time spent in computing positions and scores for TEST dataset = %ds" % (trn.epoch, time_end - time_start))
self.fresult.write("At epoch %d, Time spent in computing positions and scores for TEST dataset = %ds\n" % (trn.epoch, time_end - time_start))
if self.args.fout is not None:
st = {
'model': trn.model,
'pos test': pos_t,
'fpos test': fpos_t,
'pos valid': pos_v,
'fpos valid': fpos_v,
'exectimes': self.exectimes
}
with open(self.args.fout, 'wb') as fout:
pickle.dump(st, fout, protocol=2)
return True
def lp_callback(self, m, with_eval=False):
# print basic info
elapsed = timeit.default_timer() - m.epoch_start
self.exectimes.append(elapsed)
if self.args.no_pairwise:
log.info("[%3d] time = %ds, loss = %d" % (m.epoch, elapsed, m.loss))
else:
log.info("[%3d] time = %ds, violations = %d" % (m.epoch, elapsed, m.nviolations))
# if we improved the validation error, store model and calc test error
if (m.epoch % self.args.test_all == 0) or with_eval:
auc_valid, roc_valid = self.ev_valid.scores(m)
log.debug("AUC PR valid = %f, best = %f" % (auc_valid, self.best_valid_score))
if auc_valid > self.best_valid_score:
self.best_valid_score = auc_valid
auc_test, roc_test = self.ev_test.scores(m)
log.debug("AUC PR test = %f, AUC ROC test = %f" % (auc_test, roc_test))
if self.args.fout is not None:
st = {
'model': m,
'auc pr test': auc_test,
'auc pr valid': auc_valid,
'auc roc test': roc_test,
'auc roc valid': roc_valid,
'exectimes': self.exectimes
}
with open(self.args.fout, 'wb') as fout:
pickle.dump(st, fout, protocol=2)
return True
def bisect_list_by_percent(self, ll, percentage):
size = len(ll)
shuffle(ll)
first_half_len = (size * percentage) / 100
second_half_len = size - first_half_len
first_half = ll[:int(first_half_len)]
second_half = ll[int(first_half_len):]
return [first_half, second_half]
def subgraphs_test(self):
train_triples, valid_triples, test_triples, sz = self.get_all_triples()
true_triples = train_triples + test_triples + valid_triples
log.info("*"*80)
log.info(len(true_triples))
if self.args.mode == 'rank':
self.ev_test = self.evaluator(test_triples, true_triples, self.neval)
self.ev_valid = self.evaluator(valid_triples,true_triples, self.neval)
topk = self.args.topk
self.subgraphs = Subgraphs.load(self.args.fsub)
trn_model = Model.load(self.args.fout)
dataset = self.args.fin.split('/')[-1].split('.')[0]
algo = self.algo
epochs = self.args.me
sub_algo = self.args.subalgo
sub_dist = self.args.subdistance
mincard = self.args.minsubsize
subgraph_embeddings_home = "/var/scratch/uji300/hole/"
outfile = subgraph_embeddings_home + dataset + "_"+sub_algo+ "-dist-" + sub_dist + "-tau-" + str(mincard)+"-topK-"+ str(topk) + ".result"
fresult = open(outfile, "w")
self.fresult = fresult
self.subgraph_callback(trn_model, topk, sub_algo, sub_dist)
def remove_literals(self, triples):
global trident_db
clean_triples = []
for t in triples:
if -1 != trident_db.lookup_str(t[0]).find('"'):
continue
if -1 != trident_db.lookup_str(t[1]).find('"'):
continue
if -1 != trident_db.lookup_relstr(t[2]).find('"'):
continue
clean_triples.append(t)
return clean_triples
'''
input:
output:
Subgraph embeddings (array of objects of Subgraph class)
'''
def subgraphs_create(self):
train_triples, valid_triples, test_triples, sz = self.get_all_triples()
xs = train_triples + valid_triples + test_triples
print ("Trying to make subgraphs...")
clean_triples = self.remove_literals(xs)
mincard = self.args.minsubsize
sub_algo = self.args.subalgo
sub_dist = self.args.subdistance
dataset = self.args.fin.split('/')[-1].split('.')[0]
algo = self.algo
epochs = self.args.me
results = Model.load(self.args.fout)
trn_model = results['model']
sorted_ps = sorted(clean_triples, key=lambda l : (l[2], l[0]))
#print ("calling with type = ", SUBTYPE.SPO)
#print(sorted_ps)
self.make_subgraphs(SUBTYPE.SPO, sorted_ps, mincard, trn_model, sub_algo)
sorted_po = sorted(clean_triples, key=lambda l : (l[2], l[1]))
#print ("calling with type = ", SUBTYPE.POS)
self.make_subgraphs(SUBTYPE.POS, sorted_po, mincard, trn_model, sub_algo)
trn_model.add_param('SA', (self.subgraphs.get_Nsubgraphs(), self.args.ncomp))
trn_model.SA = self.avg_embeddings
#for sube in trn.model.S:
# print(type(sube) , " :" , sube)
#print(type(self.avg_embeddings) , " : " , self.avg_embeddings[self.subgraphs.get_Nsubgraphs()-1])
if sub_algo == "var" or sub_algo == "kl":
trn_model.add_param('SV', (self.subgraphs.get_Nsubgraphs(), self.args.ncomp))
trn_model.SV = self.var_embeddings
# Save subgraphs and model
subgraph_embeddings_home = "/var/scratch/uji300/hole/"
subgraph_file_name= subgraph_embeddings_home + dataset + "-HolE-epochs-" + str(epochs) + "-" + sub_dist + "-tau-" + str(mincard) + ".sub"
self.subgraphs.save(subgraph_file_name)
model_file_name= subgraph_embeddings_home + dataset + "-HolE-epochs-" + str(epochs) + "-" + sub_dist + "-tau-" + str(mincard) + ".mod"
trn_model.save(model_file_name)
def fit_model(self, xs, ys, sz, setup_trainer=True, trainer=None):
# create sampling objects
# Sample is given the array of triples.
# So that it can randomly create other triples that is not part of the original array
# This is useful to make negative samples
if self.args.sampler == 'corrupted':
# create type index, here it is ok to use the whole data
sampler = sample.CorruptedSampler(self.args.ne, xs, ti)
elif self.args.sampler == 'random-mode':
sampler = sample.RandomModeSampler(self.args.ne, [0, 1], xs, sz)
elif self.args.sampler == 'lcwa':
sampler = sample.LCWASampler(self.args.ne, [0, 1, 2], xs, sz)
else:
raise ValueError('Unknown sampler (%s)' % self.args.sampler)
if setup_trainer:
trn = self.setup_trainer(sz, sampler)
else:
trn = trainer
notUpdated = 0
for count in trn.model.E.updateCounts:
if count == 0:
notUpdated += 1
log.info("Fitting model %s with trainer %s and parameters %s" % (
trn.model.__class__.__name__,
trn.__class__.__name__,
self.args)
)
trn.fit(xs, ys)
# each x in xs is a tuple (SUB, OBJ, PREDicate)
self.callback(trn, with_eval=True)
return trn
def make_graph(self, triples, N, M):
graph_outgoing = [ddict(list) for _ in range(N)]
graph_incoming = [ddict(list) for _ in range(N)]
graph_relations_head = [ddict(list)for _ in range(M)]
graph_relations_tail = [ddict(list)for _ in range(M)]
for t in triples:
head = t[0]
tail = t[1]
relation = t[2]
graph_outgoing[head][relation].append(tail)
graph_incoming[tail][relation].append(head)
graph_relations_head[relation][head].append(tail)
graph_relations_tail[relation][tail].append(head)
return {'outgoing': graph_outgoing, 'incoming': graph_incoming, 'relations_head': graph_relations_head, 'relations_tail':graph_relations_tail}
def get_boundaries(self, classes, entity):
for c in classes:
if (int(c[2]) <= entity and entity <= int(c[3])):
return {'left': int(c[2]), 'right':int(c[3])}
return {'left' : -1, 'right' : -1}
#raise ValueError("Entity %d should not exist" % (entity))
def get_all_triples(self):
# read data
#with open(self.args.fin, 'rb') as fin:
# data = pickle.load(fin)
global all_true_triples
global trident_db
file_path = self.args.fin
trident_db = trident.Db(file_path)
batch_size = 1000
percent_valid_triples = 0.01
percent_test_triples = 0.01
all_true_triples = trident_db.all()
if trident_db.n_triples() < 1000:
batch_size = 100
batcher = trident.Batcher(file_path, batch_size, 1, percent_valid_triples, percent_test_triples)
N = trident_db.n_terms()
M = trident_db.n_relations()
#N = len(data['entities'])
#pdb.set_trace()
#M = len(data['relations'])
sz = (N, N, M)
if file_path[-1] != '/':
file_path = file_path + "/"
train_triples_series = batcher.load_triples(file_path+"_batch")
valid_triples_series = batcher.load_triples(file_path+"_batch_valid")
test_triples_series = batcher.load_triples(file_path+"_batch_test")
def parse_triples_series(series):
s_list = [int(x) for x in series[::3]]
p_list = [int(x) for x in series[1::3]]
o_list = [int(x) for x in series[2::3]]
result = []
for s,p,o in zip(s_list, p_list, o_list):
# Note that we are returing SUB,OBJ, PRED
result.append((s,o,p))
return result
train_triples = parse_triples_series(train_triples_series)
valid_triples = parse_triples_series(valid_triples_series)
test_triples = parse_triples_series(test_triples_series)
return train_triples, valid_triples, test_triples, sz
def train(self):
train_triples, valid_triples, test_triples, sz = self.get_all_triples()
N = sz[0]
M = sz[2]
print (type(train_triples))
print (len(train_triples))
#true_triples = data['train_subs'] + data['test_subs'] + data['valid_subs']
#test_triples = data['test_subs']
true_triples = train_triples + test_triples + valid_triples
if self.args.mode == 'rank':
self.ev_test = self.evaluator(test_triples, true_triples, self.neval)
self.ev_valid = self.evaluator(valid_triples,true_triples, self.neval)
#elif self.args.mode == 'lp':
# self.ev_test = self.evaluator(data['test_subs'], data['test_labels'])
# self.ev_valid = self.evaluator(data['valid_subs'], data['valid_labels'])
# Construct a name for the result file
# <dataset>-<size of training>-<strategy>-epochs-<number of epochs>-eval-<Evaluate after X epochs>-margin-<margin>.out
# lubm-full-transe-epochs-500-eval-50-margin-2.0.out
dataset = self.args.fin.split('/')[-1].split('.')[0]
size = "full" if self.args.incr == 100 else "exp"
strategy = self.args.embed
epochs = self.args.me
ev = self.args.test_all
margin = self.args.margin
outfile = dataset + "-" + size + "-" + strategy + "-epochs-" + str(epochs) + "-eval-" + str(ev) + "-margin-" + str(margin) + ".out"
fresult = open(outfile, "w")
self.fresult = fresult
self.pagerankMap = None
# If pagerank file is given, then extract the entity-pagerank map
if self.args.fpagerank is not None:
with open(self.args.fpagerank, 'r') as fp:
self.pagerankMap = eval(fp.read())
global _FILE_TAIL_PREDICTIONS_UNFILTERED
global _FILE_TAIL_PREDICTIONS_FILTERED
global _FILE_HEAD_PREDICTIONS_UNFILTERED
global _FILE_HEAD_PREDICTIONS_FILTERED
_FILE_TAIL_PREDICTIONS_UNFILTERED = dataset + "-" + _FILE_TAIL_PREDICTIONS_UNFILTERED
_FILE_TAIL_PREDICTIONS_FILTERED = dataset + "-" + _FILE_TAIL_PREDICTIONS_FILTERED
_FILE_HEAD_PREDICTIONS_UNFILTERED = dataset + "-" + _FILE_HEAD_PREDICTIONS_UNFILTERED
_FILE_HEAD_PREDICTIONS_FILTERED = dataset + "-" + _FILE_HEAD_PREDICTIONS_FILTERED
with open(_FILE_TAIL_PREDICTIONS_UNFILTERED, 'w') as fplot:
fplot.write("")
with open(_FILE_TAIL_PREDICTIONS_FILTERED, 'w') as fplot:
fplot.write("")
with open(_FILE_HEAD_PREDICTIONS_UNFILTERED, 'w') as fplot:
fplot.write("")
with open(_FILE_HEAD_PREDICTIONS_FILTERED, 'w') as fplot:
fplot.write("")
# Make a graph from edges in training triples.
graph_start = timeit.default_timer()
global graph
# TODO: for graph use dynamic dict instead of list
graph = self.make_graph(train_triples, N, M)
graph_end = timeit.default_timer()
log.info("Time to build the graph = %ds" %(graph_end - graph_start))
self.fresult.write("Time to build the graph = %ds\n" %(graph_end - graph_start))
#sim_start = timeit.default_timer()
#sim = simrank(graph, N)
#sim_end = timeit.default_timer()
#log.info("Time to compute simranks = %ds" %(sim_end - sim_start))
if self.args.incr != 100:
# Select 10% of the tuples here
time_start = timeit.default_timer()
triples = data['train_subs']
incremental_batches = self.bisect_list_by_percent(triples, self.args.incr)
time_end = timeit.default_timer()
log.info("Time to choose %d%% samples = %ds" % (self.args.incr, time_end-time_start))
log.info("Total size = %d, %d%% size = %d, %d%% size = %d" % (len(data['train_subs']), self.args.incr, len(incremental_batches[0]), 100-self.args.incr, len(incremental_batches[1])))
xs = incremental_batches[0]
ys = np.ones(len(xs))
time_start = timeit.default_timer()
trainer = self.fit_model(xs, ys, sz)
time_end = timeit.default_timer()
log.info("### Time to fit model for %d%% samples (%d epochs) = %ds" % (self.args.incr, self.args.me, time_end - time_start))
self.fresult.write("### Time to fit model for %d%% samples (%d epochs) = %ds\n" % (self.args.incr, self.args.me, time_end - time_start))
log.info("First step finished : ######################")
time_start = timeit.default_timer()
countEntities = [0] * N
for x in xs:
countEntities[x[0]] += 1
countEntities[x[1]] += 1
considered = 0;
if self.file_info is not None:
self.file_info.write("Entity (is given) => (embedding of) Entity)\n")
if self.args.embed is "kognac":
with open (self.args.ftax, 'r') as ftax:
lines = ftax.readlines()
ranges = [l.split() for l in lines]
classes = []
for r in ranges:
if (len(r) == 4):
classes.append(r)
classes.sort(key=lambda x:int(x[2]))
# Apply proper strategy to assign embeddings here
# If kognac, then read the taxonomy file and based on boundaries of classes, assign embeddings of neighbouring entities.
# If not, choose other strategy
# Else, choose random assignment
lonely = 0
for entity, count in enumerate(countEntities):
if count != 0:
considered += 1
else:
if self.args.embed is "kognac":
# Find the six closest entities that were considered before and take their average
boundary = self.get_boundaries(classes, entity)
quorum = 6
log.info("entity (%d): " % (entity))
if (boundary['left'] == -1 and boundary['right'] == -1):
# This entitiy is not a part of any class
lonely += 1
continue
neighbours = []
if (boundary['left'] == entity):
e = entity + 1
while(countEntities[e] != 0 and e != boundary['right']-1):
neighbours.append(e)
if (len(neighbours) == quorum):
break
e += 1
elif (boundary['right'] == entity):
e = entity - 1
while (countEntities[e] != 0 and e != boundary['left']):
neighbours.append(e)
if (len(neighbours) == quorum):
break;
e -= 1
else:
e = entity + 1
while(countEntities[e] != 0 and e != boundary['right']-1):
neighbours.append(e)
if (len(neighbours) == (quorum/2)):
break
e += 1
required = quorum - (len(neighbours))
e = entity - 1
while (countEntities[e] != 0 and e != boundary['left']):
neighbours.append(e)
if (len(neighbours) == required):
break;
e -= 1
if len(neighbours) > quorum:
log.info("More neighbours than the quorum : %d" % (len(neighbours)))
quorum = len(neighbours)
log.info(" %d neighbours found\n" % (quorum))
if quorum != 0:
total = np.full((50), 0, dtype=float)
for n in neighbours:
total += trainer.model.E[n]
total /= quorum
if self.file_info is not None:
                                for n in neighbours:
self.file_info.write("%d, " % (n))
self.file_info.write("\n")
trainer.model.E[entity] = total
time_end = timeit.default_timer()
log.info("Time spent in assigning new embeddings (Strategy %s) = %ds" % (self.args.embed, time_end - time_start))
self.fresult.write("Time spent in assigning new embeddings (Strategy %s) = %ds\n" % (self.args.embed, time_end - time_start))
log.info("!!!!!!!!!!! %d / %d entities were considered in first batch. !!!!!!!!!!!!!!" % (considered, N))
log.info("@@@@@@@@ %d entities were lonley (i.e. not a part of any class" % (lonely))
# Select all tuples
xs = incremental_batches[0] + incremental_batches[1]
ys = np.ones(len(xs))
# Here the trainer is already set-up. So we don't call setup_trainer again.
# setup_trainer methods initializes the max_epochs parameter which is the number of iterations.
# We have added a method to the PairwiseStochasticTrainer class which will set the max_epoch for us
trainer.set_max_epochs(self.args.me/5)
time_start= timeit.default_timer()
self.fit_model(xs, ys, sz, setup_trainer=False, trainer=trainer)
time_end = timeit.default_timer()
log.info("Time to fit model for 100%% samples (%d epochs) = %ds" % (trainer.max_epochs, time_end - time_start))
self.fresult.write("Time to fit model for 100%% samples (%d epochs) = %ds\n" % (trainer.max_epochs, time_end - time_start))
else:
xs = train_triples
ys = np.ones(len(xs))
time_start= timeit.default_timer()
trainer = self.fit_model(xs, ys, sz)
time_end = timeit.default_timer()
log.info("Time to fit model for 100%% samples (%d epochs) = %ds" % (trainer.max_epochs, time_end - time_start))
self.fresult.write("Time to fit model for 100%% samples (%d epochs) = %ds\n" % (trainer.max_epochs, time_end - time_start))
#self.subgraphs_create(xs, ys, sz, trainer)
class FilteredRankingEval(object):
def __init__(self, xs, true_triples, neval=-1):
idx = ddict(list)
tt = ddict(lambda: {'ss': ddict(list), 'os': ddict(list)})
self.neval = neval
self.sz = len(xs)
for s, o, p in xs:
idx[p].append((s, o))
for s, o, p in true_triples:
tt[p]['os'][s].append(o)
tt[p]['ss'][o].append(s)
self.idx = dict(idx)
self.tt = dict(tt)
self.neval = {}
for p, sos in self.idx.items():
if neval == -1:
self.neval[p] = -1
else:
                self.neval[p] = int(np.ceil(neval * len(sos) / len(xs)))  # np.int was removed from NumPy
def get_matching_entities(self, sub_type, e, r):
global all_true_triples
entities = []
for triple in all_true_triples:
if sub_type == SUBTYPE.SPO and triple[0] == e and triple[1] == r:
entities.append(triple[2])
if len(entities) == 10:
return entities
elif triple[2] == e and triple[1] == r:
entities.append(triple[0])
if len(entities) == 10:
return entities
return entities
def get_skip_subgraphs(self, subgraphs, ent, rel, sub_type):
for i in range(len(subgraphs)):
#print("Subgraphs ", str(i+1), ": ", subgraphs[i].ent , " , ", subgraphs[i].rel)
if subgraphs[i].subType == sub_type and \
subgraphs[i].ent == ent and \
subgraphs[i].rel == rel:
return i
def get_kl_divergence_scores(self, model, subgraphs, ent, rel, sub_type):
'''
        Get the entities with this ent and rel from the database,
        sample some entities to build the trueAvg and trueVar embeddings,
        then compute the KL divergence between these trueAvg/trueVar embeddings
        and every subgraph.
'''
summation = np.zeros(50, dtype=np.float64)
count = 0
scores = []
ER = ccorr(model.R[rel], model.E)
# TODO: find true entities with this ent and rel in the database
me = self.get_matching_entities(sub_type, ent, rel)
for e in me:
summation += model.E[e]
count += 1
mean = summation / count
columnsSquareDiff = np.zeros(50, dtype=np.float64)
for e in me:
columnsSquareDiff += (model.E[e] - mean) * (model.E[e] - mean)
if count > 2:
columnsSquareDiff /= (count - 1)
else:
columnsSquareDiff = mean
true_avg_emb = mean
true_var_emb = columnsSquareDiff
# Calculate kl scores with all subgraphs
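        # Reference note (a sketch added for clarity, not taken from the original code): for two
        # diagonal Gaussians N(qa, diag(qv**2)) and N(sa, diag(sv**2)) with *standard deviations*
        # qv and sv, the per-dimension closed-form KL divergence is
        #     log(sv / qv) + (qv**2 + (qa - sa)**2) / (2 * sv**2) - 0.5
        # calc_kl below appears to treat the stored SV / true_var_emb values as variances and drops
        # some parentheses, so its output is best read as a heuristic divergence score, not an exact KL.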
def calc_kl(sa, sv, qa, qv):
#sa = np.ndarray(50, dtype=np.float64)
#sv = np.ndarray(50, dtype=np.float64)
#sa = tempa
#sv = tempv
temp = ((qa - sa)**2 + qv**2 / (2*sv*sv))
            sv = np.abs(sv)  # use magnitudes; avoids mutating the model's embeddings in place
            qv = np.abs(qv)
temp2 = np.log(np.sqrt(sv)/qv)
temp3 = 0.5
ans = np.sum(temp + temp2 - temp3)
return np.sum(temp + temp2 - temp3)
for i in range(len(subgraphs)):
scores.append(calc_kl(model.SA[i], model.SV[i], true_avg_emb, true_var_emb))
return scores
def subgraph_positions(self, mdl, subgraphs, sub_algo, sub_dist):
pos = {}
# do equivalent of self.prepare_global(mdl)
count = 0
sumTailRanks = 0
sumHeadRanks = 0
total = 0
failfile = "failed.log"
data = ""
for p, sos in self.idx.items():
# dictionary with 'tail' as the key, will store positions of H after keeping T and P constant
ppos = {'head': [], 'tail': []}
# do self.prepare(mdl, p ) # calculate ccorr(p , all subgraphs)
# mdl.S should contain all subgraph embeddings
if sub_dist == "var":
SR = ccorr(mdl.R[p], mdl.SV)
else:
SR = ccorr(mdl.R[p], mdl.SA)
# for var1 var 2,
# calculate variance based scores with subgraphs' variance embeddings
# and rearrange ranks
for s, o in sos:#[:self.neval[p]]:
count += 1
if sub_dist == "kl":
kl_ts = timeit.default_timer()
scores_o = self.get_kl_divergence_scores(mdl, subgraphs, s, p, SUBTYPE.SPO)
kl_te = timeit.default_timer()
#print("Time to compute KL div scores = %ds" % (kl_te-kl_ts))
else:
scores_o = np.dot(SR, mdl.E[s]).flatten()
#print(scores_o)
# are there any subgraphs with this S and P with subgraph type SPO
# then set their scores to Inf
skip_sub_index = self.get_skip_subgraphs(subgraphs, s, p, SUBTYPE.SPO)
                if skip_sub_index is not None: scores_o[skip_sub_index] = -np.Inf  # mask only when a matching subgraph exists
#scores_o should contain scores for each subgraph using dot product
sortidx_o = argsort(scores_o)[::-1]
# sortidx_o has the indices for sorted subgraph scores
# Choose topk from this and find out if the answer lies in any of these subgraphs
found = False
for rank, index in enumerate(sortidx_o):
#print("index = ", index)
#print (subgraphs[index].entities)
if o in subgraphs[index].entities:
found = True
break
sumTailRanks += rank
                if not found:
data += str(o) + "\n"
#print ("For ", str(s) , ", ", str(p), " subgraph rank(o) = " , rank, " expected o = ", o)
# rank could be 0 which leads to a possible divide by 0 error
ppos['tail'].append(rank + 1)
ocrr = ccorr(mdl.R[p], mdl.E[o])
scores_s = np.dot(SR, ocrr).flatten()
#print(scores_s)
#scores_o should contain scores for each subgraph using dot product
sortidx_s = argsort(scores_s)[::-1]
# sortidx_o has the indices for sorted subgraph scores
# Choose topk from this and find out if the answer lies in any of these subgraphs
found = False
for rank, index in enumerate(sortidx_s):
if s in subgraphs[index].entities:
found = True
break
sumHeadRanks += rank
total += 1
                if not found:
data += str(s) + "\n"
#print ("For ", str(o) , ", ", str(p), " subgraph rank(s) = " , rank, " expected s = ", s)
# rank could be 0 which leads to a possible divide by 0 error
ppos['head'].append(rank + 1)
pos[p] = ppos
print("Mean tail rank = ", sumTailRanks / total)
print("Mean head rank = ", sumHeadRanks / total)
with open(failfile, "w") as fout:
fout.write(data)
return pos
def positions(self, mdl, plot=False, pagerankMap=None):
pos = {}
fpos = {}
if hasattr(self, 'prepare_global'):
self.prepare_global(mdl)
count = 0
for p, sos in self.idx.items():
#pdb.set_trace()
# There will be just one item in the idx dictionary in case of the un-labelled graph (single-relation graph)
# So, there will be just one iteration of outer for loop
# f stands for filtered
# For unfiltered evaluation, we consider all entities to compute scores with
# For filtered evaluation, we exclude the neighbours of the entity to compute scores with
# p might stand for predicate
# ppos = positions for predicates, where
# dictionary with 'head' as the key, will store positions of T after keeping H and P constant
# dictionary with 'tail' as the key, will store positions of H after keeping T and P constant
ppos = {'head': [], 'tail': []}
pfpos = {'head': [], 'tail': []}
# prepare() method adds embeddings of p to embeddings of every entity
if hasattr(self, 'prepare'):
#pdb.set_trace()
# Add the embeddings of p to every entity of this model
self.prepare(mdl, p)
#log.info("Prepared\n")
# For some reason, skip last tuple from all the tuples for relation 'P'
# neval for every relation is -1
# self.neval[p] will access the last element and we are skipping the last one by
# array[:-1]
#log.info("sos len = %d" % (len(sos)))
for s, o in sos:#[:self.neval[p]]:
count += 1
scores_o = self.scores_o(mdl, s, p).flatten()
sortidx_o = argsort(scores_o)[::-1]
# Sort all the entities (As objects) and find out the index of the "O" in picture
# Store the index+1 in the ppos['tail]
rank = np.where(sortidx_o == o)[0][0] + 1
ppos['tail'].append(rank)
if plot:
inDegree_of_o = num_incoming_neighbours(o, graph)
outDegree_of_o = num_outgoing_neighbours(o, graph)
totalDegree_of_o = inDegree_of_o + outDegree_of_o
inRelations = num_incoming_relations(o, graph)
if pagerankMap:
with open(_FILE_TAIL_PREDICTIONS_UNFILTERED, 'a') as fplot:
fplot.write("%f %d %d %d\n" % (float(pagerankMap[o]) * 100000, 1 if rank <= 10 else 2, totalDegree_of_o, inRelations))
else:
with open(_FILE_TAIL_PREDICTIONS_UNFILTERED, 'a') as fplot:
fplot.write("%d %d %d %d\n" % (inDegree_of_o, 1 if rank <= 10 else 2, totalDegree_of_o, inRelations))
# In the real data, for relation "P", which entities appear as objects for subject "S"
rm_idx = self.tt[p]['os'][s]
# rm_idx is the list of such entities
# Remove the object "O" that we are currently considering from this list
rm_idx = [i for i in rm_idx if i != o]
# Set the scores of KNOWN objects (known truths) to infinity = Filter the entities that already appear as neighbours
scores_o[rm_idx] = -np.Inf
sortidx_o = argsort(scores_o)[::-1]
rank = np.where(sortidx_o == o)[0][0] + 1
pfpos['tail'].append(rank)
if plot:
if pagerankMap:
with open(_FILE_TAIL_PREDICTIONS_FILTERED, 'a') as fplot:
fplot.write("%f %d %d %d\n" % (float(pagerankMap[o]) * 100000, 1 if rank <= 10 else 2, totalDegree_of_o, inRelations))
else:
with open(_FILE_TAIL_PREDICTIONS_FILTERED, 'a') as fplot:
fplot.write("%d %d %d %d\n" % (inDegree_of_o, 1 if rank <= 10 else 2, totalDegree_of_o, inRelations))
################ HEAD predictions : Keep TAIL/OBJECT constant #######################
# Unfiltered scores: calculate scores with all entities and sort them
scores_s = self.scores_s(mdl, o, p).flatten()
sortidx_s = argsort(scores_s)[::-1]
rank = np.where(sortidx_s == s)[0][0] + 1
ppos['head'].append(rank)
if plot:
outDegree_of_s = num_outgoing_neighbours(s, graph)
inDegree_of_s= num_incoming_neighbours(s, graph)
totalDegree_of_s = outDegree_of_s + inDegree_of_s
outRelations = num_outgoing_relations(s, graph)
# If pagerank file is provided, write the pagerank of the node instead of the degree
if pagerankMap:
with open(_FILE_HEAD_PREDICTIONS_UNFILTERED, 'a') as fplot:
fplot.write("%f %d %d %d\n" % (float(pagerankMap[s]) * 100000, 1 if rank <= 10 else 2, totalDegree_of_s, outRelations))
else:
with open(_FILE_HEAD_PREDICTIONS_UNFILTERED, 'a') as fplot:
fplot.write("%d %d %d %d\n" % (outDegree_of_s, 1 if rank <= 10 else 2, totalDegree_of_s, outRelations))
rm_idx = self.tt[p]['ss'][o]
rm_idx = [i for i in rm_idx if i != s]
scores_s[rm_idx] = -np.Inf
sortidx_s = argsort(scores_s)[::-1]
rank = np.where(sortidx_s == s)[0][0] + 1
pfpos['head'].append(rank)
if plot:
# If pagerank file is provided, write the pagerank of the node instead of the degree
if pagerankMap:
with open(_FILE_HEAD_PREDICTIONS_FILTERED, 'a') as fplot:
fplot.write("%f %d %d %d\n" % (float(pagerankMap[s]) * 100000, 1 if rank <= 10 else 2, totalDegree_of_s, outRelations))
else:
with open(_FILE_HEAD_PREDICTIONS_FILTERED, 'a') as fplot:
fplot.write("%d %d %d %d\n" % (outDegree_of_s, 1 if rank <= 10 else 2, totalDegree_of_s, outRelations))
pos[p] = ppos
fpos[p] = pfpos
if count != self.sz:
log.info("cnt = %d, self.sz = %d" % (count, self.sz))
return pos, fpos
class LinkPredictionEval(object):
def __init__(self, xs, ys):
ss, os, ps = list(zip(*xs))
self.ss = list(ss)
self.ps = list(ps)
self.os = list(os)
self.ys = ys
def scores(self, mdl):
scores = mdl._scores(self.ss, self.ps, self.os)
pr, rc, _ = precision_recall_curve(self.ys, scores)
roc = roc_auc_score(self.ys, scores)
return auc(rc, pr), roc
def ranking_scores(fresult, pos, fpos, epoch, txt):
hpos = [p for k in pos.keys() for p in pos[k]['head']]
tpos = [p for k in pos.keys() for p in pos[k]['tail']]
fhpos = [p for k in fpos.keys() for p in fpos[k]['head']]
ftpos = [p for k in fpos.keys() for p in fpos[k]['tail']]
fmrr = _print_pos(fresult,
np.array(hpos + tpos),
np.array(fhpos + ftpos),
epoch, txt)
return fmrr
def subgraph_ranking_scores(fresult, pos, txt, topk):
hpos = [p for k in pos.keys() for p in pos[k]['head']]
tpos = [p for k in pos.keys() for p in pos[k]['tail']]
head_mrr, head_mean_pos, head_ans_hits = compute_scores(np.array(hpos), hits=topk)
tail_mrr, tail_mean_pos, tail_ans_hits = compute_scores(np.array(tpos), hits=topk)
log.info("Subgraph ranking scores : ")
log.info(
"%s: MRR(H) = %.2f, Mean Rank(H) = %.2f, Hits@%d(H) = %.2f" %
(txt, head_mrr, head_mean_pos, topk, head_ans_hits )
)
log.info(
"%s: MRR(T) = %.2f, Mean Rank(T) = %.2f, Hits@%d(T) = %.2f" %
(txt, tail_mrr, tail_mean_pos, topk, tail_ans_hits )
)
fresult.write(
"%s: MRR(H) = %.2f, Mean Rank(H) = %.2f, Hits@%d(H) = %.2f\n" %
(txt, head_mrr, head_mean_pos, topk, head_ans_hits)
)
fresult.write(
"%s: MRR(T) = %.2f, Mean Rank(T) = %.2f, Hits@%d(T) = %.2f\n" %
(txt, tail_mrr, tail_mean_pos, topk, tail_ans_hits)
)
def _print_pos(fresult, pos, fpos, epoch, txt):
mrr, mean_pos, hits = compute_scores(pos)
fmrr, fmean_pos, fhits = compute_scores(fpos)
log.info(
"[%3d] %s: MRR = %.2f/%.2f, Mean Rank = %.2f/%.2f, Hits@10 = %.2f/%.2f" %
(epoch, txt, mrr, fmrr, mean_pos, fmean_pos, hits, fhits)
)
fresult.write(
"[%3d] %s: MRR = %.2f/%.2f, Mean Rank = %.2f/%.2f, Hits@10 = %.2f/%.2f\n" %
(epoch, txt, mrr, fmrr, mean_pos, fmean_pos, hits, fhits)
)
return fmrr
def compute_scores(pos, hits=10):
mrr = np.mean(1.0 / pos)
mean_pos = np.mean(pos)
    ans_hits = np.mean(pos <= hits) * 100  # percentage of ranks within the top-k cutoff
return mrr, mean_pos, ans_hits
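# Worked example (illustrative only): for ranks pos = np.array([1, 2, 10, 100])
#   MRR        = mean(1.0 / pos)        = (1 + 0.5 + 0.1 + 0.01) / 4 ~= 0.40
#   Mean Rank  = mean(pos)              = 28.25
#   Hits@10    = mean(pos <= 10) * 100  = 75.0   (three of the four ranks are within the top 10)
# Ranks are expected to be 1-based; a rank of 0 would make 1.0 / pos blow up.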
def cardinalities(xs, ys, sz):
T = to_tensor(xs, ys, sz)
c_head = []
c_tail = []
for Ti in T:
sh = Ti.tocsr().sum(axis=1)
st = Ti.tocsc().sum(axis=0)
c_head.append(sh[np.where(sh)].mean())
c_tail.append(st[np.where(st)].mean())
cards = {'1-1': [], '1-N': [], 'M-1': [], 'M-N': []}
for k in range(sz[2]):
if c_head[k] < 1.5 and c_tail[k] < 1.5:
cards['1-1'].append(k)
elif c_head[k] < 1.5:
cards['1-N'].append(k)
elif c_tail[k] < 1.5:
cards['M-1'].append(k)
else:
cards['M-N'].append(k)
return cards
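# Illustrative note (not from the original code): c_head[k] is the average number of distinct tails
# per head and c_tail[k] the average number of distinct heads per tail for relation k. Following the
# usual TransE-style convention, a side is treated as "1" when that average is below 1.5, which
# yields the 1-1 / 1-N / M-1 / M-N buckets above.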
class Config(object):
def __init__(self, model, trainer):
self.model = model
self.trainer = trainer
def __getstate__(self):
return {
'model': self.model,
'trainer': self.trainer
}
class Model(object):
"""
Base class for all Knowledge Graph models
Implements basic setup routines for parameters and serialization methods
Subclasses need to implement:
- scores(self, ss, ps, os)
- _gradients(self, xys) for StochasticTrainer
- _pairwise_gradients(self, pxs, nxs) for PairwiseStochasticTrainer
C++ : Use virtual functions, make the Model class abstract by having pure virtual functions
"""
def __init__(self, *args, **kwargs):
#super(Model, self).__init__(*args, **)
self.params = {}
self.hyperparams = {}
# C++ : No named parameters. Emulate.
self.add_hyperparam('init', kwargs.pop('init', 'nunif'))
def add_param(self, param_id, shape, post=None, value=None):
if value is None:
value = Parameter(shape, self.init, name=param_id, post=post)
setattr(self, param_id, value)
self.params[param_id] = value
def add_hyperparam(self, param_id, value):
setattr(self, param_id, value)
self.hyperparams[param_id] = value
def __getstate__(self):
return {
'hyperparams': self.hyperparams,
'params': self.params
}
def __setstate__(self, st):
self.params = {}
self.hyperparams = {}
for pid, p in st['params'].items():
self.add_param(pid, None, None, value=p)
for pid, p in st['hyperparams'].items():
self.add_hyperparam(pid, p)
def save(self, fname, protocol=pickle.HIGHEST_PROTOCOL):
with open(fname, 'wb') as fout:
pickle.dump(self, fout, protocol=protocol)
@staticmethod
def load(fname):
with open(fname, 'rb') as fin:
mdl = pickle.load(fin)
return mdl
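# Minimal usage sketch (illustrative only; `SomeModel` stands for a concrete subclass defined
# elsewhere in this code base, and the constructor arguments are placeholders):
#
#   mdl = SomeModel((N, N, M), ndim=50, init='nunif')
#   ... train mdl ...
#   mdl.save('model.pkl')               # pickles params + hyperparams via __getstate__
#   restored = Model.load('model.pkl')  # __setstate__ re-registers both dicts on the new instance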
class StochasticTrainer(object):
"""
Stochastic gradient descent trainer with scalar loss function.
Models need to implement
_gradients(self, xys)
to be trained with this class.
"""
def __init__(self, *args, **kwargs):
self.model = args[0]
self.hyperparams = {}
self.add_hyperparam('max_epochs', kwargs.pop('max_epochs', _DEF_MAX_EPOCHS))
self.add_hyperparam('nbatches', kwargs.pop('nbatches', _DEF_NBATCHES))
self.add_hyperparam('learning_rate', kwargs.pop('learning_rate', _DEF_LEARNING_RATE))
self.post_epoch = kwargs.pop('post_epoch', _DEF_POST_EPOCH)
self.samplef = kwargs.pop('samplef', _DEF_SAMPLE_FUN)
pu = kwargs.pop('param_update', AdaGrad)
self._updaters = {
key: pu(param, self.learning_rate)
for key, param in self.model.params.items()
}
def set_max_epochs(self, epoch):
self.max_epochs = epoch
def __getstate__(self):
return self.hyperparams
def __setstate__(self, st):
        for pid, p in st.items():  # __getstate__ returns the hyperparams dict itself
self.add_hyperparam(pid, p)
def add_hyperparam(self, param_id, value):
setattr(self, param_id, value)
self.hyperparams[param_id] = value
def fit(self, xs, ys):
self._optim(list(zip(xs, ys)))
def _pre_epoch(self):
self.loss = 0
def _optim(self, xys):
# idx = [0,1,2,...., k] where len(xys) = k
idx = np.arange(len(xys))
#pdb.set_trace();
self.batch_size = len(xys) // self.nbatches
#print (type(self.batch_size))
#print(type(xys))
        # np.arange(start, stop, step)
        # For len(xys) = 1000 and nbatches = 100, batch_size = 10 and
        # batch_idx = [10, 20, 30, ..., 980, 990]  (np.arange excludes the stop value)
batch_idx = np.arange(self.batch_size, len(xys), self.batch_size)
#pdb.set_trace()
for self.epoch in range(1, self.max_epochs + 1):
# shuffle training examples
self._pre_epoch()
shuffle(idx)
# store epoch for callback
self.epoch_start = timeit.default_timer()
# process mini-batches
# Split the array idx by indexes given in batch_idx
# batch_idx contains [1414, 2828, 4242, 5656, 7070,...]
# Thus, batch will contain array of 1414 elements each time
# entities with ids 0-1413, 1414-2827, 2828-4241 etc.
#log.info("%d) " % self.epoch)
for batch in np.split(idx, batch_idx):
'''
xys is array of tuple pairs as follows
((S1, O1, P1), 1.0 )
((S2, O2, P2), 1.0 )
((S3, O3, P3), 1.0 )
..
..
((Sn, On, Pn), 1.0 )
xys[index] will access one of these pairs.
xys[index][0] will access the triplet.
xys[index][0][0] will access the subject entity.
'''
#log.info("length of minibatch[%d] " % len(batch))
bxys = [xys[z] for z in batch]
self._process_batch(bxys)
# check callback function, if false return
# post_epoch is the self.callback. It was set in setup_trainer() method
# of TransEExp
for f in self.post_epoch:
if not f(self):
break
def _process_batch(self, xys):
# if enabled, sample additional examples
if self.samplef is not None:
xys += self.samplef(xys)
if hasattr(self.model, '_prepare_batch_step'):
self.model._prepare_batch_step(xys)
# take step for batch
grads = self.model._gradients(xys)
self.loss += self.model.loss
self._batch_step(grads)
def _batch_step(self, grads):
for paramID in self._updaters.keys():
#pdb.set_trace();
# *grads[paramID] unpacks argument list when calling the function, in this case CTOR
# Because _updaters is a dictionary
# _updaters[param] will be the value of type AdaGrad
# AdaGrad is subclass of ParameterUpdate
# ParameterUpdate class has a __call__ method
# This method is called when the instance of ParameterUpdate is called.
# C++ : functors, overload operator()
self._updaters[paramID](*grads[paramID])
#pdb.set_trace();
class PairwiseStochasticTrainer(StochasticTrainer):
"""
Stochastic gradient descent trainer with pairwise ranking loss functions.
Models need to implement
_pairwise_gradients(self, pxs, nxs)
to be trained with this class.
"""
def __init__(self, *args, **kwargs):
super(PairwiseStochasticTrainer, self).__init__(*args, **kwargs)
self.model.add_hyperparam('margin', kwargs.pop('margin', _DEF_MARGIN))
fg = kwargs.pop('file_grad', _FILE_GRADIENTS)
fe = kwargs.pop('file_embed', _FILE_EMBEDDINGS)
self.file_gradients = None
self.file_embeddings = None
self.pickle_file_embeddings = None
if fg is not None:
self.file_gradients = open(fg, "w")
if fe is not None:
self.file_embeddings = open(fe, "w")
self.pickle_file_embeddings = open(fe+".pkl", "wb")
def fit(self, xs, ys):
# samplef is RandomModeSample set by setup_trainer() method
if self.samplef is None:
pidx = np.where(np.array(ys) == 1)[0]
nidx = np.where( | np.array(ys) | numpy.array |
import numpy as np
import tensorflow as tf
from kerascv.layers.anchor_generators.anchor_generator import AnchorGenerator
from kerascv.layers.matchers.greedy_bipartite import target_assign_func
from kerascv.layers.matchers.greedy_bipartite import target_assign_tf_func
def test_single_gt_best_match():
anchor_gen = AnchorGenerator(
image_size=(300, 300),
scales=[0.2],
aspect_ratios=[1.0],
clip_boxes=False,
normalize_coordinates=True,
)
anchors = anchor_gen((2, 2))
ground_truth_boxes = tf.constant([[0.14, 0.64, 0.34, 0.84]])
ground_truth_labels = tf.constant([[8]])
matched_gt_boxes, matched_gt_labels, positive_mask, negative_mask = target_assign_func(
ground_truth_boxes, ground_truth_labels, anchors
)
expected_matched_gt_boxes = np.asarray(
[anchors[0, :], ground_truth_boxes[0, :], anchors[2, :], anchors[3, :]]
)
np.testing.assert_allclose(expected_matched_gt_boxes, matched_gt_boxes)
expected_matched_gt_labels = np.zeros((4, 1))
expected_matched_gt_labels[1] = ground_truth_labels[0]
np.testing.assert_allclose(expected_matched_gt_labels, matched_gt_labels)
    expected_positive_mask = np.asarray([0, 1, 0, 0]).astype(int)  # np.int was removed in NumPy >= 1.24
    expected_negative_mask = np.asarray([1, 0, 1, 1]).astype(int)
np.testing.assert_equal(expected_positive_mask, positive_mask)
np.testing.assert_equal(expected_negative_mask, negative_mask)
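# Why index 1 above (a sketch of the assumed geometry): with a 2x2 grid over normalized coordinates
# and scale 0.2, the anchors are assumed to be centered at (0.25, 0.25), (0.25, 0.75), (0.75, 0.25)
# and (0.75, 0.75). The ground-truth box [0.14, 0.64, 0.34, 0.84] is centered at (0.24, 0.74), so its
# highest-IoU anchor is the one at (0.25, 0.75), i.e. flattened index 1, which greedy bipartite
# matching marks as the single positive.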
def test_single_gt_no_intersect():
anchor_gen = AnchorGenerator(
image_size=(300, 300),
scales=[0.2],
aspect_ratios=[1.0],
clip_boxes=False,
normalize_coordinates=True,
)
anchors = anchor_gen((2, 2))
ground_truth_boxes = tf.constant([[0.4, 0.65, 0.6, 0.85]])
ground_truth_labels = tf.constant([[8]])
    # Since it does not intersect with any anchor, greedy bipartite matching still assigns it to the first anchor (index 0).
matched_gt_boxes, matched_gt_labels, positive_mask, negative_mask = target_assign_func(
ground_truth_boxes, ground_truth_labels, anchors
)
expected_matched_gt_boxes = np.asarray(
[ground_truth_boxes[0, :], anchors[1, :], anchors[2, :], anchors[3, :]]
)
np.testing.assert_allclose(expected_matched_gt_boxes, matched_gt_boxes)
expected_matched_gt_labels = np.zeros((4, 1))
expected_matched_gt_labels[0] = ground_truth_labels[0]
np.testing.assert_allclose(expected_matched_gt_labels, matched_gt_labels)
    expected_positive_mask = np.asarray([1, 0, 0, 0]).astype(int)
    expected_negative_mask = np.asarray([0, 1, 1, 1]).astype(int)
np.testing.assert_equal(expected_positive_mask, positive_mask)
np.testing.assert_equal(expected_negative_mask, negative_mask)
def test_single_gt_single_match_single_neutral():
anchor_gen = AnchorGenerator(
image_size=(300, 300),
scales=[0.5],
aspect_ratios=[1.0],
clip_boxes=False,
normalize_coordinates=True,
)
anchors = anchor_gen((2, 2))
ground_truth_boxes = tf.constant([[0.24, 0.5, 0.74, 1.0]])
ground_truth_labels = tf.constant([[8]])
matched_gt_boxes, matched_gt_labels, positive_mask, negative_mask = target_assign_func(
ground_truth_boxes, ground_truth_labels, anchors
)
expected_matched_gt_boxes = np.asarray(
[anchors[0, :], ground_truth_boxes[0, :], anchors[2, :], anchors[3, :]]
)
np.testing.assert_allclose(expected_matched_gt_boxes, matched_gt_boxes)
expected_matched_gt_labels = np.zeros((4, 1))
expected_matched_gt_labels[1] = ground_truth_labels[0]
np.testing.assert_allclose(expected_matched_gt_labels, matched_gt_labels)
    expected_positive_mask = np.asarray([0, 1, 0, 0]).astype(int)
    expected_negative_mask = np.asarray([1, 0, 1, 0]).astype(int)
np.testing.assert_equal(expected_positive_mask, positive_mask)
np.testing.assert_equal(expected_negative_mask, negative_mask)
def test_single_gt_single_match_zero_neutral():
anchor_gen = AnchorGenerator(
image_size=(300, 300),
scales=[0.5],
aspect_ratios=[1.0],
clip_boxes=False,
normalize_coordinates=True,
)
anchors = anchor_gen((2, 2))
ground_truth_boxes = tf.constant([[0.24, 0.5, 0.74, 1.0]])
ground_truth_labels = tf.constant([[8]])
matched_gt_boxes, matched_gt_labels, positive_mask, negative_mask = target_assign_func(
ground_truth_boxes, ground_truth_labels, anchors, negative_iou_threshold=1 / 3
)
expected_matched_gt_boxes = np.asarray(
[anchors[0, :], ground_truth_boxes[0, :], anchors[2, :], anchors[3, :]]
)
np.testing.assert_allclose(expected_matched_gt_boxes, matched_gt_boxes)
expected_matched_gt_labels = np.zeros((4, 1))
expected_matched_gt_labels[1] = ground_truth_labels[0]
np.testing.assert_allclose(expected_matched_gt_labels, matched_gt_labels)
    expected_positive_mask = np.asarray([0, 1, 0, 0]).astype(int)
    expected_negative_mask = np.asarray([1, 0, 1, 1]).astype(int)
np.testing.assert_equal(expected_positive_mask, positive_mask)
np.testing.assert_equal(expected_negative_mask, negative_mask)
def test_single_gt_four_match():
anchor_gen = AnchorGenerator(
image_size=(300, 300),
scales=[0.5],
aspect_ratios=[1.0],
clip_boxes=False,
normalize_coordinates=True,
)
anchors = anchor_gen((2, 2))
ground_truth_boxes = tf.constant([[0.25, 0.25, 0.75, 0.75]])
ground_truth_labels = tf.constant([[8]])
matched_gt_boxes, matched_gt_labels, positive_mask, negative_mask = target_assign_func(
ground_truth_boxes,
ground_truth_labels,
anchors,
positive_iou_threshold=1 / 7,
negative_iou_threshold=1 / 8,
)
expected_matched_gt_boxes = np.tile(ground_truth_boxes, (4, 1))
np.testing.assert_allclose(expected_matched_gt_boxes, matched_gt_boxes)
expected_matched_gt_labels = np.tile(ground_truth_labels, (4, 1))
np.testing.assert_allclose(expected_matched_gt_labels, matched_gt_labels)
    expected_positive_mask = np.asarray([1, 1, 1, 1]).astype(int)
    expected_negative_mask = np.asarray([0, 0, 0, 0]).astype(int)
np.testing.assert_equal(expected_positive_mask, positive_mask)
np.testing.assert_equal(expected_negative_mask, negative_mask)
def test_single_gt_single_match_three_negative():
anchor_gen = AnchorGenerator(
image_size=(300, 300),
scales=[0.5],
aspect_ratios=[1.0],
clip_boxes=False,
normalize_coordinates=True,
)
anchors = anchor_gen((2, 2))
ground_truth_boxes = tf.constant([[0.25, 0.25, 0.75, 0.75]])
ground_truth_labels = tf.constant([[8]])
matched_gt_boxes, matched_gt_labels, positive_mask, negative_mask = target_assign_func(
ground_truth_boxes, ground_truth_labels, anchors
)
expected_matched_gt_boxes = np.asarray(
[ground_truth_boxes[0, :], anchors[1, :], anchors[2, :], anchors[3, :]]
)
np.testing.assert_allclose(expected_matched_gt_boxes, matched_gt_boxes)
expected_matched_gt_labels = np.zeros((4, 1))
expected_matched_gt_labels[0] = ground_truth_labels[0]
np.testing.assert_allclose(expected_matched_gt_labels, matched_gt_labels)
    expected_positive_mask = np.asarray([1, 0, 0, 0]).astype(int)
    expected_negative_mask = np.asarray([0, 1, 1, 1]).astype(int)
np.testing.assert_equal(expected_positive_mask, positive_mask)
np.testing.assert_equal(expected_negative_mask, negative_mask)
def test_single_gt_single_match_three_neutral():
anchor_gen = AnchorGenerator(
image_size=(300, 300),
scales=[0.5],
aspect_ratios=[1.0],
clip_boxes=False,
normalize_coordinates=True,
)
anchors = anchor_gen((2, 2))
ground_truth_boxes = tf.constant([[0.25, 0.25, 0.75, 0.75]])
ground_truth_labels = tf.constant([[8]])
matched_gt_boxes, matched_gt_labels, positive_mask, negative_mask = target_assign_func(
ground_truth_boxes, ground_truth_labels, anchors, negative_iou_threshold=1 / 7
)
expected_matched_gt_boxes = np.asarray(
[ground_truth_boxes[0, :], anchors[1, :], anchors[2, :], anchors[3, :]]
)
np.testing.assert_allclose(expected_matched_gt_boxes, matched_gt_boxes)
expected_matched_gt_labels = np.zeros((4, 1))
expected_matched_gt_labels[0] = ground_truth_labels[0]
np.testing.assert_allclose(expected_matched_gt_labels, matched_gt_labels)
    expected_positive_mask = np.asarray([1, 0, 0, 0]).astype(int)
    expected_negative_mask = np.asarray([0, 0, 0, 0]).astype(int)
np.testing.assert_equal(expected_positive_mask, positive_mask)
np.testing.assert_equal(expected_negative_mask, negative_mask)
def test_two_gt_two_matches():
anchor_gen = AnchorGenerator(
image_size=(300, 300),
scales=[0.2],
aspect_ratios=[1.0],
clip_boxes=False,
normalize_coordinates=True,
)
anchors = anchor_gen((2, 2))
# The first box will be matched to the second anchor
# The second box will be matched to the first anchor
ground_truth_boxes = tf.constant([
[0.15, 0.65, 0.35, 0.85],
[0.14, 0.64, 0.34, 0.84],
])
ground_truth_labels = tf.constant([[8], [6]])
matched_gt_boxes, matched_gt_labels, positive_mask, negative_mask = target_assign_func(
ground_truth_boxes, ground_truth_labels, anchors
)
expected_matched_gt_boxes = np.asarray(
[ground_truth_boxes[1, :], ground_truth_boxes[0, :], anchors[2, :], anchors[3, :]]
)
np.testing.assert_allclose(expected_matched_gt_boxes, matched_gt_boxes)
expected_matched_gt_labels = np.zeros((4, 1))
expected_matched_gt_labels[1] = ground_truth_labels[0]
expected_matched_gt_labels[0] = ground_truth_labels[1]
np.testing.assert_allclose(expected_matched_gt_labels, matched_gt_labels)
    expected_positive_mask = np.asarray([1, 1, 0, 0]).astype(int)
    expected_negative_mask = np.asarray([0, 0, 1, 1]).astype(int)
| np.testing.assert_equal(expected_positive_mask, positive_mask) | numpy.testing.assert_equal |
import os
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV, cross_validate
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.linear_model import Lasso, LassoCV, LogisticRegressionCV, LogisticRegression
from sklearn.linear_model import ElasticNet, ElasticNetCV, enet_path
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import auc, roc_curve
from utils import kernel
from mics import classifier_mics
'''
Function names are kept as close as possible to their scikit-learn counterparts so the purpose of each function is easy to understand.
No separate leave-one-out functions are provided; to use leave-one-out, simply set the number of folds of the K-fold
cross-validation to the number of samples (as recommended by the scikit-learn documentation).
Using leave-one-out inside the grid search is not recommended: with many candidate parameters the model becomes extremely expensive.
'''
class lasso():
    '''A collection of LASSO feature-selection methods; whether to use cross-validation is chosen directly in the class.
    Inputs:
        X_train, X_test, y_train, y_test: features and labels of the training and test sets
        feature_name: feature names; their order must match the columns of X
        path: storage path for the log files, user defined
        cv_val: bool, whether to run grid-search cross-validation
'''
def __init__(self, X_train, X_test, y_train, y_test, feature_name, path, cv_val=True):
self.X_train = X_train
self.X_test = X_test
self.y_train = y_train
self.y_test = y_test
self.name = feature_name
self.cv_val = cv_val
self.path = path
def lasso(self, alpha, cv):
        '''Run LASSO feature selection once and keep the features whose coefficients are non-zero.
        The result contains the training/test feature matrices after selection, plus the feature names and weights;
        each feature name has one weight value and the two sequences are in matching order.
        Inputs:
            alpha: the alpha parameter
            cv: int, number of folds if cross-validation is used
        Outputs:
            best_alpha (only when cross-validation is used): optimal LASSO penalty parameter
            new_train_feature: selected training-set feature matrix
            new_test_feature: selected test-set feature matrix
            new_feature_name: names of the selected features
            feature_weight: coefficients of the selected features
'''
if self.cv_val is True:
model_lasso = LassoCV(alphas=alpha, cv=cv)
model_lasso.fit(self.X_train, self.y_train)
coef = pd.Series(model_lasso.coef_)
print("Lasso picked " + str(sum(coef != 0)) + " variables and eliminated the other " + str(
sum(coef == 0)) + " variables")
img_path = os.path.join(self.path, 'lassoCV')
os.makedirs(img_path, exist_ok=True)
            # Best LASSO penalty parameter found by cross-validation
best_alpha = model_lasso.alpha_
print('-----------------------------')
print('Best LASSO alpha:')
print(best_alpha)
            # Keep the features whose LASSO coefficients are non-zero
model = SelectFromModel(model_lasso, prefit=True)
            # Filter the training-set and test-set features with the fitted LASSO model
X_new_train = model.transform(self.X_train)
X_new_test = model.transform(self.X_test)
            # Mask over all features: kept features are True, discarded features are False
mask = model.get_support()
new_feature_name = []
feature_weight = []
            # Use the mask to collect the names and weights of the kept features
for bool, feature, coef in zip(mask, self.name, coef):
if bool:
new_feature_name.append(feature)
feature_weight.append(coef)
            # Attach the feature names to the kept training-set and test-set features
new_train_feature = pd.DataFrame(data=X_new_train, columns=new_feature_name)
new_test_feature = pd.DataFrame(data=X_new_test, columns=new_feature_name)
feature_weight = pd.Series(feature_weight)
return best_alpha, new_train_feature, new_test_feature, new_feature_name, feature_weight
else:
model_lasso = Lasso(alpha=alpha)
model_lasso.fit(self.X_train, self.y_train)
coef = pd.Series(model_lasso.coef_)
print("Lasso picked " + str(sum(coef != 0)) + " variables and eliminated the other " + str(
sum(coef == 0)) + " variables")
img_path = os.path.join(self.path, 'lasso_only')
os.makedirs(img_path, exist_ok=True)
            # Keep the features whose LASSO coefficients are non-zero
model = SelectFromModel(model_lasso, prefit=True)
            # Filter the training-set and test-set features with the fitted LASSO model
X_new_train = model.transform(self.X_train)
X_new_test = model.transform(self.X_test)
            # Mask over all features: kept features are True, discarded features are False
mask = model.get_support()
new_feature_name = []
feature_weight = []
            # Use the mask to collect the names and weights of the kept features
for bool, feature, coef in zip(mask, self.name, coef):
if bool:
new_feature_name.append(feature)
feature_weight.append(coef)
            # Attach the feature names to the kept training-set and test-set features
new_train_feature = pd.DataFrame(data=X_new_train, columns=new_feature_name)
new_test_feature = pd.DataFrame(data=X_new_test, columns=new_feature_name)
feature_weight = pd.Series(feature_weight)
return new_train_feature, new_test_feature, new_feature_name, feature_weight
def lasso_shuffle(self, shuffle_time, alpha_range, cv=10):
        '''Run many loops; in every loop the data set is shuffled, and at the end count how many times each feature was selected.
        Inputs:
            shuffle_time: number of shuffle loops
            alpha_range: alpha value(s); an int when no grid search is used, a list when grid search is used
            cv: number of folds if cross-validation is used
        Outputs:
            new_train_feature: training-set features after selection (this matrix and the one below are not essential;
                in the end the original feature matrix is re-extracted with the indices)
            new_test_feature: test-set features after selection
            select_feature_name: names of the selected features
            select_feature_name_freq: for each feature name, how many times it appeared over the shuffle loops
            feature_weight: for each feature name, its coefficient
            select_feature_index: for each feature name, its index in the original feature matrix, so the matrix
                columns can be extracted directly once feature selection is done
'''
        # Write the returned values to a txt file
lasso_txt = open(os.path.join(self.path, 'lasso_shuffle.txt'), 'w')
lasso_txt.write('LASSO parameters set:\n')
lasso_txt.write('\n---------------------------------------------\n')
lasso_txt.write('Grid search: % s' % self.cv_val)
lasso_txt.write('\nAlpha range: % s' % alpha_range)
lasso_txt.write('\nShuffle time: % s' % shuffle_time)
lasso_txt.write('\nGrid search cv-fold: % s' % cv)
lasso_txt.write('\n---------------------------------------------\n')
if self.cv_val is True:
            # Initialize the accumulated weights to 0 and the selected-feature list to empty
coef_sum = 0
select_list = []
            # Initialize the list of best alpha values
alpha_list = []
            # Start the shuffle loops; store the selected feature names in every iteration
for i in range(shuffle_time):
                # Shuffle the data
X, y = shuffle(self.X_train, self.y_train)
kfold = StratifiedKFold(n_splits=cv, shuffle=False)
model_lasso = LassoCV(alphas=alpha_range, cv=cv)
model_lasso.fit(X, y)
coef = pd.Series(model_lasso.coef_)
print("% s th shuffle, Lasso picked " % i + str(
sum(coef != 0)) + " variables and eliminated the other " + str(
sum(coef == 0)) + " variables")
                # Best LASSO penalty parameter found by cross-validation
alpha = model_lasso.alpha_
alpha_list.append(alpha)
print('best alpha value is % s' % alpha)
                # Accumulate the coefficients of every iteration
coef_sum += model_lasso.coef_
                # Extract the mask of the non-zero features
model = SelectFromModel(model_lasso, prefit=True)
                # Mask over all features: kept features are True, discarded features are False
mask = model.get_support()
                # Use the mask to store the names of the kept features in select_list
for bool, name in zip(mask, self.name):
if bool:
select_list.append(name)
            # Average coefficient of all features over the loops
coef_mean = coef_sum / shuffle_time
            # The selected features of every loop are stored in select_list; count how often each feature appears and keep the counts in the dict feature_freq
feature_freq = dict(zip(*np.unique(select_list, return_counts=True)))
            # The best alpha of every loop is stored in alpha_list; count how often each alpha appears and keep the counts in the dict alpha_freq
alpha_freq = dict(zip(*np.unique(alpha_list, return_counts=True)))
            # Sort the features by appearance frequency in descending order; store the names and counts separately
select_feature_name = []
select_feature_name_freq = []
for k in sorted(feature_freq, key=feature_freq.__getitem__, reverse=True):
                # In the same order as the feature names, store each feature's count in select_feature_name_freq
select_feature_name_freq.append(feature_freq[k])
                # Store the feature names in the list select_feature_name
select_feature_name.append(k)
            # Get the indices of the features kept by LASSO
select_feature_index = []
            # Convert the names of the kept features to a list
name_list = list(select_feature_name)
            # Convert all original feature names to a list
all_name_list = list(self.name)
            # Find the position of every selected feature in the original feature list and store all positions in select_feature_index
for i in range(len(select_feature_name)):
index = all_name_list.index(name_list[i])
select_feature_index.append(index)
            # Sort the alpha values by appearance frequency in descending order; store the values and counts separately
alpha_value = []
alpha_value_freq = []
for k in sorted(alpha_freq, key=alpha_freq.__getitem__, reverse=True):
                # In the same order as the alpha values, store each alpha's count in alpha_value_freq
alpha_value_freq.append(alpha_freq[k])
                # Store the alpha values in the list alpha_value
alpha_value.append(k)
print('alpha value % s appeared % s times in the loop' % (k, alpha_freq[k]))
            # Use the indices to extract the selected feature matrices
new_train_feature = self.X_train[:, select_feature_index]
new_test_feature = self.X_test[:, select_feature_index]
feature_weight = coef_mean[select_feature_index]
            # Write the outputs to the txt file
lasso_txt.write('\nSelected feature index:\n')
lasso_txt.write(str(select_feature_index))
lasso_txt.write('\n---------------------------------------------\n')
lasso_txt.write('\nSelected feature weight: \n')
lasso_txt.write(str(feature_weight))
lasso_txt.write('\n---------------------------------------------\n')
lasso_txt.write('\nSelected feature name:\n')
lasso_txt.write(str(select_feature_name))
lasso_txt.write('\n---------------------------------------------\n')
lasso_txt.write('\nSelected feature appearance frequency:\n')
lasso_txt.write(str(select_feature_name_freq))
lasso_txt.write('\n---------------------------------------------\n')
return new_train_feature, new_test_feature, select_feature_name, \
select_feature_name_freq, feature_weight, select_feature_index
else:
# 初始化权重为0,初始化特征列表为空
coef_sum = 0
select_list = []
# 开始shuffle循环,每次都存储选择后的特征名
for i in range(shuffle_time):
# 将数据进行shuffle
X, y = shuffle(self.X_train, self.y_train)
model_lasso = Lasso(alpha=alpha_range)
model_lasso.fit(X, y)
coef = pd.Series(model_lasso.coef_)
print("% s th shuffle, Lasso picked " % i + str(
sum(coef != 0)) + " variables and eliminated the other " + str(
sum(coef == 0)) + " variables")
# 将每一次循环的coef都进行相加
coef_sum += model_lasso.coef_
# 提取非零特征的mask
model = SelectFromModel(model_lasso, prefit=True)
# 所有特征的mask,保留的特征用True,被筛掉的特征用False
mask = model.get_support()
# 根据mask将保留特征的名字存储到select_list
for bool, name in zip(mask, self.name):
if bool:
select_list.append(name)
# 求全部特征的coef平均值
coef_mean = coef_sum / shuffle_time
# 每次的特征都存储在select_list中,然后统计每个特征出现的次数,存在feature_freq中,dict形式
feature_freq = dict(zip(*np.unique(select_list, return_counts=True)))
# 按照特征出现的频率,从大到小进行排序,分别存储特征名和出现次数
select_feature_name = []
select_feature_name_freq = []
for k in sorted(feature_freq, key=feature_freq.__getitem__, reverse=True):
# 特征名相对应的顺序,将每个特征出现的次数存在select_feature_name_freq中
select_feature_name_freq.append(feature_freq[k])
# 将特征名存在select_feature_name中,list形式
select_feature_name.append(k)
# 获取lasso后特征的索引
select_feature_index = []
# 将lasso后特征的名字转为list
name_list = list(select_feature_name)
# 将原始所有特征的名字转为list
all_name_list = list(self.name)
# 获取特征选择后特征在原始特征list中的索引位置,将所有索引位置存在select_feature_index中
for i in range(len(select_feature_name)):
index = all_name_list.index(name_list[i])
select_feature_index.append(index)
# 通过索引将选择后的特征矩阵赋值给select_feature
new_train_feature = self.X_train[:, select_feature_index]
new_test_feature = self.X_test[:, select_feature_index]
feature_weight = coef_mean[select_feature_index]
# 将输出值存入txt文件
lasso_txt.write('\nSelected feature index:\n')
lasso_txt.write(str(select_feature_index))
lasso_txt.write('\n---------------------------------------------\n')
lasso_txt.write('\nSelected feature weight: \n')
lasso_txt.write(str(feature_weight))
lasso_txt.write('\n---------------------------------------------\n')
lasso_txt.write('\nSelected feature name:\n')
lasso_txt.write(str(select_feature_name))
lasso_txt.write('\n---------------------------------------------\n')
lasso_txt.write('\nSelected feature appearance frequency:\n')
lasso_txt.write(str(select_feature_name_freq))
lasso_txt.write('\n---------------------------------------------\n')
return new_train_feature, new_test_feature, select_feature_name, \
select_feature_name_freq, feature_weight, select_feature_index
def logis_lasso(self, alpha, cv):
        '''Feature selection with logistic LASSO; cross-validation can optionally be used to choose the penalty parameter alpha.
        The result contains the training/test feature matrices after selection, plus the feature names and weights;
        each feature name has one weight value and the two sequences are in matching order.
        Inputs:
            alpha: penalty parameter (named alpha because this is LASSO; it is passed to LogisticRegression as C)
            cv: number of folds if cross-validation is used
        Outputs:
            best_alpha (only when cross-validation is used): optimal LASSO penalty parameter
            new_train_feature: training-set feature matrix after selection
            new_test_feature: test-set feature matrix after selection
            new_feature_name: names of the selected features
            feature_weight: weight of each selected feature
'''
if self.cv_val is True:
            logis_lasso = LogisticRegressionCV(Cs=alpha, cv=cv, penalty='l1', solver='liblinear')  # an L1-capable solver must be set explicitly on recent scikit-learn
logis_lasso.fit(self.X_train, self.y_train)
coef = pd.Series(np.ravel(logis_lasso.coef_))
print("Lasso picked " + str(sum(coef != 0)) + " variables and eliminated the other " + str(
sum(coef == 0)) + " variables")
img_path = os.path.join(self.path, 'lassoCV')
os.makedirs(img_path, exist_ok=True)
            # Best penalty parameter found by cross-validation
            best_alpha = logis_lasso.C_  # C_ holds the C chosen by CV; Cs_ is the whole candidate grid
print('-----------------------------')
print('Best LASSO alpha:')
print(best_alpha)
# 将lasso中权重不为0的特征选择出来
model = SelectFromModel(logis_lasso, prefit=True)
# 分别将训练集和测试集的特征使用上述lasso进行筛选
X_new_train = model.transform(self.X_train)
X_new_test = model.transform(self.X_test)
# 所有特征的mask,保留的特征用True,被筛掉的特征用False
mask = model.get_support()
new_feature_name = []
feature_weight = []
# 根据mask将保留特征的名字和权重分别存储到
for bool, feature, coef in zip(mask, self.name, coef):
if bool:
new_feature_name.append(feature)
feature_weight.append(coef)
# 将训练集和测试集的保留特征加上特征名
new_train_feature = pd.DataFrame(data=X_new_train, columns=new_feature_name)
new_test_feature = pd.DataFrame(data=X_new_test, columns=new_feature_name)
feature_weight = pd.Series(feature_weight)
return best_alpha, new_train_feature, new_test_feature, new_feature_name, feature_weight
else:
            logis_lasso = LogisticRegression(C=alpha, penalty='l1', solver='liblinear')  # an L1-capable solver must be set explicitly on recent scikit-learn
logis_lasso.fit(self.X_train, self.y_train)
coef = pd.Series(logis_lasso.coef_)
print("Lasso picked " + str(sum(coef != 0)) + " variables and eliminated the other " + str(
sum(coef == 0)) + " variables")
img_path = os.path.join(self.path, 'lasso_only')
os.makedirs(img_path, exist_ok=True)
# 将lasso中权重不为0的特征选择出来
model = SelectFromModel(logis_lasso, prefit=True)
# 分别将训练集和测试集的特征使用上述lasso进行筛选
X_new_train = model.transform(self.X_train)
X_new_test = model.transform(self.X_test)
# 所有特征的mask,保留的特征用True,被筛掉的特征用False
mask = model.get_support()
new_feature_name = []
feature_weight = []
# 根据mask将保留特征的名字和权重分别存储到
for bool, feature, coef in zip(mask, self.name, coef):
if bool:
new_feature_name.append(feature)
feature_weight.append(coef)
# 将训练集和测试集的保留特征加上特征名
new_train_feature = pd.DataFrame(data=X_new_train, columns=new_feature_name)
new_test_feature = pd.DataFrame(data=X_new_test, columns=new_feature_name)
feature_weight = pd.Series(feature_weight)
return new_train_feature, new_test_feature, new_feature_name, feature_weight
def logis_lasso_shuffle(self, alpha_range, shuffle_time=100, cv=10):
        '''Feature selection with logistic LASSO: run many loops, shuffle the data set in every loop, and at the end count how many times each feature was selected.
        Inputs:
            shuffle_time: number of shuffle loops
            alpha_range: alpha value(s); an int when no grid search is used, a list when grid search is used
            cv: number of folds if cross-validation is used
        Outputs:
            new_train_feature: training-set features after selection (this matrix and the one below are not essential;
                in the end the original feature matrix is re-extracted with the indices)
            new_test_feature: test-set features after selection
            select_feature_name: names of the selected features
            select_feature_name_freq: for each feature name, how many times it appeared over the shuffle loops
            feature_weight: for each feature name, its coefficient
            select_feature_index: for each feature name, its index in the original feature matrix, so the matrix
                columns can be extracted directly once feature selection is done
'''
# 将返回的值存入txt文件中
lasso_txt = open(os.path.join(self.path, 'logistic lasso_shuffle.txt'), 'w')
lasso_txt.write('LASSO parameters set:\n')
lasso_txt.write('\n---------------------------------------------\n')
lasso_txt.write('Grid search: % s' % self.cv_val)
lasso_txt.write('\nAlpha range: % s' % alpha_range)
lasso_txt.write('\nShuffle time: % s' % shuffle_time)
lasso_txt.write('\nGrid search cv-fold: % s' % cv)
lasso_txt.write('\n---------------------------------------------\n')
if self.cv_val is True:
# 初始化权重为0,初始化特征列表为空
coef_sum = 0
select_list = []
# 初始化最佳参数alpha
alpha_list = []
# 开始shuffle循环,每次都存储选择后的特征名
for i in range(shuffle_time):
# 将数据进行shuffle
X, y = shuffle(self.X_train, self.y_train)
kfold = StratifiedKFold(n_splits=cv, shuffle=False)
                model_lasso = LogisticRegressionCV(Cs=alpha_range, cv=cv, penalty='l1', solver='liblinear')  # an L1-capable solver must be set explicitly on recent scikit-learn
model_lasso.fit(X, y)
coef = pd.Series(np.ravel(model_lasso.coef_))
print("% s th shuffle, Lasso picked " % i + str(
sum(coef != 0)) + " variables and eliminated the other " + str(
sum(coef == 0)) + " variables")
                # Best penalty parameter found by cross-validation
                alpha = model_lasso.C_  # C_ holds the C chosen by CV; Cs_ is the whole candidate grid
alpha_list.append(alpha)
print('best alpha value is % s' % alpha)
# 将每一次循环的coef都进行相加
coef_sum += model_lasso.coef_
# 提取非零特征的mask
model = SelectFromModel(model_lasso, prefit=True)
# 所有特征的mask,保留的特征用True,被筛掉的特征用False
mask = model.get_support()
# 根据mask将保留特征的名字存储到select_list
for bool, name in zip(mask, self.name):
if bool:
select_list.append(name)
# 求全部特征的coef平均值
coef_mean = coef_sum / shuffle_time
# 每次的特征都存储在select_list中,然后统计每个特征出现的次数,存在feature_freq中,dict形式
feature_freq = dict(zip(*np.unique(select_list, return_counts=True)))
# 每次的alpha都存储在select_list中,然后统计每个特征出现的次数,存在feature_freq中,dict形式
alpha_freq = dict(zip(*np.unique(alpha_list, return_counts=True)))
# 按照特征出现的频率,从大到小进行排序,分别存储特征名和出现次数
select_feature_name = []
select_feature_name_freq = []
for k in sorted(feature_freq, key=feature_freq.__getitem__, reverse=True):
# 特征名相对应的顺序,将每个特征出现的次数存在select_feature_name_freq中
select_feature_name_freq.append(feature_freq[k])
# 将特征名存在select_feature_name中,list形式
select_feature_name.append(k)
# 获取lasso后特征的索引
select_feature_index = []
# 将lasso后特征的名字转为list
name_list = list(select_feature_name)
# 将原始所有特征的名字转为list
all_name_list = list(self.name)
# 获取特征选择后特征在原始特征list中的索引位置,将所有索引位置存在select_feature_index中
for i in range(len(select_feature_name)):
index = all_name_list.index(name_list[i])
select_feature_index.append(index)
# 按照alpha出现的频率,从大到小进行排序,分别存储alpha的大小和出现次数
alpha_value = []
alpha_value_freq = []
for k in sorted(alpha_freq, key=alpha_freq.__getitem__, reverse=True):
# alpha值相对应的顺序,将每个alpha值出现的次数存在alpha_value_freq中
alpha_value_freq.append(alpha_freq[k])
# 将alpha的值存在alpha_value中,list形式
alpha_value.append(k)
print('alpha value % s appeared % s times in the loop' % (k, alpha_freq[k]))
# 通过索引将选择后的特征矩阵赋值给select_feature
new_train_feature = self.X_train[:, select_feature_index]
new_test_feature = self.X_test[:, select_feature_index]
feature_weight = coef_mean[select_feature_index]
# 将输出值存入txt文件
lasso_txt.write('\nSelected feature index:\n')
lasso_txt.write(str(select_feature_index))
lasso_txt.write('\n---------------------------------------------\n')
lasso_txt.write('\nSelected feature weight: \n')
lasso_txt.write(str(feature_weight))
lasso_txt.write('\n---------------------------------------------\n')
lasso_txt.write('\nSelected feature name:\n')
lasso_txt.write(str(select_feature_name))
lasso_txt.write('\n---------------------------------------------\n')
lasso_txt.write('\nSelected feature appearance frequency:\n')
lasso_txt.write(str(select_feature_name_freq))
lasso_txt.write('\n---------------------------------------------\n')
return new_train_feature, new_test_feature, select_feature_name, \
select_feature_name_freq, feature_weight, select_feature_index
else:
# 初始化权重为0,初始化特征列表为空
coef_sum = 0
select_list = []
# 开始shuffle循环,每次都存储选择后的特征名
for i in range(shuffle_time):
# 将数据进行shuffle
X, y = shuffle(self.X_train, self.y_train)
                model_lasso = LogisticRegression(C=alpha_range, penalty='l1', solver='liblinear')  # an L1-capable solver must be set explicitly on recent scikit-learn
model_lasso.fit(X, y)
coef = pd.Series(np.ravel(model_lasso.coef_))
print("% s th shuffle, Lasso picked " % i + str(
sum(coef != 0)) + " variables and eliminated the other " + str(
sum(coef == 0)) + " variables")
# 将每一次循环的coef都进行相加
coef_sum += model_lasso.coef_
# 提取非零特征的mask
model = SelectFromModel(model_lasso, prefit=True)
# 所有特征的mask,保留的特征用True,被筛掉的特征用False
mask = model.get_support()
# 根据mask将保留特征的名字存储到select_list
for bool, name in zip(mask, self.name):
if bool:
select_list.append(name)
# 求全部特征的coef平均值
coef_mean = coef_sum / shuffle_time
# 每次的特征都存储在select_list中,然后统计每个特征出现的次数,存在feature_freq中,dict形式
feature_freq = dict(zip(*np.unique(select_list, return_counts=True)))
# 按照特征出现的频率,从大到小进行排序,分别存储特征名和出现次数
select_feature_name = []
select_feature_name_freq = []
for k in sorted(feature_freq, key=feature_freq.__getitem__, reverse=True):
# 特征名相对应的顺序,将每个特征出现的次数存在select_feature_name_freq中
select_feature_name_freq.append(feature_freq[k])
# 将特征名存在select_feature_name中,list形式
select_feature_name.append(k)
# 获取lasso后特征的索引
select_feature_index = []
# 将lasso后特征的名字转为list
name_list = list(select_feature_name)
# 将原始所有特征的名字转为list
all_name_list = list(self.name)
# 获取特征选择后特征在原始特征list中的索引位置,将所有索引位置存在select_feature_index中
for i in range(len(select_feature_name)):
index = all_name_list.index(name_list[i])
select_feature_index.append(index)
# 通过索引将选择后的特征矩阵赋值给select_feature
new_train_feature = self.X_train[:, select_feature_index]
new_test_feature = self.X_test[:, select_feature_index]
feature_weight = coef_mean[select_feature_index]
# 将输出值存入txt文件
lasso_txt.write('\nSelected feature index:\n')
lasso_txt.write(str(select_feature_index))
lasso_txt.write('\n---------------------------------------------\n')
lasso_txt.write('\nSelected feature weight: \n')
lasso_txt.write(str(feature_weight))
lasso_txt.write('\n---------------------------------------------\n')
lasso_txt.write('\nSelected feature name:\n')
lasso_txt.write(str(select_feature_name))
lasso_txt.write('\n---------------------------------------------\n')
lasso_txt.write('\nSelected feature appearance frequency:\n')
lasso_txt.write(str(select_feature_name_freq))
lasso_txt.write('\n---------------------------------------------\n')
return new_train_feature, new_test_feature, select_feature_name, \
select_feature_name_freq, feature_weight, select_feature_index
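# Minimal usage sketch for the class above (illustrative only; X, y, names and the paths are placeholders):
#
#   X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, stratify=y)
#   selector = lasso(X_train, X_test, y_train, y_test, feature_name=names, path='./results', cv_val=True)
#   best_alpha, new_train, new_test, kept_names, kept_weights = selector.lasso(alpha=np.logspace(-3, 1, 50), cv=10)
#   # or, for the stability-selection style variant:
#   # outputs = selector.lasso_shuffle(shuffle_time=100, alpha_range=np.logspace(-3, 1, 50), cv=10)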
class elastic_net():
    '''Elastic net for feature selection; it can select groups of correlated features.
    Inputs:
        X_train: training-set feature matrix
        X_test: test-set feature matrix
        y_train: training-set labels
        y_test: test-set labels
        feature_name: feature names corresponding to the columns of the feature matrix
        cv_val: bool, whether to run grid-search cross-validation
        path: storage path for the results
'''
def __init__(self, X_train, X_test, y_train, y_test, feature_name, cv_val, path):
self.X_train = X_train
self.X_test = X_test
self.y_train = y_train
self.y_test = y_test
self.name = feature_name
self.cv_val = cv_val
self.path = path
def elastic_net(self, l1, alphas, cv):
if self.cv_val is True:
elas = ElasticNetCV(l1_ratio=l1, alphas=alphas, cv=cv)
elas.fit(self.X_train, self.y_train)
coef = pd.Series(elas.coef_)
print("Elastic Net picked " + str(sum(coef != 0)) + " variables and eliminated the other " + str(
sum(coef == 0)) + " variables")
img_path = os.path.join(self.path, 'ElasticNetCV')
os.makedirs(img_path, exist_ok=True)
# 交叉验证得到的最佳lasso惩罚参数
best_alpha = elas.alpha_
best_l1_ratio = elas.l1_ratio_
best_coef = elas.coef_
best_alphas = elas.alphas_
best_mse_path = elas.mse_path_
print('-----------------------------')
print('Best Elastic Net alpha:')
print(best_alpha)
# 将lasso中权重不为0的特征选择出来
model = SelectFromModel(elas, prefit=True)
# 分别将训练集和测试集的特征使用上述lasso进行筛选
X_new_train = model.transform(self.X_train)
X_new_test = model.transform(self.X_test)
# print(X_new_test.shape)
# print(model.get_support())
# 所有特征的mask,保留的特征用True,被筛掉的特征用False
mask = model.get_support()
new_feature_name = []
feature_weight = []
# 根据mask将保留特征的名字和权重分别存储到
for bool, feature, coef in zip(mask, self.name, coef):
if bool:
new_feature_name.append(feature)
feature_weight.append(coef)
# 将训练集和测试集的保留特征加上特征名
new_train_feature = pd.DataFrame(data=X_new_train, columns=new_feature_name)
new_test_feature = pd.DataFrame(data=X_new_test, columns=new_feature_name)
feature_weight = pd.Series(feature_weight)
return best_alpha, new_train_feature, new_test_feature, new_feature_name, feature_weight
else:
elas = ElasticNet(l1_ratio=l1, alpha=alphas)
elas.fit(self.X_train, self.y_train)
coef = pd.Series(elas.coef_)
print("Elastic Net picked " + str(sum(coef != 0)) + " variables and eliminated the other " + str(
sum(coef == 0)) + " variables")
img_path = os.path.join(self.path, 'ElasticNetCV')
os.makedirs(img_path, exist_ok=True)
coef1 = elas.coef_
sparse = elas.sparse_coef_
# 将elas中权重不为0的特征选择出来
model = SelectFromModel(elas, prefit=True)
# 分别将训练集和测试集的特征使用上述lasso进行筛选
X_new_train = model.transform(self.X_train)
X_new_test = model.transform(self.X_test)
# 所有特征的mask,保留的特征用True,被筛掉的特征用False
mask = model.get_support()
new_feature_name = []
feature_weight = []
# 根据mask将保留特征的名字和权重分别存储到
for bool, feature, coef in zip(mask, self.name, coef):
if bool:
new_feature_name.append(feature)
feature_weight.append(coef)
# 将训练集和测试集的保留特征加上特征名
new_train_feature = pd.DataFrame(data=X_new_train, columns=new_feature_name)
new_test_feature = pd.DataFrame(data=X_new_test, columns=new_feature_name)
feature_weight = pd.Series(feature_weight)
return new_train_feature, new_test_feature, new_feature_name, feature_weight
def elasticnet_shuffle(self, l1_range, alphas_range, shuffle_time=100, cv=10, freq_seq=False):
        '''Estimate the feature weights over many shuffle loops and select features by how frequently they are kept across the loops.
        Inputs:
            freq_seq: whether to sort the features by appearance frequency; if False the original feature order is kept and only a subset of the features is extracted
'''
# 将返回的值存入txt文件中
elas_txt = open(os.path.join(self.path, 'elastic net_shuffle.txt'), 'w')
elas_txt.write('Elastic Net parameters set:\n')
elas_txt.write('\n---------------------------------------------\n')
elas_txt.write('Grid search: % s' % self.cv_val)
elas_txt.write('\nL1_ratio range: % s' % l1_range)
elas_txt.write('\nAlpha range: % s' % alphas_range)
elas_txt.write('\nShuffle time: % s' % shuffle_time)
elas_txt.write('\nGrid search cv-fold: % s' % cv)
elas_txt.write('\n---------------------------------------------\n')
if self.cv_val is True:
# 初始化权重为0,初始化特征列表为空
coef_sum = 0
select_list = []
# 初始化最佳参数alpha
alpha_list = []
# 开始shuffle循环,每次都存储选择后的特征名
for i in range(shuffle_time):
# 将数据进行shuffle
X, y = shuffle(self.X_train, self.y_train)
kfold = StratifiedKFold(n_splits=cv, shuffle=False)
model_elas = ElasticNetCV(l1_ratio=l1_range, alphas=alphas_range, cv=cv)
model_elas.fit(X, y)
coef = pd.Series(model_elas.coef_)
print("% s th shuffle, Elastic net picked " % i + str(
sum(coef != 0)) + " variables and eliminated the other " + str(
sum(coef == 0)) + " variables")
# 交叉验证得到的最佳lasso惩罚参数
alpha = model_elas.alpha_
l1_ratio = model_elas.l1_ratio_
alphas = model_elas.alphas_
mse_path = model_elas.mse_path_
alpha_list.append(alpha)
print('best alpha value is % s' % alpha)
# 将每一次循环的coef都进行相加
coef_sum += model_elas.coef_
# 提取非零特征的mask
model = SelectFromModel(model_elas, prefit=True)
# 所有特征的mask,保留的特征用True,被筛掉的特征用False
mask = model.get_support()
# 根据mask将保留特征的名字存储到select_list
for bool, name in zip(mask, self.name):
if bool:
select_list.append(name)
            # Average coefficient over all loops; this mean is only returned as the per-feature weight and is not used for the selection itself
coef_mean = coef_sum / shuffle_time
# 每次的特征都存储在select_list中,然后统计每个特征出现的次数,存在feature_freq中,dict形式
feature_freq = dict(zip(*np.unique(select_list, return_counts=True)))
# 每次的alpha都存储在select_list中,然后统计每个特征出现的次数,存在feature_freq中,dict形式
alpha_freq = dict(zip(* | np.unique(alpha_list, return_counts=True) | numpy.unique |
import numpy as np
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import normal_init
from mmdet.core import multi_apply, multiclass_nms
from mmdet.core.bbox import bbox_overlaps, center_to_point, point_to_center
from mmdet.models.losses import weighted_l1
from .anchor_head import AnchorHead
from ..registry import HEADS
class YOLOv3Output(nn.Module):
def __init__(self,
in_channel,
anchors,
stride,
num_classes,
alloc_size=(128, 128)):
super(YOLOv3Output, self).__init__()
anchors = | np.array(anchors) | numpy.array |
import matplotlib.pyplot as plt
import numpy as np
import math
from matplotlib import rc
from scipy.linalg import toeplitz
from numpy.linalg import inv
__author__ = 'ernesto'
# if use latex or mathtext
rc('text', usetex=False)
rc('mathtext', fontset='cm')
# auxiliar function for plot ticks of equal length in x and y axis despite its scales.
def convert_display_to_data_coordinates(transData, length=10):
# create a transform which will take from display to data coordinates
inv = transData.inverted()
# transform from display coordinates to data coordinates in x axis
data_coords = inv.transform([(0, 0), (length, 0)])
# get the length of the segment in data units
yticks_len = data_coords[1, 0] - data_coords[0, 0]
# transform from display coordinates to data coordinates in y axis
data_coords = inv.transform([(0, 0), (0, length)])
# get the length of the segment in data units
xticks_len = data_coords[1, 1] - data_coords[0, 1]
return xticks_len, yticks_len
#####################################
# PARAMETERS - This can be modified #
#####################################
# number of samples
N = 50
# autocorrelation parameters
r0 = 1.81
r2 = 0.9
#####################
# END OF PARAMETERS #
#####################
# abscissa values
xmin = 0
xmax = 0.5
num_samples = 2000
f = np.linspace(xmin, xmax, num_samples)
Pww = r0 + 2 * r2 * np.cos(4 * math.pi * f)
# autocorrelation vector
r = np.zeros((N,))
r[0] = r0
r[2] = r2
# covariance matrix
C = toeplitz(r)
C_inv = inv(C)
Ns = np.arange(N)
ns = np.arange(num_samples)
var_A = np.zeros((num_samples,))
for n in ns:
s = np.cos(2 * math.pi * f[n] * Ns)
var_A[n] = 1 / s.dot(C_inv.dot(s))
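# Note (added for clarity, based on the standard linear-model result): for x[n] = A*cos(2*pi*f*n) + w[n]
# with w ~ N(0, C), the variance of the best linear unbiased estimate of the amplitude A is
#     var(A_hat) = 1 / (s^T C^{-1} s),   s = [cos(2*pi*f*n)] for n = 0..N-1,
# which is what the loop above evaluates on the frequency grid f.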
# axis parameters
dx = 0.04
xmin_ax = xmin - dx
xmax_ax = xmax + dx
dy = 0.6
ymax_ax = | np.amax(Pww) | numpy.amax |
from __future__ import print_function, division, absolute_import
import numpy as np
from scipy.misc import imsave, imread, imresize
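# Note: imsave/imread/imresize were removed from scipy.misc in SciPy >= 1.2; with a recent SciPy,
# equivalents from imageio (imread/imwrite) plus PIL or skimage.transform.resize are needed instead.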
from sklearn.feature_extraction.image import reconstruct_from_patches_2d, extract_patches_2d
from scipy.ndimage.filters import gaussian_filter
from keras import backend as K
import os
import time
'''
_image_scale_multiplier is a special variable which is used to alter image size.
The default image size is 32x32. If a true upscaling model is used, then the input image size is 16x16,
which not offer adequate training samples.
'''
_image_scale_multiplier = 1
img_size = 128 * _image_scale_multiplier
stride = 64 * _image_scale_multiplier
assert (img_size ** 2) % (stride ** 2) == 0, "Number of images generated from strided subsample of the image needs to be \n" \
"a positive integer. Change stride such that : \n" \
"(img_size ** 2) / (stride ** 2) is a positive integer."
input_path = r"D:\Yue\Documents\Datasets\train2014\train2014\\" # r"input_images/"
validation_path = r"val_images/" # r"D:\Yue\Documents\Datasets\MSCOCO\val\valset\\" # r"val_images/"
validation_set5_path = validation_path + "set5/"
validation_set14_path = validation_path + "set14/"
base_dataset_dir = os.path.expanduser("~") + "/Image Super Resolution Dataset/"
output_path = base_dataset_dir + "train_images/train/"
validation_output_path = base_dataset_dir + r"train_images/validation/"
if not os.path.exists(output_path):
os.makedirs(output_path)
# def transform_images(directory, output_directory, scaling_factor=2, max_nb_images=-1, true_upscale=False):
# index = 1
#
# if not os.path.exists(output_directory + "X/"):
# os.makedirs(output_directory + "X/")
#
# if not os.path.exists(output_directory + "y/"):
# os.makedirs(output_directory + "y/")
#
# # For each image in input_images directory
# nb_images = len([name for name in os.listdir(directory)])
#
# if max_nb_images != -1:
# print("Transforming %d images." % max_nb_images)
# else:
# assert max_nb_images <= nb_images, "Max number of images must be less than number of images in path"
# print("Transforming %d images." % (nb_images))
#
# if nb_images == 0:
# print("Extract the training images or images from imageset_91.zip (found in the releases of the project) "
# "into a directory with the name 'input_images'")
# print("Extract the validation images or images from set5_validation.zip (found in the releases of the project) "
# "into a directory with the name 'val_images'")
# exit()
#
# for file in os.listdir(directory):
# img = imread(directory + file, mode='RGB')
#
# # Resize to 256 x 256
# img = imresize(img, (img_size, img_size))
#
# # Create patches
# hr_patch_size = (16 * scaling_factor * _image_scale_multiplier)
# nb_hr_images = (img_size ** 2) // (stride ** 2)
#
# hr_samples = np.empty((nb_hr_images, hr_patch_size, hr_patch_size, 3))
#
# image_subsample_iterator = subimage_generator(img, stride, hr_patch_size, nb_hr_images)
#
# stride_range = np.sqrt(nb_hr_images).astype(int)
#
# i = 0
# for j in range(stride_range):
# for k in range(stride_range):
# hr_samples[i, :, :, :] = next(image_subsample_iterator)
# i += 1
#
# lr_patch_size = 16 * _image_scale_multiplier
#
# t1 = time.time()
# # Create nb_hr_images 'X' and 'Y' sub-images of size hr_patch_size for each patch
# for i in range(nb_hr_images):
# ip = hr_samples[i]
# # Save ground truth image X
# imsave(output_directory + "/y/" + "%d_%d.png" % (index, i + 1), ip)
#
# # Apply Gaussian Blur to Y
# op = gaussian_filter(ip, sigma=0.5)
#
# # Subsample by scaling factor to Y
# op = imresize(op, (lr_patch_size, lr_patch_size), interp='bicubic')
#
# if not true_upscale:
# # Upscale by scaling factor to Y
# op = imresize(op, (hr_patch_size, hr_patch_size), interp='bicubic')
#
# # Save Y
# imsave(output_directory + "/X/" + "%d_%d.png" % (index, i+1), op)
#
# print("Finished image %d in time %0.2f seconds. (%s)" % (index, time.time() - t1, file))
# index += 1
#
# if max_nb_images > 0 and index >= max_nb_images:
# print("Transformed maximum number of images. ")
# break
#
# print("Images transformed. Saved at directory : %s" % (output_directory))
def transform_images_temp(directory, output_directory, scaling_factor=2, max_nb_images=-1, true_upscale=False,
id_advance=0):
index = 1
if not os.path.exists(output_directory + "X/"):
os.makedirs(output_directory + "X/")
if not os.path.exists(output_directory + "y/"):
os.makedirs(output_directory + "y/")
# For each image in input_images directory
nb_images = len([name for name in os.listdir(directory)])
    if max_nb_images != -1:
        assert max_nb_images <= nb_images, "Max number of images must be less than number of images in path"
        print("Transforming %d images." % max_nb_images)
    else:
        print("Transforming %d images." % (nb_images))
if nb_images == 0:
print("Extract the training images or images from imageset_91.zip (found in the releases of the project) "
"into a directory with the name 'input_images'")
print("Extract the validation images or images from set5_validation.zip (found in the releases of the project) "
"into a directory with the name 'val_images'")
exit()
for file in os.listdir(directory):
img = imread(directory + file, mode='RGB')
# Resize to 256 x 256
img = imresize(img, (img_size, img_size))
# Create patches
hr_patch_size = 64
lr_patch_size = 32
nb_hr_images = (img_size ** 2) // (stride ** 2)
hr_samples = np.empty((nb_hr_images, hr_patch_size, hr_patch_size, 3))
image_subsample_iterator = subimage_generator(img, stride, hr_patch_size, nb_hr_images)
stride_range = np.sqrt(nb_hr_images).astype(int)
i = 0
for j in range(stride_range):
for k in range(stride_range):
hr_samples[i, :, :, :] = next(image_subsample_iterator)
i += 1
t1 = time.time()
# Create nb_hr_images 'X' and 'Y' sub-images of size hr_patch_size for each patch
for i in range(nb_hr_images):
ip = hr_samples[i]
# Save ground truth image X
imsave(output_directory + "/y/" + "%d_%d.png" % (index + id_advance, i + 1), ip)
# Apply Gaussian Blur to Y
#op = gaussian_filter(ip, sigma=0.5)
# Subsample by scaling factor to Y
op = imresize(ip, (lr_patch_size, lr_patch_size), interp='bicubic')
if not true_upscale:
# Upscale by scaling factor to Y
op = imresize(op, (hr_patch_size, hr_patch_size), interp='bicubic')
# Save Y
imsave(output_directory + "/X/" + "%d_%d.png" % (index + id_advance, id_advance + i + 1), op)
print("Finished image %d in time %0.2f seconds. (%s)" % (index + id_advance, time.time() - t1, file))
index += 1
if max_nb_images > 0 and index >= max_nb_images:
print("Transformed maximum number of images. ")
break
print("Images transformed. Saved at directory : %s" % (output_directory))
def image_count():
return len([name for name in os.listdir(output_path + "X/")])
def val_image_count():
return len([name for name in os.listdir(validation_output_path + "X/")])
def subimage_generator(img, stride, patch_size, nb_hr_images):
for _ in range(nb_hr_images):
for x in range(0, img_size, stride):
for y in range(0, img_size, stride):
subimage = img[x : x + patch_size, y : y + patch_size, :]
yield subimage
def make_patches(x, scale, patch_size, upscale=True, verbose=1):
'''x shape: (num_channels, rows, cols)'''
height, width = x.shape[:2]
if upscale: x = imresize(x, (height * scale, width * scale))
patches = extract_patches_2d(x, (patch_size, patch_size))
return patches
def combine_patches(in_patches, out_shape, scale):
'''Reconstruct an image from these `patches`'''
recon = reconstruct_from_patches_2d(in_patches, out_shape)
return recon
def image_generator(directory, scale_factor=2, target_shape=None, channels=3, small_train_images=False, shuffle=True,
batch_size=32, nb_inputs=1, seed=None):
if not target_shape:
if small_train_images:
if K.image_dim_ordering() == "th":
image_shape = (channels, 16 * _image_scale_multiplier, 16 * _image_scale_multiplier)
y_image_shape = (channels, 16 * scale_factor * _image_scale_multiplier,
16 * scale_factor * _image_scale_multiplier)
else:
# image_shape = (16 * _image_scale_multiplier, 16 * _image_scale_multiplier, channels)
# y_image_shape = (16 * scale_factor * _image_scale_multiplier,
# 16 * scale_factor * _image_scale_multiplier, channels)
image_shape = (32 * _image_scale_multiplier, 32 * _image_scale_multiplier, channels)
y_image_shape = (32 * scale_factor * _image_scale_multiplier,
32 * scale_factor * _image_scale_multiplier, channels)
else:
if K.image_dim_ordering() == "th":
image_shape = (channels, 32 * scale_factor * _image_scale_multiplier, 32 * scale_factor * _image_scale_multiplier)
y_image_shape = image_shape
else:
image_shape = (32 * scale_factor * _image_scale_multiplier, 32 * scale_factor * _image_scale_multiplier,
channels)
y_image_shape = image_shape
else:
if small_train_images:
if K.image_dim_ordering() == "th":
y_image_shape = (3,) + target_shape
target_shape = (target_shape[0] * _image_scale_multiplier // scale_factor,
target_shape[1] * _image_scale_multiplier // scale_factor)
image_shape = (3,) + target_shape
else:
y_image_shape = target_shape + (channels,)
target_shape = (target_shape[0] * _image_scale_multiplier // scale_factor,
target_shape[1] * _image_scale_multiplier // scale_factor)
image_shape = target_shape + (channels,)
else:
if K.image_dim_ordering() == "th":
image_shape = (channels,) + target_shape
y_image_shape = image_shape
else:
image_shape = target_shape + (channels,)
y_image_shape = image_shape
file_names = [f for f in sorted(os.listdir(directory + "X/"))]
X_filenames = [os.path.join(directory, "X", f) for f in file_names]
y_filenames = [os.path.join(directory, "y", f) for f in file_names]
nb_images = len(file_names)
print("Found %d images." % nb_images)
index_generator = _index_generator(nb_images, batch_size, shuffle, seed)
while 1:
index_array, current_index, current_batch_size = next(index_generator)
batch_x = np.zeros((current_batch_size,) + image_shape)
batch_y = np.zeros((current_batch_size,) + y_image_shape)
for i, j in enumerate(index_array):
x_fn = X_filenames[j]
img = imread(x_fn, mode='RGB')
if small_train_images:
img = imresize(img, (32 * _image_scale_multiplier, 32 * _image_scale_multiplier))
img = img.astype('float32') / 255.
if K.image_dim_ordering() == "th":
batch_x[i] = img.transpose((2, 0, 1))
else:
batch_x[i] = img
y_fn = y_filenames[j]
img = imread(y_fn, mode="RGB")
img = img.astype('float32') / 255.
if K.image_dim_ordering() == "th":
batch_y[i] = img.transpose((2, 0, 1))
else:
batch_y[i] = img
if nb_inputs == 1:
yield (batch_x, batch_y)
else:
batch_x = [batch_x for i in range(nb_inputs)]
yield batch_x, batch_y
def _index_generator(N, batch_size=32, shuffle=True, seed=None):
batch_index = 0
total_batches_seen = 0
while 1:
if seed is not None:
np.random.seed(seed + total_batches_seen)
if batch_index == 0:
index_array = | np.arange(N) | numpy.arange |
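# Illustrative sketch (self-contained, hypothetical sizes): image_generator() above consumes this
# generator as `index_array, current_index, current_batch_size = next(index_generator)`; the
# batching boils down to slicing a (possibly shuffled) np.arange(N).
import numpy as np
N, batch_size = 10, 4
rng = np.random.RandomState(0)
index_array = rng.permutation(N)                 # shuffle=True case; plain np.arange(N) otherwise
for start in range(0, N, batch_size):
    batch = index_array[start:start + batch_size]
    print(start, batch)                          # the last batch may be smaller than batch_size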
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 27 13:40:56 2015
Line stores the start and end points of a line. The module also provides many of
the important line checking functions, such as checking intersections and offsets.
Line start/end points are immutable but the extrusion rate and freezeExRate
can be changed.
@author: lvanhulle
"""
import point as p
import numpy as np
import constants as c
import time
logger = c.logging.getLogger(__name__)
class Line(object):
def __init__(self, start, end, oldLine = None):
"""
Takes in the start and end points of a line plus an optional "oldLine".
        oldLine is used when a new line is created from an existing line and
        the operator wants the new line to have the same extrusion properties
        as the existing line. This is used for all of the line transformations.
"""
self.__start = start
self.__end = end
if(self.__start == self.__end):
"""
            If a zero-length line is created, that most likely means there is a
            logic problem somewhere in the program. This does not throw an error
so that the output can still be examined to help diagnose the problem.
"""
# raise Exception('Zero length line')
logger.warning('A line was created with no length at: ' +
str(self.start))
""" The Point which is the upper left corner of the line's bounding box """
self.__upperLeft = None
""" The Point of the lower right corner of the bounding box. """
self.__lowerRight = None
if oldLine is not None:
self.extrusionFactor = oldLine.extrusionFactor
else:
self.extrusionFactor = None
self.vector = np.array([self.end.x-self.start.x,
self.end.y-self.start.y])
@property
def upperLeft(self):
if self.__upperLeft is None:
tempList = [[self.start.x, self.end.x],
[self.start.y, self.end.y]]
for row in tempList:
row.sort()
self.__upperLeft = p.Point(tempList[0][0], tempList[1][1])
self.__lowerRight = p.Point(tempList[0][1], tempList[1][0])
return self.__upperLeft
@property
def lowerRight(self):
if self.__lowerRight is None:
tempList = [[self.start.x, self.end.x],
[self.start.y, self.end.y]]
for row in tempList:
row.sort()
self.__upperLeft = p.Point(tempList[0][0], tempList[1][1])
self.__lowerRight = p.Point(tempList[0][1], tempList[1][0])
return self.__lowerRight
@property
def start(self):
return self.__start
@property
def end(self):
return self.__end
@property
def length(self):
return self.start - self.end
def __iter__(self):
yield self.start
yield self.end
@property
def angle(self):
angle = np.arctan2(self.end.y-self.start.y, self.end.x-self.start.x)
return angle if angle >= 0 else angle + 2*np.pi
def calcT(self, point):
""" Returns a constant representing point's location along self.
The point is assumed to be on self. T is the constant along self
where point is located with start being 0, end being 1, <0 is behind
the line and >0 is past the line segment
Parameters
----------
point - The test point.
Return
------
A constant representing point's location along self.
"""
index = np.argmax(np.abs(self.start.normalVector[:2]-self.end.normalVector[:2]))
return (point[index] - self.start[index])/(self.end[index]-self.start[index])
def areParallel(self, line):
"""
returns True if the two lines are parallel
This method tests if two lines are parallel by finding the angle
between the perpendicular vector of the first line and the second line.
If the dot product between perpVect and the vect of line2 is zero then
line1 and line2 are parallel. Farin and Hansford recommend checking within
a physically meaningful tolerance so equation 3.14 from pg 50 of
Farin-Hansford Geometry Toolbox is used to compute the cosine of the angle
and compare that to our ANGLE_EPS. If cosTheda < ANGLE_EPS then the lines
are parallel.
Parameters
----------
        line - the other line to test against self
Return
------
True if lines are parallel within ANGLE_EPS else False
"""
# A vector perpendicular to line1
perpVect = np.array([-self.vector[c.Y], self.vector[c.X]])
# Farin-Hansford eq 3.14
cosTheda = (np.dot(perpVect, line.vector)/
(np.linalg.norm(perpVect)*np.linalg.norm(line.vector)))
# if cosTheda is < c.EPSILON then the lines are parallel and we return True
return abs(cosTheda) < c.EPSILON
def segmentsIntersect(self, other, allowProjInt = False):
"""
Probably the most important method in the Line module. This tests to
see if two line segments intersect and returns a tuple containing
a number code for the type of intersection and the Point of intersection
or projected point of intersection if there is one and it was allowed.
The calculation of t and u is stable but testing to make sure that the
point in on the line is the difficult part of this method. A better
solution should probably be found.
-3 = bounding boxes do not intersect
        2 = lines were colinear and shared an end point
        2.5 = lines were colinear and had identical end points
        3 = lines were colinear and did not share an end point
-1 = Projected intersection of non-colinear lines
1 = intersection of non-colinear lines
"""
"""
If we are not allowing projected intersection and the bounding boxes
do not intersect then return -3, None.
"""
if(not(allowProjInt) and not(self.doBoundingBoxesIntersect(other))): return -3, None #return if bounding boxes do not intersect
""" A special case for colinear lines. """
if(self.areColinear(other)):
"""
            First place all four endpoints into a set. This will eliminate shared
end points. Next, convert the set back into a list so it can
finally be sorted.
"""
pointList = sorted(list(set([self.start, self.end, other.start, other.end])), key=self.calcT)
if len(pointList) == 3:
"""
if there are only three points in the list then return 2, the
middle point in the list since it is the shared point of the
two lines.
"""
return 2, pointList[1] #if they are colinear and two ends have the same point return that point
elif len(pointList) == 2:
""" If the two lines have the same endpoints. """
return 2.5, self.getMidPoint()
else:
"""
If the length was not three then we know it is length 4 in which case
we turn the two middle points into a line and return 3, the line's
midpoint.
"""
tempLine = Line(pointList[1], pointList[2])
return 3, tempLine.getMidPoint() #If they are colinear return half way inbetween middle two points
"""
        To calculate the intersection of the two lines we put them into the
        form P+tr and Q+us where P and Q are the starting points of the lines,
        r and s are vectors from the starting point to the end point, and
        t and u are scalars. Set the two equations equal to each other and
        then solve for t and u. If t and u are both in the range [0-1] then the
        intersection point lies on the lines, else it is a projected point.
"""
r = np.subtract(self.end.get2DPoint(), self.start.get2DPoint())
s = np.subtract(other.end.get2DPoint(), other.start.get2DPoint())
Q_Less_P = np.subtract(other.start.get2DPoint(), self.start.get2DPoint())
denom = np.cross(r, s)*1.0
t = np.cross(Q_Less_P, s)/denom
u = np.cross(Q_Less_P, r)/denom
point = p.Point(self.start.x + r[c.X]*t, self.start.y+r[c.Y]*t)
#If t or u are not in the range 0-1 then the intersection is projected
if(t > 1 or u > 1 or t < 0 or u < 0):
"""
Due to floating point problems sometimes if t or u is outside the 0-1
range we end up inside this if statement but are actually at the end
of one of the lines. I can't figure out how to properly add in a tolerance
so we are taking the four end points putting them into a list,
then comparing them to the calculated point. The Point module is
properly handling tolerances so if the point == any of the end
points then we should not return a projected point.
"""
if not any(point == lineEnd for lineEnd in (self.start, self.end,
other.start, other.end)):
return -1, point #return for projected intersection of non-colinear lines
return 1, point #lines intersect at given point
def isOnLine(self, point):
""" Tests to see if a point is on the line. """
if((point < self.start and point < self.end) or (
point > self.start and point > self.end)):
return False #point is not between the start and end of self
if(self.getArea(self.start, self.end, point) > c.EPSILON):
return False #points are not co-linear
return True
def getArea(self, p1, p2, p3):
"""
Uses the determinant of a matrix containing the three to find the area
of the triangle formed by the three points.
"""
matrix = [p1.normalVector, p2.normalVector, p3.normalVector, [1,1,1,1]]
matrix = np.rot90(matrix)
return abs(np.linalg.det(matrix))/2.0
def areColinear(self, other):
"""returns True if the two lines are colinear
This method tests if two lines are colinear by finding the angle
between the perpendicular vector of self and lines created from self to
both ends of other.
If the dot product between perpVect and both of the new vectors is zero then
they are colinear. Farin and Hansford recommend checking within
a physically meaningful tolerance so equation 3.14 from pg 50 of
Farin-Hansford Geometry Toolbox is used to compute the cosine of the angle
        and compare that to our ANGLE_EPS. If cosTheda < ANGLE_EPS then the lines
        are colinear.
"""
perpVect = np.array([-self.vector[c.Y], self.vector[c.X]])
vect1 = other.end[:2]-self.start[:2]
vect2 = other.start[:2]-self.start[:2]
cosTheda1 = (np.dot(perpVect, vect1)/
(np.linalg.norm(perpVect)*np.linalg.norm(vect1)))
if abs(cosTheda1) > 0.0001:
return False
cosTheda2 = (np.dot(perpVect, vect2)/
(np.linalg.norm(perpVect)*np.linalg.norm(vect2)))
return not(abs(cosTheda2) > 0.0001)
def doBoundingBoxesIntersect(self, other):
"""
The bounding box of the line is represented be the upper left and
lower right corners of the smallest box which contains the line. If
the bounding boxes for two lines do not intersect then we know that the
two lines do not intersect and we can save a bunch of work.
"""
if(self.upperLeft.x <= other.lowerRight.x and
self.lowerRight.x >= other.upperLeft.x and
self.upperLeft.y >= other.lowerRight.y and
self.lowerRight.y <= other.upperLeft.y):
return True
return False
def translate(self, shiftX, shiftY, shiftZ=0):
newStart = self.start.translate(shiftX, shiftY, shiftZ)
newEnd = self.end.translate(shiftX, shiftY, shiftZ)
return Line(newStart, newEnd, self)
def mirror(self, axis):
newStart = self.start.mirror(axis)
newEnd = self.end.mirror(axis)
return Line(newStart, newEnd, self)
def rotate(self, angle, point):
if(point is None): point = p.Point(0,0)
newStart = self.start.rotate(angle, point)
newEnd = self.end.rotate(angle, point)
return Line(newStart, newEnd, self)
def fliped(self):
""" returns a line with the start and end points flipped form self. """
return Line(self.end, self.start, self)
def getOffsetLine(self, distance, side=c.INSIDE):
""" Calculates and returns the two lines on either side of self offset distance."""
StartA = np.array([self.start.x, self.start.y])
EndA = np.array([self.end.x, self.end.y])
r = StartA - EndA #The slope vector of self
        rn = np.array([-r[c.Y], r[c.X]]) #swap x and y and negate the new x to get a vector normal to the slope
        rn = rn/np.linalg.norm(rn)*distance #normalize by dividing by its magnitude and multiply by distance to get the correct length
if side == c.INSIDE:
return self.translate(-rn[c.X], -rn[c.Y]) #the "minus" side line is the left side which is inside.
return self.translate(rn[c.X], rn[c.Y]) #the "Plus" side of the line is the right side which is outside.
def sideOfLine(self, point):
dist = self.pointToLineDist(point)
if abs(dist) < c.EPSILON:
return 0
return c.LEFT if dist < 0 else c.RIGHT
def pointToLineDist(self, point):
perpVect = | np.array([self.vector[c.Y], -self.vector[c.X]]) | numpy.array |
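# Illustrative sketch (a plausible standalone version, since pointToLineDist is truncated here):
# the signed distance comes from projecting (point - start) onto a vector perpendicular to the
# line's direction, using the same (v_y, -v_x) convention as above; values are made up.
import numpy as np
start, end, point = np.array([0.0, 0.0]), np.array([10.0, 0.0]), np.array([3.0, 2.0])
vector = end - start
perp = np.array([vector[1], -vector[0]])
signed_dist = np.dot(point - start, perp) / np.linalg.norm(perp)
print(signed_dist)    # -2.0 here; sideOfLine() maps the sign of this distance to LEFT/RIGHT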
import numpy as np
from scipy.spatial.transform import Rotation as R
import math
import plotting
#import time
def umba(x,y,z,Vtot,Theta, Psi, SpinRate, TiltH, Tiltm, SpinE, Yang, Zang, LorR, i, seamsOn, FullRot):
"""
The inputs are:
x,
y,
z,
Initial Ball Speed,
vertical release angle,
horizontal release angle,
Spin Rate,
Tilt in Hours,
Tilt in minutes,
Spin Efficiency,
Y seam orientation angle,
Z seam orientation angle,
    Primary inputs are: initial position, x0, y0, and z0 with origin at the
    point of home plate, x to the right of the catcher, y from the catcher
    towards the pitcher, and z straight up. Initial velocities
    u0, v0, and w0 which are the speeds of the ball in x, y, and z
    respectively. And spin rates.
    UMBA1.0: This code uses a constant Cd and Rod Cross's model for CL
    predictions. Seam orientation is not accounted for. Air density is
    considered only at sea level at 60% relative humidity, but can be easily
    altered.
UMBA2.0 Adding seam positions and attempting to model CL from seams.
"""
Yang = (Yang) * np.pi/180
Zang = -Zang * np.pi/180
# seamsOn = True
frameRate = 0.002
Tilt = TimeToTilt(TiltH, Tiltm)
if LorR == 'l':
Gyro = np.arcsin(SpinE/100)
elif LorR =='r':
Gyro = np.pi - np.arcsin(SpinE/100)
    else:
        # keep prompting until a valid handedness flag is supplied
        while LorR != 'l' and LorR != 'r':
            LorR = input('please type in an "l" or an "r" for which pole goes forward')
        if LorR == 'l':
            Gyro = np.arcsin(SpinE/100)
        else:
            Gyro = np.pi - np.arcsin(SpinE/100)
positions,NGfinal = (PitchedBallTraj(x, y, z, Vtot, Theta, Psi, SpinRate, Tilt, Gyro, Yang, Zang, i, frameRate, seamsOn, FullRot))
plotting.Plotting(positions)
pX = (positions[0])
pY = (positions[1])
pZ = (positions[2])
IX = (positions[3])
IY = (positions[4])
IZ = (positions[5])
DX = (positions[6])
DY = (positions[7])
DZ = (positions[8])
FX = (positions[9])
FY = (positions[10])
FZ = (positions[11])
TF = (positions[12])
aX = (positions[13])
aY = (positions[14])
aZ = (positions[15])
TiltDeg = np.arctan2((NGfinal[0] - x + ((60-y)*np.arctan(Psi*np.pi/180))), (NGfinal[2] - z - (60-z)*np.arctan(Theta*np.pi/180)))*180/np.pi
TiltTime = TiltToTime(-TiltDeg)
print('Apparent Tilt = ',TiltTime[0],':',TiltTime[1])
return(pX,pY,pZ,IX,IY,IZ,DX,DY,DZ,FX,FY,FZ,TF,aX,aY,aZ,TiltTime)
###############################################################################
def FindAccel(pX,pY,pZ,TF):
"""
Find the acceleration given the position and final time of a single pitch
"""
TF = TF
aX = []
aY = []
aZ = []
t = np.linspace(0,TF,num = len(pX))
pX = np.array(pX) / 12
pZ = np.array(pZ) / 12
xCoeff = np.polyfit(t,pX,5)
xvPoly = np.polyder(xCoeff)
xaPoly = np.polyder(xvPoly)
xAPoly = np.poly1d(xaPoly)
yPoly = np.polyfit(t,pY,5)
yvPoly = np.polyder(yPoly)
yaPoly = np.polyder(yvPoly)
yAPoly = np.poly1d(yaPoly)
zPoly = np.polyfit(t,pZ,5)
zvPoly = np.polyder(zPoly)
zaPoly = np.polyder(zvPoly)
zAPoly = np.poly1d(zaPoly)
for i in range(len(pX)-1):
aX.append(xAPoly(t[i]))
aY.append(yAPoly(t[i]))
aZ.append(zAPoly(t[i]))
return(aX,aY,aZ)
###############################################################################
def normalPlane(VelVec,SpinVecT,t):
"""
finds an acceptable range of seam locations where they can have an affect
on the aerodynamics.
Plans: the code finds the acceptable range wherein the seams can produce a
force on the ball normal to the direction of flight. the force the ball
will also be normal to the surface of the ball at that location.
However, htis portion of code is only to find the range.
"""
SpinVecTNew = np.zeros([3])
# SpinVecTMag = np.linalg.norm(SpinVecT)
# SpinVecTUnit = SpinVecT/SpinVecTMag
dt = 0.001
SpinShiftFactor = 1.5 #this number effects how much the separation location
#Will change based on the spin rate. Bigger, Move shift
forwardBackV = 0.21 # allows for the moving the effectiveness of the seams
# forwards or backwards.
AngleOfActivation = 5
diameter = (2. + 15/16) #in
acceptableRange = diameter*1.05
acceptableThickness = diameter/2*np.sin(AngleOfActivation*2*np.pi/180)
#
#produces a 3d box
xmin0 = -acceptableRange*.5
xmax0 = acceptableRange*.5
zmin0 = -acceptableRange*.5
zmax0 = acceptableRange*.5
ymin0 = -acceptableThickness + forwardBackV
ymax0 = acceptableThickness + forwardBackV
node10 = [xmin0,ymin0,zmin0]
node20 = [xmin0,ymax0,zmin0]
node30 = [xmax0,ymax0,zmin0]
node40 = [xmax0,ymin0,zmin0]
node50 = [xmin0,ymin0,zmax0]
node60 = [xmin0,ymax0,zmax0]
node70 = [xmax0,ymax0,zmax0]
node80 = [xmax0,ymin0,zmax0]
nodes0 = np.asarray([node10, node20, node30, node40, node50, node60, node70, node80])
VRotVec = findRotVec(0, -1., 0, VelVec[0], VelVec[1], VelVec[2])
SpinVecTNew[0] = SpinVecT[0] * SpinShiftFactor * dt
SpinVecTNew[1] = SpinVecT[1] * SpinShiftFactor * dt
SpinVecTNew[2] = SpinVecT[2] * SpinShiftFactor * dt
r = R.from_rotvec(VRotVec)
rr = R.from_rotvec(SpinVecTNew)
nodes1 = r.apply(nodes0)
nodes2 = rr.apply(nodes1)
# print(nodes1-nodes2)
# x0check = nodes0[0,0]
# y0check = nodes0[0,1]
# v1 = [x0check,y0check]
#
# x2check = nodes2[0,0]
# y2check = nodes2[0,1]
# v2 = [x2check,y2check]
# cosang = np.dot(v1, v2)
# sinang = np.linalg.norm(np.cross(v1, v2))
# angle = (np.arctan2(sinang, cosang))
# print(angle*180/np.pi)
# print(x0check, y0check, '\n', x2check, y2check)
# print(np.arctan2(nodes2[1],nodes2[0])*180/np.pi)
return (nodes2)
###############################################################################
def derivs(t, BallState, BallConsts, activeSeams, ng):
"""
This is where the magic happens all models are input here
Ball State:
1, x
2, y
3, z
4, u
5, v
6, w
7, spinx
8, spiny
9, spinz
"""
dy = np.zeros(len(BallState))
u = BallState[3]
v = BallState[4]
w = BallState[5]
Spinx = BallState[6]
Spiny = BallState[7]
Spinz = BallState[8]#rad/sec
VelTot = np.sqrt(u**2 + v**2 + w**2)
SpinRate = np.sqrt(Spinx**2 + Spiny**2 + Spinz**2)
diameter = BallConsts[1] #ft
c0 = BallConsts[4]
rw = (diameter/2)*SpinRate
S = (rw/VelTot)*np.exp(-t/10000) #the "np.exp(-t/NUM) is for spin decay
#for no spin decay NUM should be large. When better data is available on
#spin decay will account for it here likely
Cl = ClCross(S)
# Cl = ClKensrud(S) This is not right yet
CdConst = 0.33
    # The coefficient of seams "Cseams" is essentially the lift coefficient
    # per seam per length away from the origin.
Cseams = .021 #per active seam
aDragx = -c0*CdConst*VelTot*u
aDragy = -c0*CdConst*VelTot*v
aDragz = -c0*CdConst*VelTot*w
aSpinx = c0*(Cl/SpinRate)*VelTot*(Spiny*w - Spinz*v)
aSpiny = c0*(Cl/SpinRate)*VelTot*(Spinz*u - Spinx*w)
aSpinz = c0*(Cl/SpinRate)*VelTot*(Spinx*v - Spiny*u)
SeamXLength = 0
SeamYLength = 0
SeamZLength = 0
if len(activeSeams) > 4:
for i in range(len(activeSeams)):
SeamXLength = SeamXLength + activeSeams[i,0]
SeamYLength = SeamYLength + activeSeams[i,1]
SeamZLength = SeamZLength + activeSeams[i,2]
aSeamsx = -c0*Cseams*(VelTot**2)*SeamXLength
aSeamsy = -c0*Cseams*(VelTot**2)*SeamYLength
aSeamsz = -c0*Cseams*(VelTot**2)*SeamZLength
# print(SeamXLength,SeamYLength, SeamZLength)
ax = aDragx + aSpinx + aSeamsx
ay = aDragy + aSpiny + aSeamsy
if ng == False:
az = aDragz + aSpinz + aSeamsz - 32.2
else:
az = aDragz + aSpinz + aSeamsz
# print az
dSpinx = 0
dSpiny = 0
dSpinz = 0
dy[0] = u
dy[1] = v
dy[2] = w
dy[3] = ax
dy[4] = ay
dy[5] = az
dy[6] = dSpinx
dy[7] = dSpiny
dy[8] = dSpinz
return dy
###############################################################################
def PitchedBallTraj(x,y,z,Vtot, Theta, Psi, SpinRate, Tilt, Gyro, Yangle, Zangle,i, frameRate, seamsOn, FullRot):
    #this is where the work needs to happen now
FullState = anglesTOCart(Vtot, Theta, Psi, SpinRate, Tilt, Gyro, Yangle, Zangle)
print(FullState)
x0 = x
y0 = 60.5 - y
z0 = z
u0 = FullState[0]
v0 = FullState[1]
w0 = FullState[2]
Spinx0 = FullState[3]
Spiny0 = FullState[4]
Spinz0 = FullState[5]
Yangle = FullState[6] #angle 1 is the angle from
Zangle = -FullState[7]
Spinx0 = Spinx0 * .104719754 #converts rps to rad/s
Spiny0 = Spiny0 * -.104719754
Spinz0 = Spinz0 * .104719754
xSeam, ySeam, zSeam = initializeSeam() #initialized seams to a 90 deg x rotations
# see https://www.baseballaero.com/2020/03/09/describing-ball-orientation-post-51/
# for further info
xSeam, ySeam, zSeam = rotateSeam(xSeam, ySeam, zSeam, -np.pi/2, 0, 0, 1)
xSeam, ySeam, zSeam = rotateSeam(xSeam, ySeam, zSeam, 0, np.pi/2, 0, 1)
xSeam, ySeam, zSeam = rotateSeam(xSeam, ySeam, zSeam, 0, Yangle, 0, 1)
xSeam, ySeam, zSeam = rotateSeam(xSeam, ySeam, zSeam, 0, 0, Zangle, 1)
# xSeam3, ySeam3, zSeam3 = rotateSeam(1, 0, 0, Spinx0, Spiny0, Spinz0, 1)
# xSeam, ySeam, zSeam = rotateSeam(xSeam, ySeam, zSeam, 0, Yangle, 0, 1)
RotVec = findRotVec(1,0,0,Spinx0,Spiny0,Spinz0)
xSeam, ySeam, zSeam = rotateSeam(xSeam, ySeam, zSeam, RotVec[0],RotVec[1],RotVec[2], 1)
xSeam0,ySeam0,zSeam0 = xSeam, ySeam, zSeam
xSeamNG0,ySeamNG0,zSeamNG0 = xSeam, ySeam, zSeam
# xSeam0, ySeam0, zSeam0 = rotateSeam(xSeam, ySeam, zSeam, 0, Yangle, 0, 1)
# RotVec = findRotVec(Yangle, Zangle, 0, Spinx0, Spiny0, Spinz0)
# xSeam0, ySeam0, zSeam0 = rotateSeam(xSeam2, ySeam2, zSeam2, RotVec[0],RotVec[1],RotVec[2],1)
# xSeam0, ySeam0, zSeam0 = xSeam, ySeam, zSeam
    # establishes the (0,0) initial condition as a 2-seam spin
# All air properties are the approximate averages for sea level over the season
# rhoDRY = 0.0765 #lb/ft^3
# relHum = 0.73
# Temp = 60 #deg fahrenheit
rho = 0.074 #lb/ft^3, with humidity at sea level
circ = 9.125/12 #ft
diameter = (2. + 15/16)/12 #ft
Area = .25*np.pi*diameter**2 #ft^2
mass = 0.3203125 #lbm
c0 = 0.5*rho*Area/mass
BallConsts = [circ,diameter,Area,mass,c0]
t0 = 0.0
t = t0
dt = 0.001
u0 = u0 *1.467#ft/sec
v0 = -v0 *1.467#ft/sec
w0 = w0 *1.467#ft/sec
decisionPoint = 0.2 #sec #time before ball arrives when batter has
#to decide to hit or not.
SpinVec = [Spinx0,Spiny0,Spinz0]
Vel = [u0,v0,w0]
VelTot = np.sqrt(u0**2 + v0**2 + w0**2)
SpinRate0 = np.sqrt(Spinx0**2 + Spiny0**2 + Spinz0**2)
# SpinEfficiency0 = 1-abs(np.dot(Vel, SpinVec)/(SpinRate0*VelTot))
    unit_vector_1 = Vel / np.linalg.norm(Vel)
    unit_vector_2 = SpinVec / np.linalg.norm(SpinVec)
    dot_product = np.dot(unit_vector_1, unit_vector_2)
    Gangle = np.arccos(dot_product)
# SpinEfficiency0 = 1 - Gangle/(np.pi/2)
SpinEfficiency0 = np.sin(Gangle)
#assumes that the efficiency is non-linear and that it follows the sin of the
#angle between the ball direction and the spin.
ax, ay, az = 0, 0, 0
BallState0 = [x0,y0,z0,u0,v0,w0,Spinx0,Spiny0,Spinz0]
fileBT = open(str(i) + "BallTrajectoryNEW.txt","w+")
fileBT.write("time x y z u v w Spin_x Spin_y Spin_z ax ay az\n")
# fileBT.write("===============================================================================================================================\n")
fileBT.write("{:<10.3f}{:<10.3f}{:<10.3f}{:<10.3f}{:<10.3f}{:<10.3f}{:<10.3f}{:<10.3f}{:<10.3f}{:<10.3f}{:<10.3f}{:<10.3f}{:<10.3f}\n"
.format(t,x0,y0,z0,u0,v0,w0,Spinx0,Spiny0,Spinz0,ax,ay,az))
xP = []
yP = []
zP = []
xA = []
yA = []
zA = []
uP = []
vP = []
wP = []
xD = BallState0[0]
yD = BallState0[1]
zD = BallState0[2]
uD = BallState0[3]
vD = BallState0[4]
wD = BallState0[5]
BallStateNG0 = BallState0
while BallState0[1] > 0. and BallState0[2] > 0. and t < 20:
SpinVec = [Spinx0,Spiny0,Spinz0]
#need to input a non-magnus ball path indicator.
if seamsOn == True:
xSeam1, ySeam1, zSeam1 = rotateSeam(xSeam0, ySeam0, zSeam0, BallState0[6],BallState0[7],BallState0[8],dt)
seamPoints = np.asarray([xSeam1, ySeam1, zSeam1])
VelVec = np.asarray([BallState0[3], BallState0[4], BallState0[5]])
activeSeams, inactiveSeams, nodes = findSSWseams(VelVec,seamPoints,SpinVec,t)
# plotting.plotPointsTest(activeSeams, inactiveSeams, nodes,t)
xSeam0 = xSeam1
ySeam0 = ySeam1
zSeam0 = zSeam1
# if t == 0:
# seamPoints0 = np.asarray([xSeam0, ySeam0, zSeam0])
# activeSeams0, inactiveSeams0, nodes0 = findSSWseams(VelVec,seamPoints0,SpinVec,t)
# plotting.plotSeams(activeSeams0, inactiveSeams0, Spinx0, Spiny0, Spinz0, 0, VelVec,nodes)
# time.sleep(10)
if FullRot == True:
if t % frameRate > -0.0000001 and t % frameRate < 0.0000001:# and (SpinRate0*t) < np.pi*2 and t <= .0501:# and SpinRate0 > 100:
plotting.plotSeams(activeSeams, inactiveSeams, Spinx0, Spiny0, Spinz0, t, VelVec, nodes)
else:
if t % frameRate > -0.0000001 and t % frameRate < 0.0000001 and (SpinRate0*t) < np.pi*2 and t <= .0501:# and SpinRate0 > 100:
plotting.plotSeams(activeSeams, inactiveSeams, Spinx0, Spiny0, Spinz0, t, VelVec, nodes)
else:
activeSeams = [0,0,0]
SpinVec = [Spinx0,Spiny0,Spinz0]
#need to input a non-magnus ball path indicator.
if seamsOn == True:
xSeamNG1, ySeamNG1, zSeamNG1 = rotateSeam(xSeamNG0, ySeamNG0, zSeamNG0, BallStateNG0[6],BallStateNG0[7],BallStateNG0[8],dt)
seamPointsNG = np.asarray([xSeamNG1, ySeamNG1, zSeamNG1])
VelVecNG = np.asarray([BallStateNG0[3], BallStateNG0[4], BallStateNG0[5]])
activeSeamsNG, inactiveSeamsNG, nodesNG = findSSWseams(VelVecNG,seamPointsNG,SpinVec,t)
# plotting.plotPointsTest(activeSeams, inactiveSeams, nodes,t)
xSeamNG0 = xSeamNG1
ySeamNG0 = ySeamNG1
zSeamNG0 = zSeamNG1
else:
activeSeamsNG = [0,0,0]
# # This section is for showing the spin behaviour of the ball and where
# # the seams are moving
BallState1,slope = RK4(t, BallState0, dt, BallConsts, activeSeams, False)
BallStateNG1,slopeNG = RK4(t, BallStateNG0, dt, BallConsts, activeSeamsNG, True)
fileBT.write("{:<10.3f}{:<10.3f}{:<10.3f}{:<10.3f}{:<10.3f}{:<10.3f}{:<10.3f}{:<10.3f}{:<10.3f}{:<10.3f}{:<10.3f}{:<10.3f}{:<10.3f}\n"
.format(t,BallState1[0],BallState1[1],BallState1[2],BallState1[3],BallState1[4],BallState1[5],BallState1[6],BallState1[7],BallState1[8], slope[3], slope[4], slope[5]))
BallState0 = BallState1
BallStateNG0 = BallStateNG1
xP.append(BallState1[0]*12)
yP.append(BallState1[1])
zP.append(BallState1[2]*12)
uP.append(BallState1[3])
vP.append(BallState1[4])
wP.append(BallState1[5])
t = t + dt
# fileBT.close()
print('NG final position',BallStateNG1[0], BallStateNG1[1], BallStateNG1[2])
NGFinal = BallStateNG1[0], BallStateNG1[1], BallStateNG1[2]
DecisionPointStep = int(t - (.2/dt))
if t < decisionPoint:
print("WOW! no batter has enough skill to hit a ball thrown that fast")
xD = -10
yD = -10
zD = -10
uD = 0
vD = 0
wD = 0
else:
xD = xP[DecisionPointStep]/12
yD = yP[DecisionPointStep]
zD = zP[DecisionPointStep]/12
uD = uP[DecisionPointStep]
vD = vP[DecisionPointStep]
wD = wP[DecisionPointStep]
BallStateF = BallState1
xF, yF, zF = BallStateF[0], BallStateF[1], BallStateF[2]
fileBT.close()
dzNoSpin = w0*t - (32.2/2)*t*t
zfg = z0 + dzNoSpin
vBreak = BallStateF[2] - zfg
dxNoSpin = u0*t
xfg = x0 + dxNoSpin
hBreak = BallStateF[0] - xfg
SpinVecF = [BallStateF[6],BallStateF[7],BallStateF[8]]
VelF = [BallStateF[3],BallStateF[4],BallStateF[5]]
VelTotF = np.sqrt(BallStateF[3]**2 + BallStateF[4]**2 + BallStateF[5]**2)
SpinRateF = np.sqrt(BallStateF[6]**2 + BallStateF[7]**2 + BallStateF[8]**2)
# SpinEfficiencyF = 1-abs(np.dot(VelF, SpinVecF)/(SpinRateF*VelTotF))
    unit_vector_1 = VelF / np.linalg.norm(VelF)
    unit_vector_2 = SpinVecF / np.linalg.norm(SpinVecF)
    dot_product = np.dot(unit_vector_1, unit_vector_2)
    Gangle = np.arccos(dot_product)
# SpinEfficiency0 = 1 - Gangle/(np.pi/2)
SpinEfficiencyF = np.sin(Gangle)
totalRotations = SpinRateF/(2*np.pi) #assumes no spin decay
finalApproachAngleyz = np.arctan2(abs(BallStateF[5]), abs(BallStateF[4]))
finalApproachAnglexy = np.arctan2(abs(BallStateF[3]), abs(BallStateF[4]))
Hrs, mins = TiltToTime(Tilt)
# Tiltdegs = TimeToTilt(Hrs,mins)
print('initial conditions:')
print('x0 (ft)------------------------------- ', to_precision(x0,4))
print('y0 (ft)------------------------------- ', to_precision(y0,4))
print('z0 (ft)------------------------------- ', to_precision(z0,4))
print('u0 (mph)------------------------------ ', to_precision(u0/1.467,4))
print('v0 (mph)------------------------------ ', to_precision(v0/1.467,4))
print('w0 (mph)------------------------------ ', to_precision(w0/1.467,4))
print('Total Velocity (mph)------------------ ', to_precision(VelTot/1.467,4))
print('Spinx0 (rpm)-------------------------- ', to_precision(Spinx0/0.104719754,4))
print('Spiny0 (rpm)-------------------------- ', to_precision(Spiny0/-0.104719754,4))
print('Spinz0 (rpm)-------------------------- ', to_precision(Spinz0/0.104719754,4))
print('Total Spin Rate (rpm)----------------- ', to_precision(SpinRate0/0.104719754,4))
print('Tilt (clock face)----------------------', Hrs,':',mins)
# print('Tilt (deg) --------------------------- ', to_precision(Tiltdegs,4))
if SpinRate0 == 0:
print('Initial Efficiency (%)---------------- NA')
else:
print('Initial Efficiency (%)---------------- ', to_precision(SpinEfficiency0*100,4))
print('\n\nconditions at decision point:')
print('x at decision point (ft)------------- ', to_precision(xD,4))
print('y at decision point (ft)------------- ', to_precision(yD,4))
print('z at decision point (ft)--------------', to_precision(zD,4))
print('u at decision point (ft)--------------', to_precision(uD,4))
print('v at decision point (ft)--------------', to_precision(vD,4))
print('w at decision point (ft)--------------', to_precision(wD,4))
print('\n\nconditions across the plate:')
print('xf (ft)-------------------------------', to_precision(BallStateF[0],4))
print('yf (ft)-------------------------------', to_precision(BallStateF[1],4)) # actually just the last point data was taken
print('zf (ft)-------------------------------', to_precision(BallStateF[2],4))
print('uf (mph)------------------------------', to_precision(BallStateF[3]/1.467,4))
print('vf (mph)------------------------------', to_precision(-BallStateF[4]/1.467,4))
print('wf (mph)------------------------------', to_precision(BallStateF[5]/1.467,4))
print('Total Velocity (mph)------------------', to_precision(VelTotF/1.467,4))
print('Spinxf (rpm)--------------------------', to_precision(BallStateF[6]/0.104719754,4))
print('Spinyf (rpm)--------------------------', to_precision( BallStateF[7]/0.104719754,4))
print('Spinzf (rpm)--------------------------', to_precision(BallStateF[8]/0.104719754,4))
print('Total Spin Rate (rpm)-----------------', to_precision(SpinRateF/0.104719754,4))
print('Approach Angle (yz, deg)--------------', to_precision(finalApproachAngleyz*180/np.pi,4))
print('Approach Angle (xy, deg)--------------', to_precision(finalApproachAnglexy*180/np.pi,4))
print('Final Efficiency (%)------------------', to_precision(SpinEfficiencyF*100,4))
print('dx after decision point (ft)----------', to_precision((BallStateF[0] - xD)/12,4))
print('dy after decision point (ft)----------', to_precision((BallStateF[1] - yD)/12,4))
print('dz after decision point (ft)----------', to_precision((BallStateF[2] - zD)/12,4))
print('\n\nTotals:')
print('flight time (t)-----------------------', to_precision(t,4))
print('Vertical break (in)-------------------', to_precision(vBreak*12,4))
print('Horizontal break (in)-----------------', to_precision(hBreak*12,4))
print('Number of Revolutions-----------------', to_precision(totalRotations*t,4))
xA, yA, zA = FindAccel(xP,yP,zP,t)
positions = [xP,yP,zP,x0,y0,z0,xD,yD,zD,xF,yF,zF,t,xA,yA,zA]
return positions,NGFinal
###############################################################################
def TiltToTime(Tilt):
"""
'Tilt' is in degrees and this function outputs the hours and minutes
"""
TiltTime = (((Tilt)%360)/360)*12
Hrs = int(TiltTime)
if Hrs == 0:
Hrs = 12
mins = int(round(TiltTime*60)%60)
return(Hrs,mins)
###############################################################################
def TimeToTilt(Hrs, mins):
"""
    Takes the tilt in hours and minutes and turns it into radians
"""
radHrs = ((Hrs-3)*np.pi/6)
radmins = (mins*np.pi/360)
return(radHrs + radmins)
###############################################################################
def anglesTOCart(Vtot, Theta, Psi, SpinRate, Tilt, Gyro, Yangle, Zangle):
"""
    This function is designed merely to generate the ball's initial conditions.
    It will take various options and output x0,y0,z0,u0,v0,w0,Spinx0,\
    Spiny0,Spinz0,Yangle,Zangle. Yangle and Zangle are for seam effects.
"""
Theta = Theta*np.pi/180
Psi = Psi*np.pi/180
uvmag = Vtot*np.cos(Theta)
w0 = Vtot*np.sin(Theta)
u0 = -uvmag*np.sin(Psi)
v0 = uvmag*np.cos(Psi)
Tilt = (Tilt) # rad tilt
Gyro = (Gyro) # rad gyro
#this is where the changes need to occur to fix the problems with the gyro
Spinx0 = SpinRate*np.sin(Gyro)*np.sin(Tilt)
Spiny0 = SpinRate*np.cos(Gyro)
Spinz0 = -SpinRate*np.sin(Gyro)* | np.cos(Tilt) | numpy.cos |
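# Illustrative check (hypothetical values, not from the file above): the three spin components
# produced by anglesTOCart() should recombine to the original SpinRate for any Tilt and Gyro.
import numpy as np
SpinRate, Tilt, Gyro = 2400.0, np.pi / 3, np.pi / 6
Spinx0 = SpinRate * np.sin(Gyro) * np.sin(Tilt)
Spiny0 = SpinRate * np.cos(Gyro)
Spinz0 = -SpinRate * np.sin(Gyro) * np.cos(Tilt)
assert np.isclose(np.sqrt(Spinx0**2 + Spiny0**2 + Spinz0**2), SpinRate)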
from matplotlib import pylab as plt
import numpy as np
import datetime
import pathlib
from sklearn.metrics import roc_curve, auc, confusion_matrix
import matplotlib.dates as mdates
from matplotlib.collections import PolyCollection
from collections import OrderedDict
import io
import base64
from urllib.parse import quote
import itertools
from .. import to_numpy
from ..metrics import moving_average
def plot_line(
y,
x=None,
figsize=None,
window=1,
xlim=None,
ylim=None,
line_width=2,
stroke='-',
title=None,
xlabel=None,
ylabel=None,
xscale=None,
yscale=None,
show=True,
save=False,
legend=True,
name=None,
grid=True
):
fig = figsize and plt.figure(figsize=figsize)
if xlim:
plt.xlim(xlim)
if ylim:
plt.ylim(ylim)
if xlabel:
plt.xlabel(xlabel)
if ylabel:
plt.ylabel(ylabel)
if title:
plt.title(title)
if yscale:
plt.yscale(yscale)
if xscale:
plt.xscale(xscale)
if window != 1:
y = moving_average(y, window=window)
if x is None:
x = [n * window for n in range(len(y))]
elif len(x) != len(y):
x = moving_average(x, window=window)
plt.plot(x, y, stroke, lw=line_width, label=name and str(name))
if grid:
plt.grid(True)
if legend:
plt.legend(loc='best')
if save and show:
raise ValueError("Can't show and save at the same time")
if show:
plt.show()
if save:
plt.gcf().savefig(
save if isinstance(save, (str, pathlib.Path)) else
f"{title or ylabel or datetime.datetime.utcnow().strftime('%y-%m-%dT%H%M%S')}.jpg"
)
plt.gcf().clear()
if fig:
plt.close(fig)
return plt.gcf()
def plot_pred_scores(
preds,
targets,
classes=None,
logscale=True,
figsize=(12, 6),
show=True,
save=False,
title=None):
import seaborn as sns
preds, targets = to_numpy(preds), to_numpy(targets)
if title:
plt.title(title)
if classes:
classes = classes.items() if isinstance(classes, dict) else \
((c, n) for n, c in enumerate(classes))
else:
classes = ((n, n) for n in range(preds.shape[1]))
for cls, idx in classes:
f, ax = plt.subplots(figsize=figsize)
if logscale:
ax.set(yscale="log")
if len(targets.shape) == 1:
sns.distplot(preds[targets != idx, idx], bins=50, kde=False,
rug=False, hist_kws={"range": [0, 1]}, ax=ax, color='red',
label='Negative')
sns.distplot(preds[targets == idx, idx], bins=50, kde=False,
rug=False, hist_kws={"range": [0, 1]}, ax=ax, color='blue',
label='Positive')
else:
sns.distplot(preds[targets[:, idx] == 0, idx], bins=50, kde=False,
rug=False, hist_kws={"range": [0,1]}, ax=ax, color='red', label='Negative')
sns.distplot(preds[targets[:, idx] == 1, idx], bins=50, kde=False,
rug=False, hist_kws={"range": [0,1]}, ax=ax, color='blue', label='Positive')
ax.set_title(cls)
plt.xlabel('Score')
plt.ylabel('Sample Count')
plt.legend(loc='best')
if show:
plt.show()
if save:
plt.savefig(save if isinstance(save, (str, pathlib.Path)) else
f"{title or datetime.datetime.utcnow().strftime('%y-%m-%dT%H%M%S')}.jpg")
plt.gcf().clear()
def plot_rocs(
preds,
targets,
classes=None,
figsize=(12,12),
show=True,
save=False,
title=None):
preds, targets = to_numpy(preds), to_numpy(targets)
fig = plt.figure(figsize=figsize)
if title:
plt.title(title)
if classes:
classes = classes.items() if isinstance(classes, dict) else \
((c, n) for n, c in enumerate(classes))
else:
classes = ((n, n) for n in range(preds.shape[1]))
plt.xlim([0.0, 1.02])
plt.ylim([0.0, 1.02])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.plot([0, 1], [0, 1], 'k--', lw=2)
vals = {}
for cls, idx in classes:
if len(targets.shape) == 1:
fpr, tpr, thresholds = roc_curve(targets, preds[:, idx])
else:
fpr, tpr, thresholds = roc_curve(targets[:, idx], preds[:, idx])
area = auc(fpr, tpr)
plt.plot(fpr, tpr, label=f'{cls} (AUC = %0.3f)' % area)
vals[cls] = (area, fpr, tpr, thresholds)
plt.legend(loc='best')
if show:
plt.show()
if save:
plt.savefig(save if isinstance(save, (str, pathlib.Path)) else
f"{title or datetime.datetime.utcnow().strftime('%y-%m-%dT%H%M%S')}.jpg")
plt.gcf().clear()
return vals
def plot_cooccurrences(counts, classes, figsize=(14, 11)):
import seaborn as sns
import pandas as pd
mask = np.ones_like(counts)
mask[np.tril_indices_from(mask)] = False
df_cm = pd.DataFrame(
counts,
index=list(classes),
columns=list(classes))
plt.figure(figsize=figsize)
plot = sns.heatmap(
df_cm,
robust=True,
annot=True,
fmt="d",
cmap="YlGnBu",
mask=mask)
plot.set_title('Class Co-occurrence')
def sorted_indices(matrix, desc=False):
inds = np.dstack(np.unravel_index(np.argsort(matrix.ravel()), matrix.shape))
return ( | np.fliplr(inds) | numpy.fliplr |
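# Illustrative worked example (small matrix): sorted_indices() returns (row, col) pairs in
# ascending order of the matrix values; np.fliplr on the stacked result presumably reverses
# them for the desc=True branch.
import numpy as np
m = np.array([[3, 1], [2, 4]])
inds = np.dstack(np.unravel_index(np.argsort(m.ravel()), m.shape))
print(inds)    # pairs (0,1), (1,0), (0,0), (1,1) -> values 1, 2, 3, 4 in ascending order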
from scipy.spatial import distance
import numpy as np
from math import factorial, atan2, degrees
import pandas as pd
from Utils.decorators import clock_noself
def calc_distance_2d(data, vectors = True):
"""
    Calculates the euclidean distance between two points, or between each pair of points in two vectors
"""
# TODO testing
if not vectors:
return distance.euclidean(data[0], data[1])
else:
dist = []
if isinstance(data[0], list) or isinstance(data[0], dict):
raise Warning('This case needs to be dealt with')
else:
try:
data = (data[0].values, data[1].values)
except:
pass
for n, pos in enumerate(zip(data[0], data[1])):
# Get a pair of points
if n == 0:
p0 = pos
dist.append(0)
else:
p1 = pos
# Calc distance
try:
dist.append(distance.euclidean(p0, p1))
except:
if np.isnan(p1).any():
dist.append(np.nan)
# Prepare for next iteration
p0 = p1
return dist
def calc_acceleration(d, unit: str=False, fps: int = False, bodylength: float = False):
""" Calculates the acceleration (1st derivative of velocity). different options for output format """
if not unit or unit == 'pxperframe':
# Return the velocity in px per frame
return np.insert(np.diff(d), 0, 0)
else:
# Scale the velocity from px per frame depending on the unit used
velocity = np.insert(np.diff(d), 0, 0)
if not fps:
print('No FPS was available when calculating velocity\n FPS set as 30 frames per second')
fps = 30
else:
if isinstance(fps, list):
fps = fps[0]
if unit == 'pxpersec':
return velocity*fps
if unit =='blpersec':
if not bodylength:
print('No body length was found when calculating velocity as bodylengths per second\n'
'Using px per second instead')
return velocity*fps
else:
velocity = velocity * fps
velocity = velocity / bodylength
return velocity
def calc_angle_2d(p1, p2, vectors: bool=False):
""" calculates the angle of a line going through two points, or sets of points in two vectors"""
def angle(a, b):
radang = atan2(b[1] - a[1], b[0] - a[0])
degang = degrees(radang)
if degang < 0:
return 360 + degang
else:
return degang
if not vectors:
# Calc for just two points
return angle(p1, p2)
else:
# calc for two vectors of points
if isinstance(p1, pd.DataFrame):
p1 = np.vstack((p1['y'].values, p1['x'].values))
p2 = np.vstack((p2['y'].values, p2['x'].values))
deltas = np.subtract(p1.T, p2.T)
angs = np.degrees(np.arctan2(deltas[:, 0], deltas[:, 1]))
negs = np.where(angs < 0)[0]
angs[negs] += 360
angs += 90
# angles = []
# frames = len(p1['x'])
# for idx in range(frames):
# angles.append(angle((p1.loc[idx]['x'], p1.loc[idx]['y']),
# (p2.loc[idx]['x'], p2.loc[idx]['y'])))
return angs
def calc_ang_velocity(orientation, fps: int=False):
"""
Given a vector of orientation (degrees) per frame, calculates the velocity as either degrees per frame
or degrees per second (if fps != False).
:param orientation: vector of angle values
:param fps: frame rate of video the orientation was extracted from
:return: angular velocity as either deg per sec or deg per frame.
"""
rad_ori = np.radians(orientation.values)
rad_ang_vel = np.insert(np.diff(np.unwrap(rad_ori)), 0, 0)
if not fps: # return and vel as degrees per frame
return np.degrees(rad_ang_vel)
else: # return and vel as degrees per sec
return np.degrees(np.multiply(rad_ang_vel, fps))
def calc_ang_acc(velocity):
""" calculates the angular acceleration given a angular velocity vector"""
return np.insert(np.diff(velocity), 0, 0)
def line_smoother(y, window_size=31, order=3, deriv=0, rate=1):
    # Apply a Savitzky-Golay filter to smooth traces
order_range = range(order + 1)
half_window = (window_size - 1) // 2
# precompute coefficients
b = np.mat([[k ** i for i in order_range] for k in range(-half_window, half_window + 1)])
m = np.linalg.pinv(b).A[deriv] * rate ** deriv * factorial(deriv)
# pad the signal at the extremes with values taken from the signal itself
try:
firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
lastvals = y[-1] + | np.abs(y[-half_window - 1:-1][::-1] - y[-1]) | numpy.abs |
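# Illustrative cross-check (assumes scipy is available): line_smoother() pads the ends by
# reflecting samples about the first/last value and then convolves with Savitzky-Golay
# coefficients; scipy's built-in filter gives a close (though not bit-identical) result.
import numpy as np
from scipy.signal import savgol_filter
y = np.sin(np.linspace(0, 2 * np.pi, 200)) + 0.1 * np.random.randn(200)
y_smooth = savgol_filter(y, window_length=31, polyorder=3, mode="mirror")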
import numpy as np
from integrator import *
from qoi import *
import parallel as par
def simSetUp(inpt):
Sim = {}
Ndof = int(inpt['Ndof'])
Timestep = float(inpt['Timestep'])
Tf = float(inpt['Tf'])
NSim = int(inpt['NSim'])
Sim['Simulation name'] = inpt['Simulation name']
Sim['Ndof'] = Ndof
Sim['Timestep'] = Timestep
Sim['Tf'] = Tf
Sim['NSim'] = NSim
Sim['Record solution'] = (inpt['Record solution']=="True")
# MPI parallelization
nSim_, startSim_ = par.partitionSim(NSim)
Sim['nSim_'] = nSim_
Sim['startSim_'] = startSim_
if par.nProc > 1:
Sim['reconstruct Sol'] = (inpt['reconstruct Sol']=="True")
Sim['reconstruct QOI'] = (inpt['reconstruct QOI']=="True")
# Post proc
Sim['Plot'] = (inpt['Plot']=="True")
Sim['Build CDF'] = (inpt['Build CDF']=="True")
if Sim['Build CDF']:
Sim['Plot CDF'] = (inpt['Plot CDF']=="True")
Sim['Build rare paths'] = (inpt['Build rare paths']=="True")
if Sim['Build rare paths']:
Sim['Levels'] = [float(lev) for lev in inpt['Levels'].split()]
Sim['Plot rare paths'] = (inpt['Plot rare paths']=="True")
if inpt['Simulation name'] == 'KS':
# scalars for ETDRK4
h = Timestep
Sim['Lx/pi'] = float(inpt['Lx/pi'])
k = np.transpose(np.conj(np.concatenate(( | np.arange(0, Ndof/2.0) | numpy.arange |
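# Illustrative sketch (assumption: the usual Kassam-Trefethen ETDRK4 setup, since the row is
# truncated here): the wavenumber vector for a periodic domain typically follows FFT ordering
# with the Nyquist mode zeroed.
import numpy as np
Ndof, L_over_2pi = 8, 1.0                        # hypothetical sizes; L_over_2pi = Lx / (2*pi)
k = np.concatenate((np.arange(0, Ndof / 2.0),
                    np.array([0.0]),
                    np.arange(-Ndof / 2.0 + 1, 0))) / L_over_2pi
print(k)                                         # [ 0.  1.  2.  3.  0. -3. -2. -1.]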
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 10 23:20:13 2020
@author: <NAME>
This file contain some functions for picture processing
"""
import shutil
import sys
import os
import cv2
import numpy as np
import json
# import pyrealsense2 as rs
import time
import random
from matplotlib import pyplot as plt
import glob
def check_path(file_path):
"""
    Check whether the file exists
:param file_path:
:return:
"""
if not (os.path.exists(file_path)):
        print('file does not exist')
sys.exit()
def show_image(img, time=5000):
"""
    Show an image mat in a resizable window.
"""
cv2.namedWindow('Licence Img', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Licence Img', 1280, 768)
cv2.moveWindow('Licence Img', 300, 100)
cv2.imshow('Licence Img', img)
if time > 0:
cv2.waitKey(time)
cv2.destroyAllWindows()
def rgb_to_gray(img):
"""
    Convert a BGR image to a gray one and return it.
"""
return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def binary_thresh_image(gray_img, thresh1=0, thresh2=255):
"""
Get binary img from a gray img
:param thresh1: low thresh
:param thresh2: high thresh
:param gray_img: gray mat
:return: thresh value; binary mat
"""
ret, binary_img = cv2.threshold(gray_img, thresh1, thresh2, cv2.THRESH_BINARY)
return binary_img
def auto_binary_thresh_image(gray_img):
"""
    Get a binary img from a gray img, using the mean of the max and min gray values as the threshold
:param gray_img: gray mat
:return: thresh value; binary mat
"""
max_index = float(gray_img.max())
min_index = float(gray_img.min())
thresh1 = (max_index + min_index) / 2
thresh2 = 255
ret, binary_img = cv2.threshold(gray_img, thresh1, thresh2, cv2.THRESH_BINARY)
return binary_img
def stretch(img):
"""
    Image gray-level stretching function: linearly rescales pixel values to the 0-255 range.
"""
maxi = float(img.max())
mini = float(img.min())
for i in range(img.shape[0]):
for j in range(img.shape[1]):
img[i, j] = (255 / (maxi - mini) * img[i, j] - (255 * mini) / (maxi - mini))
return img
def horizon_stack_image(*args):
"""
stack array
:param args:
:return:
"""
img_mix = args[0]
for i in range(1, len(args)):
img_mix = np.hstack((img_mix, args[i]))
return img_mix
def vertical_stack_image(*args):
"""
stack array
:param args:
:return:
"""
img_mix = args[0]
for i in range(1, len(args)):
img_mix = | np.vstack((img_mix, args[i])) | numpy.vstack |
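# Illustrative usage (tiny arrays): the two helpers above tile images horizontally / vertically;
# hstack requires matching heights and vstack matching widths.
import numpy as np
a = np.zeros((2, 3), dtype=np.uint8)
b = np.ones((2, 3), dtype=np.uint8)
print(np.hstack((a, b)).shape)    # (2, 6) -- what horizon_stack_image(a, b) produces
print(np.vstack((a, b)).shape)    # (4, 3) -- what vertical_stack_image(a, b) produces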
# aux.py
# auxiliary functions
# Copyright 2019 <NAME>
import numpy as np
import pandas as pd
# for stat
from scipy.sparse import coo_matrix
from scipy import stats
# for io
import csv
# for plot
import matplotlib as mpl
import matplotlib.pyplot as plt
# === ds: custom data structure
class Tray:
''' empty class, to emulate Matlab's struct '''
def __init__(self):
pass
def get_attr_keys(self):
dkey = self.__dict__.keys()
return dkey
# /
# === dm: data manipulation
# --- pandas DataFrame specific
def collect_df_rows_by_index(df, idx_input, drop=True):
# should extend for the bad-index case (NaN)
idx = idx_input.astype('int')
df_new = df.iloc[idx].reset_index(drop=drop)
return df_new
def convert_data_types(df, fields, type):
for myfield in fields:
myvalue = getattr(df, myfield).astype(type)
setattr(df, myfield, myvalue)
return df
def sort_and_reset_index(intab, columns, drop=True):
''' sort by columns and reset index '''
sorttab = intab.sort_values(columns)
outtab = sorttab.reset_index(drop=drop)
return outtab
# --- other
def find_equal(listlike, targ):
idx_hit = []
for m in range(len(listlike)):
if targ == listlike[m]:
idx_hit.append(m)
return idx_hit
def find_idx(testlist_bool):
# https://stackoverflow.com/questions/364621/how-to-get-items-position-in-a-list
myidx = [i for i,x in enumerate(testlist_bool) if x == 1]
return myidx
def findby(vlist, testlist_bool):
myidx_list = find_idx(testlist_bool)
val = [vlist[i] for i in myidx_list]
return val
def isin_lists(list, testlist):
y_array = np.isin(np.array(list), np.array(testlist))
y = y_array.tolist()
return y
def normalize_by(mat, axis):
mysum = np.sum(mat, axis=axis)
newmat = np.true_divide(mat, mysum)
return newmat
def center_by(mat, axis):
mymean = np.mean(mat, axis=axis)
newmat = mat - mymean
return newmat
# /
# === stat: reusable statistics
# --- counting & probability estimation
def count_with_weight(vec, wgt=None, *args):
# v_uniq, v_cnt = np.unique(vec, return_counts=True)
if wgt is None:
wgt = np.ones(np.size(vec))
v_uniq = np.unique(vec).tolist()
v_wgtcnt = []
for vu in v_uniq:
myidx = find_idx(isin_lists(vec, vu))
mywgtcnt = sum([wgt[i] for i in myidx])
v_wgtcnt.append(mywgtcnt)
return v_uniq, v_wgtcnt
def samp_prob1(vec, wgt=None, normalize=True):
''' sampled probability for one variable with discrete values '''
v_uniq, v_cnt = count_with_weight(vec, wgt)
cnt_mat = np.matrix(v_cnt).transpose()
if normalize:
cnt_mat = normalize_by(cnt_mat, axis=None) # single dimension
return cnt_mat, v_uniq
def samp_joint_prob(v1, v2, wgt=None, normalize=True):
''' sampled joint probability for two variables v1 and v2 '''
    if wgt is None:
wgt = np.ones(np.size(v1))
# use COO matrix
v1uniq, v1iinv = np.unique(v1, return_inverse=True) # renumber
v2uniq, v2iinv = | np.unique(v2, return_inverse=True) | numpy.unique |
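# Illustrative sketch (the rest of samp_joint_prob is truncated here): the return_inverse
# indices renumber each variable so weighted pair counts can be accumulated in a sparse COO
# matrix; made-up data below.
import numpy as np
from scipy.sparse import coo_matrix
v1 = ["a", "b", "a", "a"]
v2 = ["x", "x", "y", "x"]
v1u, i1 = np.unique(v1, return_inverse=True)
v2u, i2 = np.unique(v2, return_inverse=True)
joint = coo_matrix((np.ones(len(v1)), (i1, i2)), shape=(len(v1u), len(v2u))).toarray()
print(joint)    # counts: (a,x)=2, (a,y)=1, (b,x)=1, (b,y)=0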
# -*- coding: utf-8 -*-
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import time
import numpy as np
DTYPE = theano.config.floatX
class BaseLayer(object):
def __init__(
self,
n_visible, n_hidden,
bias_visible=None, bias_hidden=None,
W=None, WT = None, init_w_limit=None,
numpy_rng_seed=None, theano_rng_seed=None,
activation=None,
**kwargs
):
if not n_visible or not n_hidden:
raise AttributeError("BaseLayer init n_visible and n_hidden cannot be zero")
self.n_visible = n_visible
self.n_hidden = n_hidden
if numpy_rng_seed is None:
numpy_rng = np.random.RandomState(int(time.time()))
else:
numpy_rng = np.random.RandomState(numpy_rng_seed)
self.numpy_rng = numpy_rng
if theano_rng_seed is None:
theano_rng_seed = numpy_rng.randint(2 ** 30)
self.theano_rng = RandomStreams(theano_rng_seed)
if W is None:
if init_w_limit is None:
init_w_limit = 4. * np.sqrt(6. / (n_visible + n_hidden))
self.W = theano.shared(
value=np.asarray(
numpy_rng.uniform(
low=-init_w_limit,
high=init_w_limit,
size=(n_visible, n_hidden)
),
dtype=DTYPE
),
name='W'
)
elif isinstance(W, list):
init_W = np.asarray(W, dtype=DTYPE)
if init_W.shape != (n_visible, n_hidden):
raise AttributeError("BaseLayer init shape of W is illegal: %r, n_visible=%r, n_hidden=%r" %
(init_W.shape, n_visible, n_hidden))
self.W = theano.shared(
value=init_W,
name='W',
borrow=True
)
elif isinstance(W, np.ndarray):
shape = W.shape
if shape != (n_visible, n_hidden):
raise AttributeError("BaseLayer init shape of W is illegal: %r, n_visible=%r, n_hidden=%r" %
(shape, n_visible, n_hidden))
self.W = theano.shared(W, name='W')
elif isinstance(W, theano.Variable):
self.W = W
else:
raise AttributeError("BaseLayer init type of W is illegal: %r" % type(W))
if WT is None:
if init_w_limit is None:
init_w_limit = 4. * np.sqrt(6. / (n_visible + n_hidden))
self.WT = theano.shared(
value=np.asarray(
numpy_rng.uniform(
low=-init_w_limit,
high=init_w_limit,
size=(n_hidden, n_visible)
),
dtype=DTYPE
),
name='WT'
)
elif isinstance(WT, list):
init_WT = np.asarray(WT, dtype=DTYPE)
if init_WT.shape != (n_hidden, n_visible):
raise AttributeError("BaseLayer init shape of WT is illegal: %r, n_visible=%r, n_hidden=%r" %
(init_WT.shape, n_hidden, n_visible))
self.WT = theano.shared(
value=init_WT,
name='WT',
borrow=True
)
elif isinstance(WT, np.ndarray):
shape = WT.shape
if shape != (n_hidden, n_visible):
raise AttributeError("BaseLayer init shape of WT is illegal: %r, n_visible=%r, n_hidden=%r" %
(shape, n_visible, n_hidden))
self.WT = theano.shared(WT, name='WT')
elif isinstance(WT, theano.Variable):
self.WT = WT
else:
raise AttributeError("BaseLayer init type of WT is illegal: %r" % type(WT))
if bias_visible is None:
self.bias_visible = theano.shared(
value=np.zeros(shape=n_visible, dtype=DTYPE),
name='b_vis',
borrow=True
)
elif isinstance(bias_visible, list):
init_bias_visible = np.asarray(bias_visible, dtype=DTYPE)
if init_bias_visible.shape != (n_visible,):
raise AttributeError("BaseLayer init shape of bias_visible is illegal: %r, n_visible=%r" %
(init_bias_visible.shape, n_visible))
self.bias_visible = theano.shared(value=init_bias_visible, name='b_vis')
elif isinstance(bias_visible, theano.Variable):
self.bias_visible = bias_visible
else:
raise AttributeError("BaseLayer init type of bias_visible is illegal: %r" % type(bias_visible))
if bias_hidden is None:
self.bias_hidden = theano.shared(
value=np.zeros(shape=n_hidden, dtype=DTYPE),
name='b_hid',
borrow=True
)
elif isinstance(bias_hidden, list):
init_bias_hidden = np.asarray(bias_hidden, dtype=DTYPE)
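# Hedged usage sketch (not part of the original file; assumes the truncated __init__ above is
# completed). Weights default to a Glorot-style uniform init in +/- 4*sqrt(6/(n_visible+n_hidden)).
#   layer = BaseLayer(n_visible=784, n_hidden=256, numpy_rng_seed=42)
#   layer.W.get_value().shape   # -> (784, 256)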
"""
Bandwidth optimization methods
"""
__author__ = "<NAME>"
import numpy as np
def golden_section(a, c, delta, function, tol, max_iter, int_score=False):
"""
Golden section search routine
Method: p212, 9.6.4
<NAME>., <NAME>., & <NAME>. (2002).
Geographically weighted regression: the analysis of spatially varying relationships.
Parameters
----------
a : float
initial max search section value
c : float
initial min search section value
delta : float
constant used to determine width of search sections
function : function
objective function to be evaluated at different section
values
int_score : boolean
False for float score, True for integer score
tol : float
tolerance used to determine convergence
max_iter : integer
maximum iterations if no convergence to tolerance
Returns
-------
opt_val : float
optimal value
opt_score : float
optimal score
output : list of tuples
searching history
"""
b = a + delta * np.abs(c-a)
d = c - delta * np.abs(c-a)
score = 0.0
diff = 1.0e9
iters = 0
output = []
dict = {}
while np.abs(diff) > tol and iters < max_iter:
iters += 1
if int_score:
b = np.round(b)
d = np.round(d)
if b in dict:
score_b = dict[b]
else:
score_b = function(b)
dict[b] = score_b
if d in dict:
score_d = dict[d]
else:
score_d = function(d)
dict[d] = score_d
if score_b <= score_d:
opt_val = b
opt_score = score_b
c = d
d = b
b = a + delta * np.abs(c-a)
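# Hedged usage sketch (not part of the original file; assumes the search loop above is
# completed as described in the docstring). Minimise a 1-D objective on [0, 10]:
#   f = lambda x: (x - 3.0) ** 2
#   opt_val, opt_score, output = golden_section(0.0, 10.0, 0.38197, f, 1.0e-5, 200)
#   # opt_val should converge to approximately 3.0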
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 7 14:24:20 2021
@author: bmoseley
"""
# This module defines various active schedulers, which are iterables which allow us
# to define which FBPINN subdomains are active/fixed/inactive at each training step
# This module is used by constants.py when defining FBPINN / PINN problems
import itertools
import numpy as np
class _ActiveScheduler:
"Helper class for scheduling updates to the active array"
name = None
def __init__(self, N_STEPS, D):
self.N_STEPS = N_STEPS
self.nd = D.nd
self.nm = D.nm
self.xx = D.xx.copy()
def __len__(self):
return self.N_STEPS
def __iter__(self):
# returns None if active map not to be changed, otherwise active map
raise NotImplementedError
# ALL ACTIVE SCHEDULER
class AllActiveSchedulerND(_ActiveScheduler):
"All models are active all of the time"
name = "All"
def __iter__(self):
for i in range(self.N_STEPS):
if i == 0: yield np.ones(self.nm, dtype=int)# (nm)
else: yield None
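# Hedged usage sketch (not part of the original file): schedulers are iterated alongside the
# training loop; None means "keep the previous active map". D is assumed to be an FBPINN
# domain object with nd, nm and xx attributes.
#   sched = AllActiveSchedulerND(N_STEPS=3, D=D)
#   for i, active in enumerate(sched):
#       if active is not None: print(i, active)   # prints the all-ones map once, at step 0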
# POINT-BASED ACTIVE SCHEDULERS
class _SubspacePointActiveSchedulerND(_ActiveScheduler):
"Slowly expands radially outwards from a point in a subspace of the domain (in x units)"
def __init__(self, N_STEPS, D, point, iaxes):
super().__init__(N_STEPS, D)
point = np.array(point)# point in constrained axes
iaxes = list(iaxes)# unconstrained axes
# validation
if point.ndim != 1: raise Exception("ERROR: point ndim !=1")
if len(point) > self.nd: raise Exception("ERROR: len point > self.nd")
if len(iaxes) + len(point) != self.nd: raise Exception("ERROR: len iaxes + len point != nd")
self.point = point
self.iaxes = iaxes
def _get_radii(self, point, xx):
"Get the radii from a point in a subspace of xx"
# get subspace dimensions
nd, nm = xx.shape[0], tuple(s-1 for s in xx.shape[1:])
assert len(nm) == nd
assert len(point) == nd# make sure they match with point
# get xmin, xmax of each model
xmins = xx[(slice(None),)+(slice(None,-1),)*nd]# (nd, nm) self.xx (nd,nm+1)
xmaxs = xx[(slice(None),)+(slice(1, None),)*nd]# (nd, nm)
# whether point is inside model
point = point.copy().reshape((nd,)+(1,)*nd)# (nd, (1,)*nd)
c_inside = (point >= xmins) & (point < xmaxs)# point is broadcast
c_inside = np.product(c_inside, axis=0).astype(bool)# (nm) must be true across all dims
# get bounding corners of each model
x = np.stack([xmins, xmaxs], axis=0)# (2, nd, nm)
bb = np.zeros((2**nd, nd)+nm)# (2**nd, nd, nm)
for ic,offsets in enumerate(itertools.product(*([[0,1]]*nd))):# for each corner
for i,o in enumerate(offsets):# for each dimension
bb[(ic,i)+(slice(None),)*nd] = x[(o,i)+(slice(None),)*nd]
# distance from each corner to point
point = point.copy().reshape((1, nd)+(1,)*nd)# (1, nd, (1,)*nd)
r = np.sqrt(np.sum((bb - point)**2, axis=1))# (2**nd, nm) point is broadcast
rmin, rmax = np.min(r, axis=0), np.max(r, axis=0)
import numpy as np
import pybullet as p
from .env import AssistiveEnv
from .agents import furniture
from .agents.furniture import Furniture
class FeedingNewEnv(AssistiveEnv):
def __init__(self, robot, human):
super(FeedingNewEnv, self).__init__(robot=robot, human=human, task='feeding', obs_robot_len=(18 + len(robot.controllable_joint_indices) - (len(robot.wheel_joint_indices) if robot.mobile else 0)), obs_human_len=(19 + len(human.controllable_joint_indices)))
def step(self, action):
if self.human.controllable:
action = np.concatenate([action['robot'], action['human']])
self.take_step(action)
obs = self._get_obs()
reward_food, food_mouth_velocities, food_hit_human_reward = self.get_food_rewards()
# Get human preferences
end_effector_velocity = np.linalg.norm(self.robot.get_velocity(self.robot.right_end_effector))
preferences_score = self.human_preferences(end_effector_velocity=end_effector_velocity, total_force_on_human=self.total_force_on_human, tool_force_at_target=self.spoon_force_on_human, food_hit_human_reward=food_hit_human_reward, food_mouth_velocities=food_mouth_velocities)
spoon_pos, spoon_orient = self.tool.get_base_pos_orient()
reward_distance_mouth_target = -np.linalg.norm(self.target_pos - spoon_pos)
#!/usr/bin/python3
#################################################################################
# Fedorenko
#
# Copyright (C) 2021, <NAME>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################
# Import all necessary modules
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
############################### GLOBAL VARIABLES ################################
# Choose the grid sizes as indices from below list so that there are 2^n + 2 grid points
# Size index: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14
# Grid sizes: 1 2 4 8 16 32 64 128 256 512 1024 2048 4096 8192 16384
sInd = np.array([6, 6])
# Flag to switch between uniform and non-uniform grid with tan-hyp stretching
nuFlag = True
# Stretching parameter for tangent-hyperbolic grid
beta = 1.0
# Depth of each V-cycle in multigrid
VDepth = min(sInd) - 1
# Number of V-cycles to be computed
vcCnt = 10
# Number of iterations during pre-smoothing
preSm = 3
# Number of iterations during post-smoothing
pstSm = 3
# Tolerance value for iterative solver
tolerance = 1.0e-6
# N should be of the form 2^n
# Then there will be 2^n + 2 points in total, including 2 ghost points
sLst = [2**x for x in range(12)]
# Get array of grid sizes are tuples corresponding to each level of V-Cycle
N = [(sLst[x[0]], sLst[x[1]]) for x in [sInd - y for y in range(VDepth + 1)]]
# Maximum number of iterations while solving at coarsest level
maxCount = 10*N[-1][0]*N[-1][1]
# Integer specifying the level of V-cycle at any point while solving
vLev = 0
# Flag to determine if non-zero homogenous BC has to be applied or not
zeroBC = False
##################################### MAIN ######################################
def main():
global N
global pData
global rData, sData, iTemp
nList = np.array(N)
rData = [np.zeros(tuple(x)) for x in nList]
pData = [np.zeros(tuple(x)) for x in nList + 2]
sData = [np.zeros_like(x) for x in pData]
iTemp = [np.zeros_like(x) for x in rData]
initGrid()
initDirichlet()
mgRHS = np.ones_like(pData[0])
# Solve
t1 = datetime.now()
mgLHS = multigrid(mgRHS)
t2 = datetime.now()
print("Time taken to solve equation: ", t2 - t1)
plotResult(0)
############################## MULTI-GRID SOLVER ###############################
# The root function of MG-solver. And H is the RHS
def multigrid(H):
global N
global vcCnt
global rConv
global pAnlt
global pData, rData
rData[0] = H[1:-1, 1:-1]
chMat = np.zeros(N[0])
rConv = np.zeros(vcCnt)
"""
This file contains a class to locate and assign coordinates onto a stitched frame
This class will keep track of all points and manage them, for each frame that's
passed in with a point, we will extract the GPS coordinate from it.
"""
from video_process.image import StitchImage
import tkinter
from tkinter import ttk, filedialog
from PIL import Image, ImageTk
import cv2
import pandas as pd
import numpy as np
# from geopy.distance import distance,geodesic, lonlat
class Locate():
def __init__(self, parent, video_source):
"""
The parent window is the program that originally calls it.
"""
self.window = parent
self.window_width = 1080
self.window_height = 720
# assigned records where the GPS data is linked to the pixel data
self.assigned = []
# referenced records where a particular frame find's its place on the working image
self.referenced = []
self.referenced_tracked = []
# data is the GPS coordinate data
self.gps_data = None
self.tracked_data = None
# video source is the video which the interface works on
self.video_source = video_source
self.video_width = 1920
self.video_height = 1440
self.vid_length = None
#image_processor is the image processing layer of this interface.
self.img_processor = StitchImage()
#process image is the image which is to be worked on. This is the largest stitched image.
self.process_image = None
#step is how many frames exists between stitched frames
self.step = 100
#stitched frames is the total number of frames per stitched image
self.stitched_frames=10
self.view_frame = 11
#delay is the milisend delay between updating UI
self.delay = 15
def start(self, finish_frame):
"""
Top-level driver: load the GPS and tracking data, stitch frames from the video, reference each frame against the stitched image, and set up the UI.
"""
#counter for each stitched set
stitched_number = 1
self.gps_data = self.load_data("GPS COORDINATES")
self.tracked_data = self.load_data("TRACKED PIXELS")
cap = cv2.VideoCapture(self.video_source)
self.vid_length = cap.get(cv2.CAP_PROP_FRAME_COUNT)
self.vid_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
self.vid_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
#stop at the end of the video
finish_frame = self.vid_length/2
#iterate in large steps where each step is the range of frames stitched. Eg. stitch(frames[100,200,300]) each iteration is 300
for num_stitch in range(0, int(finish_frame), int(self.step*self.stitched_frames)):
# print(int(self.step*self.stitched_frames))
# print(num_stitch)
#if limit is not passed
if num_stitch + self.step*self.stitched_frames - finish_frame < 0:
#create a stitched image
scan = self.stitch_from_video(self.video_source, num_stitch)
#reference each frame in the video that exists in the range stitched Eg. every frame in range 100-300
print("Finding reference...")
ret, frame = cap.read()
for frames in range(1 ,int((self.step*self.stitched_frames)) ):
#read each frame and record their referenced locations. These are the corners of the frame
ret, frame = cap.read()
if ret:
if frames % 10 == 0:
print(str(frames) + "/" + str(self.step*self.stitched_frames*stitched_number))
try:
points, matrix = self.reference(frame, scan)
self.referenced.append(points)
except:
print("Could not reference frame " + str(frames))
try:
self.referenced_tracked.append(self.map_referenced(frames, matrix))
except:
print("Data does not exist for this frame index")
print("Number of coordinates: "+str(len(self.referenced)))
stitched_number +=1
cv2.imwrite("./output/stitched/stitched"+ str(stitched_number) +".jpg", scan)
# reference_data = pd.DataFrame(self.referenced, columns=['frame','Top_Left', 'Top_Right'
# ,'Bottom_Right', 'Bottom_Left'])
# reference_data.to_csv("./output/csv/" + "Reference" + ".csv")
self.process_image = scan
self.setup_dropdown(self.window, self.gps_data)
self.setup_frame_bar()
#create a frame inside the window which will contain the canvas
frame=tkinter.Frame(self.window,width=self.window_width,height=self.window_height)
frame.pack(side=tkinter.LEFT)
self.canvas=tkinter.Canvas(frame,width=self.window_width,height=self.window_height,cursor="crosshair")
self.canvas.pack(side=tkinter.LEFT, expand=True,fill=tkinter.BOTH)
#set the image inside of the region
resized = cv2.resize(self.process_image,(self.window_width, self.window_height),cv2.INTER_CUBIC)
self.photo = ImageTk.PhotoImage(image=Image.fromarray(resized))
#0,0 is the image location we anchor, anchor is how. If not anchored to nw, it sets the center of the image to top left corner
self.canvas.create_image(0,0,image=self.photo, anchor="nw")
self.canvas.bind("<Button-1>", self.assign_coord)
self.update()
self.window.mainloop()
def update(self):
self.id_coordinates.config(text=self.get_data_by_id(int(self.tkvar.get())))
# print(self.assigned[0][0])
display = self.process_image.copy()
# gray = cv2.cvtColor(display, cv2.COLOR_BGR2GRAY)
current_coord = self.get_data_by_id(int(self.tkvar.get()))
for points in range(len(self.assigned)):
colour = (255,0,0)
if current_coord == self.assigned[points][1]:
colour = (0,255,0)
cv2.circle(display, tuple(self.assigned[points][0]), 10, colour, -1, cv2.LINE_AA)
try:
cv2.polylines(display,self.referenced[self.view_frame], True, (0, 0, 255),5, cv2.LINE_AA)
except:
print("no references exist for that frame")
try:
cv2.circle(display, tuple(self.referenced_tracked[self.view_frame]), 10, (0,191,255), -1, cv2.LINE_AA)
except:
print("Cannot place points on frame")
#if we can calculate distance
if len(self.assigned) > 1:
# try:
for i in range(len(self.assigned)):
pix_coord1 = self.assigned[i][0]
real_coord1 = self.assigned[i][1]
for j in range(len(self.assigned)):
#we dont want to measure distance to self
if i != j:
try:
pix_coord2 = self.assigned[j][0]
real_coord2 = self.assigned[j][1]
pixel_dist = self.calculate_pixel_distance(pix_coord1,pix_coord2)
real_dist = self.calculate_gps_distance(real_coord1, real_coord2)
dist_ratio = self.get_distance_ratio(pixel_dist, real_dist)
print("Pixels:" + str(pixel_dist) + ", Real:" + str(real_dist)+ "m, ",end="")
cv2.line(display, pix_coord1, pix_coord2, (255,215,0), 5)
# for k in range(len(self.referenced_tracked)):
#get the distance from each point (once)?
calculated_dist = self.get_real_distance(self.referenced_tracked[self.view_frame],pix_coord1,dist_ratio)
#this line, for every point, is from the current frame's tracked coordinate
bearing = self.calculate_bearing(pix_coord1, self.referenced_tracked[self.view_frame])
cv2.line(display, self.referenced_tracked[self.view_frame], pix_coord1, (85,240,30), 5)
# print(real_coord1)
new_point = self.get_real_coordinate(real_coord1, calculated_dist, bearing)
print(new_point)
except:
print("")
# cv2.text(display, "Distance: " + str(real_dist),
# pix_coord1, 1, (255,255,255)
# )
# except:
# print("cannot calculate distance between 2 points")
resized = cv2.resize(display,(self.window_width, self.window_height),cv2.INTER_CUBIC)
self.photo = ImageTk.PhotoImage(image=Image.fromarray(resized))
self.canvas.create_image(0,0,image=self.photo, anchor="nw")
self.window.after(self.delay, self.update)
def format_coordinate(self, type):
"""
Placeholder for coordinate format conversion; currently unused.
"""
pass
def load_data(self, title):
"""
loads_coords from csv.
"""
print("loading file")
#if there is no passed variable, open dialog to select
file_types = [('Microsoft Excel', '*.csv'), ('All files', '*')]
dlg = filedialog.Open(filetypes=file_types)
file = dlg.show()
print(file)
#if file is selected, read the data
if file != '':
data = pd.read_csv(file)
print(data)
return data
def assign_coord(self, event):
"""
Uses the ID of the coordinates to assign them on the image
This function should be called by clicking.
"""
h,w = self.process_image.shape[:2]
ratio_x = w / self.window_width
ratio_y = h / self.window_height
#calculate the position to the ratio
event.x = round(event.x * ratio_x)
event.y = round(event.y * ratio_y)
if len(self.assigned) == 0:
self.assigned.append( ( (event.x, event.y), (self.get_data_by_id(int(self.tkvar.get()))) ) )
else:
found = False
current_coord = self.get_data_by_id(int(self.tkvar.get()))
for points in range(len(self.assigned)):
if current_coord == self.assigned[points][1]:
found = True
self.assigned[points] = ( ((event.x, event.y), current_coord) )
break
if not found:
self.assigned.append( ( (event.x, event.y), (self.get_data_by_id(int(self.tkvar.get()))) ) )
return event.x, event.y
def remove_assigned_coord(self):
"""
removes the location of an assigned coordinate
"""
current_coord = self.get_data_by_id(int(self.tkvar.get()))
for points in range(len(self.assigned)):
if current_coord == self.assigned[points][1]:
self.assigned.pop(points)
break
def convert_referenced(self, point):
top_left = point[0][0][0]
top_right = point[0][1][0]
bottom_right = point[0][2][0]
bottom_left = point[0][3][0]
return [top_left, top_right, bottom_right, bottom_left]
def setup_dropdown(self, window, data):
"""
This dropdown is what maps the data to the user interface to select
GPS coordinate ID's and assigns the coordinates to pixel locations
"""
print("Setting up dropdown")
# Create a Tkinter variable
self.tkvar = tkinter.StringVar(window)
all_id = self.gps_data.loc[:,'ID'].tolist()
#duplicate first index
all_id.insert(0, all_id[0])
self.tkvar.set(all_id[0])
#assign all id's to the option menu
popup_menu = ttk.OptionMenu(window, self.tkvar, *all_id)
popup_lable = tkinter.Label(window, text="GPS Coordinates")
self.id_coordinates = tkinter.Label(window, text=self.get_data_by_id(int(self.tkvar.get())))
remove = tkinter.Button(window, text="Remove", command=self.remove_assigned_coord)
#set the option menu
popup_lable.pack(side=tkinter.TOP)
popup_menu.pack(side=tkinter.TOP)
self.id_coordinates.pack(side=tkinter.TOP)
remove.pack(side=tkinter.TOP)
def setup_frame_bar(self):
self.frame_bar = ttk.Scale(self.window, from_=0, to=self.vid_length - 1, command=self.set_frame)
self.frame_bar.config(length=self.window_width)
self.frame_bar.pack(side=tkinter.BOTTOM)
def set_frame(self, value):
self.view_frame = int(float(value))
def get_data_by_id(self, id):
id_type = self.gps_data.ID.dtype
#get the row which the id matches
id_loc = self.gps_data.loc[self.gps_data['ID'] == id]
# print(id_loc)
#get the coordinates of that id
x_coord = id_loc.iloc[0]['X']
y_coord = id_loc.iloc[0]['Y']
return x_coord, y_coord
def map_referenced(self, frame, matrix):
"""
This function will map the given points through the reference onto
the stitched frame
"""
try:
# print(frame)
frame = int(frame)
#get tracked coordinates for that frame
frame_index = self.tracked_data.loc[self.tracked_data['frame'] == frame]
#with the frame index, we find the coordinates
x_coord = frame_index.iloc[0]['pos_x']
y_coord = frame_index.iloc[0]['pos_y']
point = np.array([x_coord, y_coord], dtype=np.float32)
import json
import numpy as np
from . import parse
def descriptors(materials, embedding_file, operations=["wmean","wstd"]):
"""
Compute composition-based feature vectors for a list of material formulas.
"""
featuriser = Featuriser(embedding_file)
statistics = Statistics(operations)
descriptors_list = []
for material in materials:
elements, weights = parse.parse_composition(material)
atom_features = atom_descriptors(elements, featuriser)
material_features = material_descriptors(atom_features, weights, statistics)
descriptors_list.append(material_features)
features = np.vstack(descriptors_list)
return features
def atom_descriptors(elements, featuriser):
"""
get feature vectors for the atoms
"""
atom_fea = np.vstack([featuriser.get_fea(element) for element in elements])
return atom_fea
class Featuriser(object):
"""
Lookup dict object
"""
def __init__(self, embedding_file):
with open(embedding_file) as f:
self.embedding = json.load(f)
self.allowed_types = set(self.embedding.keys())
def get_fea(self, key):
assert key in self.allowed_types, "{} wasn't allowed".format(key)
return self.embedding[key]
def get_dict(self):
return self.embedding
def material_descriptors(features, weights, statistics):
"""
get feature vectors for the materials
"""
material_fea = statistics.dispatch(features, weights)
return material_fea
class Statistics(object):
"""
Statistics object
"""
def __init__(self, operations):
self.operations = operations
def dispatch(self, features, weights):
data_list = []
for operation in self.operations:
method_name = "eval_" + str(operation)
method = getattr(self, method_name)
stat = method(features, weights)
data_list.append(stat)
data = np.hstack(data_list)
return data
def eval_wmean(self, features, weights):
return np.average(features, axis=0, weights=weights)
def eval_wstd(self, features, weights):
wmean = self.eval_wmean(features, weights)
return np.sqrt(np.average((features - wmean) ** 2, axis=0, weights=weights))
def eval_geometric(self, features, weights):
return np.exp(np.sum(weights*np.log(features), axis=0)/np.sum(weights, axis=0))
def eval_harmonic(self, features, weights):
return np.sum(weights, axis=0) / np.sum(weights/features, axis=0)
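# Hedged usage sketch (not part of the original file): pooling two atoms' feature rows with
# fractional weights into one material vector (weighted means followed by weighted stds).
#   feats = np.array([[1.0, 2.0], [3.0, 4.0]])
#   wts = np.array([0.25, 0.75])
#   Statistics(["wmean", "wstd"]).dispatch(feats, wts)
#   # -> approximately array([2.5, 3.5, 0.866, 0.866])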
from .ngram_vectorizer import ngrams_of
from .preprocessing import (
prune_token_dictionary,
preprocess_token_sequences,
construct_token_dictionary_and_frequency,
construct_document_frequency,
)
from sklearn.utils.validation import check_is_fitted
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import normalize
from sklearn.utils.extmath import randomized_svd, svd_flip
from collections.abc import Iterable
from scipy.sparse.linalg import svds
from .DS_NMF import DS_NMF
from .transformers.info_weight import InformationWeightTransformer
import vectorizers.distances as distances
from .utils import (
validate_homogeneous_token_types,
flatten,
str_to_bytes,
pair_to_tuple,
make_tuple_converter,
dirichlet_process_normalize,
dp_normalize_vector,
l1_normalize_vector,
)
from .coo_utils import (
coo_append,
coo_sum_duplicates,
CooArray,
merge_all_sum_duplicates,
set_array_size,
)
import numpy as np
import numba
import dask
import scipy.sparse
from ._window_kernels import (
_KERNEL_FUNCTIONS,
_WINDOW_FUNCTIONS,
window_at_index,
update_kernel,
)
MOCK_DICT = numba.typed.Dict()
MOCK_DICT[(-1, -1)] = -1
@numba.njit(nogil=True)
def build_multi_skip_ngrams(
token_sequences,
window_size_array,
window_reversals,
kernel_array,
kernel_args,
mix_weights,
normalize_windows,
n_unique_tokens,
array_lengths,
ngram_dictionary=MOCK_DICT,
ngram_size=1,
array_to_tuple=pair_to_tuple,
):
"""Generate a matrix of (weighted) counts of co-occurrences of tokens within
windows in a set of sequences of tokens. Each sequence in the collection of
sequences provides an effective boundary over which skip-grams may not pass
(such as sentence boundaries in an NLP context). This is done for a collection
of different window and kernel types simultaneously.
Parameters
----------
token_sequences: Iterable of Iterables
The collection of token sequences to generate skip-gram data for.
n_unique_tokens: int
The number of unique tokens in the token_dictionary.
window_size_array: numpy.ndarray(float, size = (n_windows, n_unique_tokens))
A collection of window sizes per vocabulary index per window function
window_reversals: numpy.array(bool, size = (n_windows,))
Array indicating whether the window is after the word or not.
kernel_array: numpy.ndarray(float, size = (n_windows, max_window_radius))
A collection of kernel values per window index per window function
kernel_args: tuple of tuples
Arguments to pass through to the kernel functions per function
mix_weights: numpy.array(bool, size = (n_windows,))
The scalar values used to combine the values of the kernel functions
normalize_windows: bool
Indicates whether or not to L_1 normalize the kernel values per window occurrence
array_lengths: numpy.array(int, size = (n_windows,))
The lengths of the arrays per window used to store the coo matrix triples.
ngram_dictionary: dict (optional)
The dictionary from tuples of token indices to an n_gram index
ngram_size: int (optional, default = 1)
The size of ngrams to encode token cooccurences of.
array_to_tuple: numba.jitted callable (optional)
Function that casts arrays of fixed length to tuples
Returns
-------
cooccurrence_matrix: CooArray
Kernel-weighted counts of how often token_head[i] co-occurred with token_tail[i]
"""
n_windows = window_size_array.shape[0]
array_mul = n_windows * n_unique_tokens + 1
kernel_masks = [ker[0] for ker in kernel_args]
kernel_normalize = [ker[1] for ker in kernel_args]
window_reversal_const = np.zeros(len(window_reversals)).astype(np.int32)
window_reversal_const[window_reversals] = 1
coo_data = [
CooArray(
np.zeros(array_lengths[i], dtype=np.int32),
np.zeros(array_lengths[i], dtype=np.int32),
np.zeros(array_lengths[i], dtype=np.float32),
np.zeros(array_lengths[i], dtype=np.int64)
import os, sys, wave, re
import copy
import math
import numpy as np
import pickle, string, csv
from emotion_inferring.dataset.iemocap_utils import *
from gensim.models.keyedvectors import KeyedVectors
emotions_used = np.array(['ang', 'exc', 'hap', 'neu', 'sad'])
sessions = ['Session1', 'Session2', 'Session3', 'Session4', 'Session5']
def read_iemocap_mocap(data_path, word2vec_path, renew=False):
file_path = data_path + '/../' + 'data_collected.pickle'
fea_folder_path = data_path + '/../' + 'audio_features_ComParE2016/'
if not os.path.isfile(file_path) or renew:
Word2Vec = KeyedVectors.load_word2vec_format(word2vec_path, binary=True)
data = []
ids = {}
for session in sessions:
path_to_wav = data_path + session + '/dialog/wav/'
path_to_emotions = data_path + session + '/dialog/EmoEvaluation/'
path_to_transcriptions = data_path + session + '/dialog/transcriptions/'
path_to_features = fea_folder_path + session + '/'
files2 = os.listdir(path_to_wav)
files = []
for f in files2:
if f.endswith(".wav"):
if f[0] == '.':
files.append(f[2:-4])
else:
files.append(f[:-4])
for f in files:
print('Processing ' + f + ' ...')
wav = get_audio(path_to_wav, f + '.wav')
with open(path_to_features + f + '.csv', newline='') as fea_file:
reader = csv.reader(fea_file, delimiter=';')
first_line = True
features = []
for row in reader:
if first_line:
first_line = False
continue
features.append(np.array(row[1:], dtype=np.float))
transcriptions = get_transcriptions(path_to_transcriptions, f + '.txt')
emotions = get_emotions(path_to_emotions, f + '.txt')
sample = split_wav(wav, features, emotions)
for ie, e in enumerate(emotions):
e['signal'] = sample[ie]['left']
e['acoustic_features'] = sample[ie]['acoustic_features']
e.pop("left", None)
e.pop("right", None)
transcriptions_list = re.split(r' ', transcriptions[e['id']])
transcriptions_emb = []
for word in transcriptions_list:
word = ''.join(filter(str.isalpha, word))
if len(word) < 1:
continue
try:
transcriptions_emb.append(np.array(Word2Vec[word]))
from pathlib import Path
import os , re
import datetime as dt
import numpy as np
import xarray as xr
from io import StringIO
def is_valid_cptv10(da, assertmissing=True, assert_units=True):
valid_dims = ['T', 'X', 'Y', 'Mode', 'index', 'C']
valid_coords = ['T', 'Ti', 'Tf', 'S', 'X', 'Y', 'Mode', 'index', 'C']
assert type(da) == xr.DataArray, "CPTv10 only deals with data arrays, not datasets"
assert len(list(da.dims)) >= 2 and len(list(da.dims)) <= 4, 'CPTv10 can only have between 2-4 dimensions'
for dim in da.dims:
assert dim in valid_dims, 'Invalid dim for a CPTv10: {}'.format(dim)
for coord in da.coords:
assert coord in valid_coords, 'Invalid coord for a CPTv10: {}'.format(coord)
for dim in da.dims:
assert dim in da.coords, 'Each dim on a CPTv10 must have corresponding coordinates'
assert len(da.coords[dim].values) == da.shape[list(da.dims).index(dim)], 'Each dim on a CPTv10 must have exactly one coordinate per index along that dimension'
if 'T' in da.dims:
for dim in ['Ti', 'Tf', 'S']:
if dim in da.coords:
assert len(da.coords[dim].values) == da.shape[list(da.dims).index('T')], 'If the CPTv10 has optional Time coordinates Ti, Tf, or S, they must be indexing the T dimension'
for dim in ['Ti', 'Tf', 'S']:
if dim in da.dims:
assert 'T' in da.dims, "if the optional time coordinates are present on the CPTv10, the required time coord must also be"
if 'Ti' in da.coords:
assert 'Tf' in da.coords, 'Cannot have one optional time coordinate and not the other. found Ti but not Tf. except for S'
if 'Tf' in da.coords:
assert 'Ti' in da.coords, 'Cannot have one optional time coordinate and not the other. found Tf but not Ti. except for S'
if assertmissing:
assert 'missing' in da.attrs.keys(), "CPTv10 is required to have a 'missing' attribute indicating the 'missing_value' value which replaces NaNs in CPT"
assert not np.isnan(float(da.attrs['missing'])), "CPTv10 Missing Value cannot be NaN"
if assert_units:
assert 'units' in da.attrs.keys(), 'CPTv10 is required to have a "units" attribute'
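# Hedged usage sketch (not part of the original file): a minimal DataArray that satisfies the
# checks above.
#   da = xr.DataArray(np.zeros((2, 3)), dims=['Y', 'X'],
#                     coords={'Y': [0.0, 1.0], 'X': [0.0, 1.0, 2.0]},
#                     attrs={'missing': '-999', 'units': 'mm'})
#   is_valid_cptv10(da)   # passes silently; raises AssertionError if a rule is violated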
def cpt_headers(header):
m = re.compile("(?P<tag>cpt:.*?|cf:.*?)=(?P<value>.*?,|.*$)")
matches = m.findall(header)
return len(matches), { i.split(':')[1]: j.replace(',', '') for i,j in matches }
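# Hedged usage sketch (not part of the original file): cpt_headers pulls "tag=value" pairs out
# of a CPT header line and strips the namespace prefix.
#   cpt_headers("cpt:field=prcp, cpt:nrow=181, cpt:ncol=360")
#   # -> (3, {'field': 'prcp', 'nrow': '181', 'ncol': '360'})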
def open_cptdataset(filename):
assert Path(filename).absolute().is_file(), 'Cannot find {}'.format(Path(filename).absolute())
with open(str(Path(filename).absolute()), 'r') as f:
content1 = f.read()
content = [line.strip() for line in content1.split("\n") if 'xmlns' not in line]
xmlnns_lines = [line.strip() for line in content1.split("\n") if 'xmlns' in line ]
assert 'xmlns:cpt=http://iri.columbia.edu/CPT/v10/' in ' '.join(xmlnns_lines), 'CPT XML Namespace: {} Not detected'.format('xmlns:cpt=http://iri.columbia.edu/CPT/v10/')
headers = [(linenum, *cpt_headers(line)) if ',' in line or ('=' in line and 'ncats' not in line and 'nfields' not in line) else ( linenum, line ) for linenum, line in enumerate(content) if 'cpt:' in line ]
attrs, data_vars = {}, {}
for i, header in enumerate(headers):
if len( header) == 3: # we are only looking at the CPT headers that preceed a data block
attrs_at_row = { k: header[2][k] for k in header[2].keys() if k not in ['T', 'S'] }
attrs.update(attrs_at_row)
array = np.genfromtxt( StringIO('\n'.join(content[header[0]+2:header[0]+2+ int(attrs['nrow'])])), delimiter='\t', dtype=str)
columns = np.genfromtxt(StringIO(content[header[0]+1]), delimiter='\t', dtype=str)
try:
columns = columns.astype(float)
except:
try:
columns = np.asarray([ read_cpt_date(ii) for ii in columns])
except:
pass
columns = np.expand_dims(columns, 0) if len(columns.shape) < 1 else np.squeeze(columns )
if len(array.shape) < 2:
array = array.reshape(1, -1)
rows = np.squeeze(array[:, 0])
rows = np.expand_dims(rows, 0)
# %%
import os
import numpy as np
import pandas as pd
from gurobipy import GRB, Model, quicksum
from matplotlib import pyplot as plt
from plotly import express as px
from thermo.correlation import expected_rand_obj_val, rand_obj_val_avr
from thermo.data import dropna, load_gaultois, load_screen, train_test_split
from thermo.evaluate import filter_low_risk_high_ret, plot_output
from thermo.rf import RandomForestRegressor
from thermo.utils import ROOT
from thermo.utils.amm import MatPipe, featurize, fit_pred_pipe
DIR = ROOT + "/results/screen/amm+rf/"
os.makedirs(DIR, exist_ok=True)
# %%
magpie_features, gaultois_df = load_gaultois(target_cols=["formula", "zT", "T"])
screen_df, _ = load_screen()
for df in [gaultois_df, screen_df]:
df.rename(columns={"formula": "composition"}, inplace=True)
# %%
# Form Cartesian product between screen features and the 4 temperatures ([300, 400, 700,
# 1000] Kelvin) found in Gaultois' database. We'll predict each material at all 4 temps.
# Note: None of the composition are predicted to achieve high zT at 300, 400 Kelvin.
# Remove those to cut computation time in half.
temps = (700, 1000)
temps_col = np.array(temps).repeat(len(screen_df))
screen_df = screen_df.loc[screen_df.index.repeat(len(temps))]
screen_df.insert(0, "T", temps_col)
# %%
mat_pipe_zT, zT_pred = fit_pred_pipe(gaultois_df, screen_df, "zT")
# %%
mat_pipe_zT = MatPipe.save(DIR + "mat.pipe")
# %%
mat_pipe_zT = MatPipe.load(DIR + "mat.pipe")
# %%
amm_train_features = featurize(mat_pipe_zT, gaultois_df[["T", "composition"]])
amm_screen_features = featurize(mat_pipe_zT, screen_df[["T", "composition"]])
# %%
# add composition column for duplicate detection so we save features for
# every material only once
amm_screen_features["composition"] = screen_df.composition
amm_screen_features.drop_duplicates(subset=["composition"]).to_csv(
DIR + "amm_screen_features.csv", float_format="%g", index=False
)
amm_train_features.to_csv(
DIR + "amm_train_features.csv", float_format="%g", index=False
)
# %%
amm_train_features = pd.read_csv(DIR + "amm_train_features.csv")
amm_screen_features = pd.read_csv(DIR + "amm_screen_features.csv")
del amm_screen_features["composition"]
# add temperature column to AMM features
amm_screen_features = amm_screen_features.loc[
amm_screen_features.index.repeat(len(temps))
]
amm_screen_features.insert(0, "T", temps_col)
# %% [markdown]
# # Check AMM+RF performance on Gaultois data
# Running cells in this section shows automatminer (AMM) features (which are just a
# subset of less correlated MagPie features) performs about the same as the complete
# MagPie set in accuracy but slightly better in uncertainty.
# %%
zT_series, magpie_features, check_features = dropna(
gaultois_df.zT, magpie_features, amm_train_features
)
[X_tr_amm, X_tr_magpie, y_tr], [X_test_amm, X_test_magpie, y_test] = train_test_split(
check_features, magpie_features, zT_series
)
# %%
amm_rf_zT = RandomForestRegressor()
amm_rf_zT.fit(X_tr_amm, y_tr)
amm_check_pred, amm_check_var = amm_rf_zT.predict(X_test_amm)
plot_output(y_test.values, amm_check_pred, amm_check_var)
# %%
magpie_rf_zT = RandomForestRegressor()
magpie_rf_zT.fit(X_tr_magpie, y_tr)
magpie_check_pred, magpie_check_var = magpie_rf_zT.predict(X_test_magpie)
plot_output(y_test.values, magpie_check_pred, magpie_check_var)
# %% [markdown]
# # Train AMM+RF on entire Gaultois data, then screen ICSD+COD
# %%
rf_zT = RandomForestRegressor()
rf_zT.fit(amm_train_features.iloc[gaultois_df.dropna().index], gaultois_df.zT.dropna())
zT_pred, zT_var = rf_zT.predict(amm_screen_features)
screen_df["zT_pred"] = zT_pred
screen_df["zT_var"] = zT_var
# %% [markdown]
# # Coarse triaging
# %%
# Save to CSV the 20 materials predicted to have the highest zT with no concern for
# estimated uncertainty. Baseline comparison to check if uncertainty estimation reduces
# the false positive rate.
screen_df.sort_values("zT_pred", ascending=False)[:20].to_csv(
ROOT + "/results/screen/hr-materials.csv", index=False, float_format="%g"
)
# %%
lrhr_idx = filter_low_risk_high_ret(screen_df.zT_pred, screen_df.zT_var, min_ret=1.3)
lrhr_candidates = screen_df[lrhr_idx]
# %%
px.scatter(lrhr_candidates, x="zT_var", y="zT_pred", hover_data=lrhr_candidates.columns)
# %% [markdown]
# # Correlation between low-risk high-return materials
# %%
zT_corr = rf_zT.get_corr(amm_screen_features.iloc[lrhr_candidates.index])
zT_corr = pd.DataFrame(
zT_corr, columns=lrhr_candidates.composition, index=lrhr_candidates.composition
)
# %%
zT_corr.to_csv(DIR + "correlation_matrix.csv", float_format="%g")
# %%
zT_corr_evals, zT_corr_evecs = np.linalg.eig(zT_corr)
from multiprocessing import Pool
import numpy as np
from scipy.spatial.transform import Rotation as R
def array_to_list(input):
if type(input) != type(list()): # convert 3D array to list of 2D arrays
input = list(input)
return input
def np_mat_to_rot6d(np_mat):
""" Get 6D rotation representation for rotation matrix.
Implementation base on
https://arxiv.org/abs/1812.07035
[Inputs]
flattened rotation matrix (last dimension is 9)
[Returns]
6D rotation representation (last dimension is 6)
"""
shape = np_mat.shape
if not ((shape[-1] == 3 and shape[-2] == 3) or (shape[-1] == 9)):
raise AttributeError("The inputs in tf_matrix_to_rotation6d should be [...,9] or [...,3,3], \
but found tensor with shape {}".format(shape[-1]))
np_mat = np.reshape(np_mat, [-1, 3, 3])
np_r6d = np.concatenate([np_mat[...,0], np_mat[...,1]], axis=-1)
if len(shape) == 1:
np_r6d = np.reshape(np_r6d, [6])
return np_r6d
## utility function to convert from r6d space to axis angle
def _rot6d_to_aa(r6ds):
res = np.zeros((r6ds.shape[0], 3))
for i,row in enumerate(r6ds):
np_r6d = np.expand_dims(row, axis=0)
np_mat = np.reshape(np_rot6d_to_mat(np_r6d)[0], (3,3))
np_mat = R.from_matrix(np_mat)
aa = np_mat.as_rotvec()
res[i,:] = aa
return res
def clip_rot6d_to_aa(r6d_clip):
aa_clip = np.empty((r6d_clip.shape[0], r6d_clip.shape[1]//2))
for idx in range(0, r6d_clip.shape[1], 6):
aa_clip[:,idx//2:idx//2+3] = _rot6d_to_aa(r6d_clip[:,idx:idx+6])
return aa_clip
def rot6d_to_aa(r6d):
r6d = array_to_list(r6d)
aa = []
with Pool(processes=24) as pool:
aa = pool.starmap( clip_rot6d_to_aa, zip(r6d) )
return aa
## utility function to convert from axis angle to r6d space
def _aa_to_rot6d(vecs):
res = np.zeros((vecs.shape[0], 6))
for i,row in enumerate(vecs):
np_mat = R.from_rotvec(row)
np_mat = np_mat.as_matrix()
np_mat = np.expand_dims(np_mat, axis=0) #e.g. batch 1
np_r6d = np_mat_to_rot6d(np_mat)[0]
res[i,:] = np_r6d
return res
# convert from axis angle to r6d space
def aa_to_rot6d(aa):
aa = array_to_list(aa)
r6d = []
for clip in range(len(aa)):
aa_clip = aa[clip]
r6d_clip = np.empty((aa_clip.shape[0], aa_clip.shape[1]*2)) # from 3d to r6d
for idx in range(0, aa_clip.shape[1], 3):
r6d_clip[:,idx*2:idx*2+6] = _aa_to_rot6d(aa_clip[:,idx:idx+3])
r6d.append(r6d_clip)
return r6d
# https://github.com/facebookresearch/body2hands/blob/0eba438b4343604548120bdb03c7e1cb2b08bcd6/utils/load_utils.py
## utility function to convert from r6d space to rotation matrix
def np_rot6d_to_mat(np_r6d):
shape = np_r6d.shape
np_r6d = np.reshape(np_r6d, [-1,6])
x_raw = np_r6d[:,0:3]
y_raw = np_r6d[:,3:6]
x = x_raw / (np.linalg.norm(x_raw, ord=2, axis=-1) + 1e-6)
z = np.cross(x, y_raw)
z = z / (np.linalg.norm(z, ord=2, axis=-1) + 1e-6)
y = np.cross(z, x)
x = np.reshape(x, [-1,3,1])
y = np.reshape(y, [-1,3,1])
z = np.reshape(z, [-1,3,1])
np_matrix = np.concatenate([x,y,z], axis=-1)
if len(shape) == 1:
np_matrix = np.reshape(np_matrix, [9])
else:
output_shape = shape[:-1] + (9,)
np_matrix = np.reshape(np_matrix, output_shape)
return np_matrix
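# Hedged round-trip check (not part of the original file): the 6D representation keeps the
# first two columns of the rotation matrix, so axis-angle -> r6d -> axis-angle recovers the
# input up to numerical error.
#   aa = np.array([[0.1, 0.2, 0.3]])
#   np.allclose(_rot6d_to_aa(_aa_to_rot6d(aa)), aa)   # -> True (approximately)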
# From a vector representing a rotation in axis-angle representation,
# retrieves the rotation angle and the rotation axis
def _retrieve_axis_angle(aa):
th = np.linalg.norm(aa, axis=1)
a = aa / th[:,np.newaxis]
return a, th
def aa_to_xyz(aa, root, bone_len, structure):
aa = array_to_list(aa)
xyz = []
for i in range(len(aa)):
aa_clip = aa[i]
xyz_clip = np.empty((aa_clip.shape[0], aa_clip.shape[1]+6), dtype="float32") # add 6 values, corresponding to two keypoints defining the root bone
xyz_clip[:,0:6] = root
for iBone in range(1,len(structure)):
id_p_J, id_p_E, _, id_p_B = structure[iBone]
p_J, p_B = xyz_clip[:,id_p_J*3:id_p_J*3+3], xyz_clip[:,id_p_B*3:id_p_B*3+3]
u = p_J - p_B
u = u / np.linalg.norm(u, axis=1)[:, np.newaxis]
a, th = _retrieve_axis_angle(aa_clip[:,(iBone-1)*3:(iBone-1)*3+3])
# Rodrigues' rotation formula
v = np.multiply(u, np.cos(th)[:, np.newaxis]) \
+ np.multiply(np.cross(a, u), np.sin(th)[:, np.newaxis]) \
+ np.multiply(np.multiply(a, np.einsum('ij,ij->i', a, u)[:, np.newaxis]), (1-np.cos(th))[:, np.newaxis])
p_E = p_J + bone_len[iBone]*v
xyz_clip[:,(iBone+1)*3:(iBone+1)*3+3] = p_E
xyz.append(xyz_clip)
return xyz
def xyz_to_aa(xyz, structure):
xyz = array_to_list(xyz)
aa = []
for i in range(len(xyz)):
xyz_clip = xyz[i]
aa_clip = np.array([])
for iBone in range(1,len(structure)):
id_p_J, id_p_E, _, id_p_B = structure[iBone]
u = xyz_clip[:,id_p_J*3:id_p_J*3+3] - xyz_clip[:,id_p_B*3:id_p_B*3+3]
v = xyz_clip[:,id_p_E*3:id_p_E*3+3] - xyz_clip[:,id_p_J*3:id_p_J*3+3]
th = np.arccos( np.einsum('ij,ij->i', u, v)/(np.linalg.norm(u, axis=1)*np.linalg.norm(v, axis=1) + 1e-6) )
a = np.cross(u, v)
import sys
import numpy as np
from ..util import tiling_2d as tiling
from ..scores.cd import cd, cd_text
from skimage import measure # for connected components
from math import ceil
from scipy.signal import convolve2d
from copy import deepcopy
from ..scores import score_funcs
# score doesn't have to just be prediction for label
def refine_scores(scores, lab_num):
return scores[:, lab_num]
# higher scores are more likely to be picked
def threshold_scores(scores, percentile_include, method):
X = scores
# pick more when more is already picked
num_picked = np.sum(np.isnan(scores))
if num_picked > scores.size / 3:
percentile_include -= 15
thresh = np.nanpercentile(X, percentile_include)
# thresh = np.max(X) # pick only 1 pixel at a time
im_thresh = np.logical_and(scores >= thresh, ~np.isnan(scores))
# scores >= thresh #np.logical_and(scores >= thresh, scores != 0)
# make sure we pick something
while np.sum(im_thresh) == 0:
percentile_include -= 4
thresh = np.nanpercentile(X, percentile_include)
# thresh = np.max(X) # pick only 1 pixel at a time
im_thresh = np.logical_and(scores >= thresh, ~np.isnan(scores))
# np.logical_and(scores >= thresh, scores != 0)
return im_thresh
# if 3 sides of a pixel are selected, also select the pixel
filt = np.zeros((3, 3))
filt[:, 1] = 1 # middle column
filt[1, :] = 1 # middle row
def smooth_im_thresh(im_thresh_old, im_thresh):
im = im_thresh_old + im_thresh
im_count_neighbors = convolve2d(im, filt, mode='same')
pixels_to_add = np.logical_and(np.logical_not(im), im_count_neighbors >= 3)
return im + pixels_to_add
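# Hedged illustration (not part of the original file): with the cross-shaped filter above, a
# pixel whose top, left and right neighbours are already selected gets filled in.
#   old = np.array([[0, 1, 0], [1, 0, 1], [0, 0, 0]])
#   smooth_im_thresh(old, np.zeros_like(old))[1, 1]   # -> 1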
# establish correspondence between segs
def establish_correspondence(seg1, seg2):
seg_out = np.zeros(seg1.shape, dtype='int64')
new_counter = 0
num_segs = int(np.max(seg2))
import numpy as np
from numpy.random import Generator, PCG64
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.patches import Patch
from modpy.stats import metropolis_hastings
from modpy.stats._core import auto_correlation, auto_correlation_time
from modpy.plot.plot_util import cm_parula, default_color, set_font_sizes
from modpy.illustration.illustration_util import STATS_PATH
def _plot_MH_1D():
# example from: http://www.mit.edu/~ilkery/papers/MetropolisHastingsSampling.pdf
seed = 1234
gen = Generator(PCG64(seed))
n = 150
samples = 100000
mu = np.array([0., 0.])
rho = 0.45
sigma = np.array([(1., rho),
[rho, 1.]])
x1, x2 = gen.multivariate_normal(mu, sigma, n).T
rho_emp = np.corrcoef(x1, x2)[0, 1]
# arbitrary symmetrical distribution
def proposal(rho_):
return np.atleast_1d(gen.uniform(rho_ - 0.07, rho_ + 0.07))
# bi-variate normal distribution with mu1=mu2=0 and sigma1=sigma2=1
def log_like(rho_):
p = 1. / (2. * np.pi * np.sqrt(1. - rho_ ** 2.)) * np.exp(-1. / (2. * (1. - rho_ ** 2.)) * (x1 ** 2 - 2. * rho_ * x1 * x2 + x2 ** 2.))
return np.sum(np.log(p))
# Jeffreys prior
def log_prior(rho_):
return np.log((1. / (1. - rho_ ** 2.)) ** 1.5)
rho0 = np.array([0.])
res = metropolis_hastings(rho0, proposal, log_like, log_prior, samples, burn=100, seed=seed, keep_path=True)
xp = res.x
# calculate auto-correlation and determine lag-time until independence.
lags = 100
auto_corr = auto_correlation(xp.flatten(), lags)
tau = auto_correlation_time(xp.flatten())
# sub-sample only uncorrelated samples
xp_ind = xp[::tau]
samples_ind = np.arange(0, samples, tau)
rho_sam = np.mean(xp_ind)
# plot problem plots -----------------------------------------------------------------------------------------------
# # plot observations
# ax1.scatter(x1, x2, s=20, color=default_color(0))
# ax1.set_xlabel('$x_1$')
# ax1.set_ylabel('$x_2$')
# ax1.grid(True)
# ax1.set_title('Data')
# set_font_sizes(ax1, 12)
# # plot the log-likelihood over the domain [-1, 1]
# k = 500
# rhos = np.linspace(-0.999, 0.999, k)
# L = np.array([log_like(r) for r in rhos])
#
# ax4.plot(rhos, L, color=default_color(0))
# ax4.set_xlabel('$\\rho$')
# ax4.set_ylabel('$\log(f(\\rho | x, y))$')
# ax4.grid(True)
# ax4.set_title('Log-Likelihood')
# set_font_sizes(ax4, 12)
#
# # plot the log-prior probability
# ax5.plot(rhos, log_prior(rhos), color=default_color(0))
# ax5.set_xlabel('$\\rho$')
# ax5.set_ylabel('$\log(f(\\rho))$')
# ax5.grid(True)
# ax5.set_title('Log-Prior Probability')
# set_font_sizes(ax5, 12)
# plot HMC behaviour plots -----------------------------------------------------------------------------------------
# plot
fig, axes = plt.subplots(2, 3, figsize=(20, 14))
ax1, ax2, ax3, ax4 , ax5, ax6 = axes.flatten()
# plot markov chain
ax1.plot(np.arange(samples), xp, color=default_color(0), label='Full')
ax1.plot(samples_ind, xp_ind, color=default_color(1), label='Thinned')
ax1.plot([0, samples], [rho, rho], 'k', label='True $\\rho$')
ax1.plot([0, samples], [rho_emp, rho_emp], color='m', label='Empirical $\\rho$')
ax1.plot([0, samples], [rho_sam, rho_sam], lw=2, color='orange', label='Sampled $\\rho$')
ax1.set_xlim([0, samples])
ax1.set_ylim([0.2, 0.7])
ax1.set_xlabel('Samples')
ax1.set_ylabel('$\\rho$')
ax1.legend(loc='upper right')
ax1.grid(True)
ax1.set_title('Markov Chain')
set_font_sizes(ax1, 12)
# plot histogram of rho
hist = ax2.hist(xp, 50, facecolor=default_color(0)) # , edgecolor='k', linewidth=0.2
freq = hist[0]
max_freq = np.amax(freq) * 1.1
ax2.plot([rho, rho], [0, max_freq], color='k', label='True $\\rho$')
ax2.plot([rho_emp, rho_emp], [0, max_freq], color='m', label='Empirical $\\rho$')
ax2.plot([rho_sam, rho_sam], [0, max_freq], lw=2, color='orange', label='Sampled $\\rho$')
ax2.set_xlim([0.2, 0.7])
ax2.set_ylim([0., max_freq])
ax2.set_xlabel('$\\rho$')
ax2.set_ylabel('Frequency (ind.)')
ax2.grid(True)
ax2.set_title('Posterior Distribution')
set_font_sizes(ax2, 12)
ax2_1 = ax2.twinx()
ax2_1.hist(xp_ind, 50, facecolor=default_color(1), alpha=0.35) # , edgecolor='k', linewidth=0.2
ax2_1.set_ylabel('Frequency')
set_font_sizes(ax2_1, 12)
ax2.legend(handles=(Patch(color=default_color(0), label='Full'),
Patch(color=default_color(1), label='Thinned'),
Line2D([], [], color='k', label='True $\\rho$'),
Line2D([], [], color='m', label='Empirical $\\rho$'),
Line2D([], [], color='orange', label='Sampled $\\rho$')))
# plot the autocorrelation
ax3.plot(np.arange(lags), auto_corr, color=default_color(0), label='Auto-correlation')
ax3.plot([tau, tau], [-1., 1.], 'k--', label='Lag-time, $\\tau}$')
ax3.set_xlim([0., lags])
ax3.set_ylim([-0.1, 1.])
ax3.set_xlabel('Lag')
ax3.set_ylabel('Auto-Correlation')
ax3.legend()
ax3.grid(True)
ax3.set_title('Auto-Correlation')
set_font_sizes(ax3, 12)
# plot the acceptance probability
ax4.plot(np.arange(res.path.accept.size), res.path.accept, color=default_color(0)) # , label='$\delta$'
#ax4.plot([0, res.path.accept.size], [0.65, 0.65], 'k--', label='$\delta_{target}$')
ax4.set_xlim([0, res.path.accept.size])
ax4.set_ylim([0., 1.])
ax4.set_xlabel('Samples (incl. burn-in)')
ax4.set_ylabel('Acceptance Ratio, $\delta$')
ax4.grid(True)
#ax4.legend()
ax4.set_title('Acceptance Ratio')
set_font_sizes(ax4, 12)
fig.savefig(STATS_PATH + '1D_performance_metropolis.png')
def _plot_MH_2D():
seed = 1234
gen = Generator(PCG64(seed))
n = 150
samples = 100000
mu = np.array([0., 0.])
sigma1 = 3.
sigma2 = 2.
rho = 0.9
cov = rho * sigma1 * sigma2
sigma = np.array([(sigma1 ** 2., cov),
[cov, sigma2 ** 2.]])
x1, x2 = gen.multivariate_normal(mu, sigma, n).T
s1_emp = np.std(x1)
s2_emp = np.std(x2)
# arbitrary symmetrical distribution
def proposal(sigma_):
return np.array([gen.uniform(sigma_[0] - 0.25, sigma_[0] + 0.25),
gen.uniform(sigma_[1] - 0.25, sigma_[1] + 0.25)])
# bi-variate normal distribution with mu1=mu2=0, known rho and unknown sigma1 and sigma2
def log_like(sigma_):
s1, s2 = sigma_
p = 1. / (2. * np.pi * s1 * s2 * np.sqrt(1. - rho ** 2.)) * np.exp(-1. / (2. * (1. - rho ** 2.)) * ((x1 / s1) ** 2 - 2. * rho * (x1 / s1) * (x2 / s2) + (x2 / s2) ** 2.))
return np.sum(np.log(p))
# bi-variate normal distribution with mu1=mu2=0, rho=0.0
def log_prior(sigma_):
s1, s2 = sigma_
p = 1. / (2. * np.pi * s1 * s2) * np.exp(-1. / 2 * ((x1 / s1) ** 2 + (x2 / s2) ** 2.))
return np.sum(np.log(p))
rho0 = np.array([1., 1.])
bounds = ((1., None), (1., None))
res = metropolis_hastings(rho0, proposal, log_like, log_prior, samples, burn=100, bounds=bounds, seed=seed, keep_path=True)
xp = res.x
# calculate auto-correlation and determine lag-time until independence.
lags = 100
auto_corr1 = auto_correlation(xp[:, 0], lags)
auto_corr2 = auto_correlation(xp[:, 1], lags)
tau1 = auto_correlation_time(xp[:, 0])
tau2 = auto_correlation_time(xp[:, 1])
tau = np.maximum(tau1, tau2)
# sub-sample only uncorrelated samples
xp_ind = xp[::tau, :]
s1_sam = np.mean(xp_ind[:, 0])
s2_sam = np.mean(xp_ind[:, 1])
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid.core as core
from op_test import OpTest, skip_check_grad_ci
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
class TestElementwiseAddOp(OpTest):
def init_kernel_type(self):
self.use_mkldnn = False
def setUp(self):
self.op_type = "elementwise_add"
self.init_dtype()
self.init_input_output()
self.init_kernel_type()
self.init_axis()
self.inputs = {
'X': OpTest.np_dtype_to_fluid_dtype(self.x),
'Y': OpTest.np_dtype_to_fluid_dtype(self.y)
}
self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
self.outputs = {'Out': self.out}
def test_check_output(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode
self.check_output(check_dygraph=(self.use_mkldnn == False))
def test_check_grad_normal(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode
if self.dtype == np.float16:
return
self.check_grad(
['X', 'Y'], 'Out', check_dygraph=(self.use_mkldnn == False))
def test_check_grad_ingore_x(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode
if self.dtype == np.float16:
return
self.check_grad(
['Y'],
'Out',
no_grad_set=set("X"),
check_dygraph=(self.use_mkldnn == False))
def test_check_grad_ingore_y(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode
if self.dtype == np.float16:
return
self.check_grad(
['X'],
'Out',
no_grad_set=set('Y'),
check_dygraph=(self.use_mkldnn == False))
def init_input_output(self):
self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
self.out = np.add(self.x, self.y)
def init_dtype(self):
self.dtype = np.float64
def init_axis(self):
self.axis = -1
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFP16ElementwiseAddOp(TestElementwiseAddOp):
def init_dtype(self):
self.dtype = np.float16
def test_check_output(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(
place, atol=1e-3, check_dygraph=(self.use_mkldnn == False))
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseAddOp_scalar(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(2, 3, 4).astype(self.dtype)
self.y = np.random.rand(1).astype(self.dtype)
self.out = self.x + self.y
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestFP16ElementwiseAddOp_scalar(TestFP16ElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(2, 3, 4).astype(self.dtype)
self.y = np.random.rand(1).astype(self.dtype)
self.out = self.x + self.y
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1,1) to test broadcast.")
class TestElementwiseAddOp_scalar2(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(2, 3, 4).astype(self.dtype)
self.y = np.random.rand(1, 1).astype(self.dtype)
self.out = self.x + self.y
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1,1) to test broadcast.")
class TestFP16ElementwiseAddOp_scalar2(TestFP16ElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(2, 3, 4).astype(self.dtype)
self.y = np.random.rand(1, 1).astype(self.dtype)
self.out = self.x + self.y
class TestElementwiseAddOp_Vector(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.random((100, )).astype(self.dtype)
self.y = np.random.random((100, )).astype(self.dtype)
self.out = np.add(self.x, self.y)
class TestFP16ElementwiseAddOp_Vector(TestFP16ElementwiseAddOp):
def init_input_output(self):
self.x = np.random.random((100, )).astype(self.dtype)
self.y = np.random.random((100, )).astype(self.dtype)
self.out = np.add(self.x, self.y)
class TestElementwiseAddOp_broadcast_0(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(100, 2, 3).astype(self.dtype)
self.y = np.random.rand(100).astype(self.dtype)
self.out = self.x + self.y.reshape(100, 1, 1)
def init_axis(self):
self.axis = 0
class TestFP16ElementwiseAddOp_broadcast_0(TestFP16ElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(100, 2, 3).astype(self.dtype)
self.y = np.random.rand(100).astype(self.dtype)
self.out = self.x + self.y.reshape(100, 1, 1)
def init_axis(self):
self.axis = 0
class TestElementwiseAddOp_broadcast_1(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(2, 100, 3).astype(self.dtype)
self.y = np.random.rand(100).astype(self.dtype)
self.out = self.x + self.y.reshape(1, 100, 1)
def init_axis(self):
self.axis = 1
class TestFP16ElementwiseAddOp_broadcast_1(TestFP16ElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(2, 100, 3).astype(self.dtype)
self.y = np.random.rand(100).astype(self.dtype)
self.out = self.x + self.y.reshape(1, 100, 1)
def init_axis(self):
self.axis = 1
class TestElementwiseAddOp_broadcast_2(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(2, 3, 100).astype(self.dtype)
self.y = np.random.rand(100).astype(self.dtype)
self.out = self.x + self.y.reshape(1, 1, 100)
class TestFP16ElementwiseAddOp_broadcast_2(TestFP16ElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(2, 3, 100).astype(self.dtype)
self.y = np.random.rand(100).astype(self.dtype)
self.out = self.x + self.y.reshape(1, 1, 100)
class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype)
self.y = np.random.rand(10, 12).astype(self.dtype)
self.out = self.x + self.y.reshape(1, 10, 12, 1)
def init_axis(self):
self.axis = 1
class TestFP16ElementwiseAddOp_broadcast_3(TestFP16ElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype)
self.y = np.random.rand(10, 12).astype(self.dtype)
self.out = self.x + self.y.reshape(1, 10, 12, 1)
def init_axis(self):
self.axis = 1
class TestElementwiseAddOp_broadcast_4(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(100, 2, 3, 4).astype(self.dtype)
self.y = np.random.rand(100, 1).astype(self.dtype)
self.out = self.x + self.y.reshape(100, 1, 1, 1)
def init_axis(self):
self.axis = 0
class TestFP16ElementwiseAddOp_broadcast_4(TestFP16ElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(100, 2, 3, 4).astype(self.dtype)
self.y = np.random.rand(100, 1).astype(self.dtype)
self.out = self.x + self.y.reshape(100, 1, 1, 1)
def init_axis(self):
self.axis = 0
class TestElementwiseAddOp_broadcast_5(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(10, 3, 12).astype(self.dtype)
self.y = np.random.rand(10, 1, 12).astype(self.dtype)
self.out = self.x + self.y
class TestFP16ElementwiseAddOp_broadcast_5(TestFP16ElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(10, 3, 12).astype(self.dtype)
self.y = np.random.rand(10, 1, 12).astype(self.dtype)
self.out = self.x + self.y
class TestElementwiseAddOp_broadcast_6(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype)
self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype)
self.out = self.x + self.y
class TestElementwiseAddOp_broadcast_7(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(1, 1, 20, 5).astype(self.dtype)
self.y = np.random.rand(20, 5, 1, 1).astype(self.dtype)
self.out = self.x + self.y
class TestFP16ElementwiseAddOp_broadcast_6(TestFP16ElementwiseAddOp):
def init_input_output(self):
self.x = | np.random.rand(2, 12, 3, 5) | numpy.random.rand |
"""
Filename: build_dataset.py
Function: Crops the images into small 256x256 images and divides the dataset into training and testing sets.
Author: <NAME> (https://github.com/Paulymorphous)
Website: https://www.livetheaiexperience.com/
"""
import numpy as np
import cv2
from tqdm import tqdm
import os
import math
import time
def train_test_split(images_path, masks_path, test_split=0.3):
"""
    Splits the dataset into train and test sets and stores them in an ImageDataAugmentation-friendly format.
Please note:
    > All images that have less than 1% annotation by area are removed. In other words, images that are 99% empty are removed.
Parameters
----------
>images_path (str): Path to the directory containing all the images.
>masks_path (str): Path to the directory containing all the masks.
>test_split (float): Ratio of the size of the test set to the entire dataset. Default value: 0.3
"""
    image_filenames = next(os.walk(images_path))[2]
test_set_size = int(test_split*len(image_filenames))
root_path = os.path.dirname(os.path.dirname(images_path)) + "/"
train_dir = root_path + "Train/"
test_dir = root_path + "Test/"
if not os.path.exists(train_dir):
print("CREATING:", train_dir)
os.makedirs(train_dir+"images/samples/")
os.makedirs(train_dir+"masks/samples/")
if not os.path.exists(test_dir):
print("CREATING:", test_dir)
os.makedirs(test_dir+"images/samples/")
os.makedirs(test_dir+"masks/samples/")
train_image_dir = train_dir+"images/samples/"
train_mask_dir = train_dir+"masks/samples/"
test_image_dir = test_dir+"images/samples/"
test_mask_dir = test_dir+"masks/samples/"
for n, filename in enumerate(image_filenames):
if n < test_set_size:
os.rename(images_path + filename, test_image_dir + filename)
os.rename(masks_path + filename, test_mask_dir + filename)
else:
os.rename(images_path + filename, train_image_dir + filename)
os.rename(masks_path + filename, train_mask_dir + filename)
print("Train-Test-Split COMPLETED.\nNUMBER OF IMAGES IN TRAIN SET:{}\nNUMBER OF IMAGES IN TEST SET: {}".format(len(image_filenames)-test_set_size, test_set_size))
print("\nTrain Directory:", train_dir)
print("Test Directory:", test_dir)
def crop_and_save(images_path, masks_path, new_images_path, new_masks_path, img_width, img_height):
"""
    Imports images, creates multiple crops of each, and stores them in the specified folder. Cropping is used instead of resizing so that spatial information, which resizing would destroy, is preserved.
Please note:
    > All images that have less than 1% annotation by area are removed. In other words, images that are 99% empty are skipped.
Parameters
----------
>images_path (str): Path to the directory containing all the images.
>masks_path (str): Path to the directory containing all the masks.
>new_images_path (str): Path to the Directory where the cropped images will be stored.
>new_masks_path (str): Path to the Directory where the cropped masks will be stored.
>img_width (int): width of the cropped image.
>img_height (int): height of the cropped image.
"""
print("Building Dataset.")
num_skipped = 0
start_time = time.time()
#files = next(os.walk(images_path))[2]
files = [i for i in os.listdir(images_path)]
print('Total number of files =',len(files))
for image_file in tqdm(files, total = len(files)):
image_path = images_path + image_file
image = cv2.imread(image_path)
mask_path = masks_path + image_file
mask = cv2.imread(mask_path, 0)
num_splits = math.floor((image.shape[0]*image.shape[1])/(img_width*img_height))
counter = 0
        if image is None or mask is None:
            raise FileNotFoundError("Could not read image or mask for {}".format(image_file))
for r in range(0, image.shape[0], img_height):
for c in range(0, image.shape[1], img_width):
counter += 1
blank_image = np.zeros((img_height ,img_width, 3), dtype = int)
blank_mask = np.zeros((img_height ,img_width), dtype = int)
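                # The zero-filled canvases pad crops that run past the image border,
                # so every saved tile is exactly img_height x img_width even at the edges.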
new_image_path = new_images_path + str(counter) + '_' + image_file
new_mask_path = new_masks_path + str(counter) + '_' + image_file
new_image = np.array(image[r:r+img_height, c:c+img_width,:])
new_mask = np.array(mask[r:r+img_height, c:c+img_width])
blank_image[:new_image.shape[0], :new_image.shape[1], :] += new_image
blank_mask[:new_image.shape[0], :new_image.shape[1]] += new_mask
blank_mask[blank_mask>1] = 255
# Skip any Image that is more than 99% empty.
                if np.any(blank_mask)
"""
Ridge regression
"""
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils import safe_asarray
from ..preprocessing import LabelBinarizer
from ..grid_search import GridSearchCV
def ridge_regression(X, y, alpha, sample_weight=1.0, solver='auto',
max_iter=None, tol=1e-3):
"""Solve the ridge equation by the method of normal equations.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
solver : {'auto', 'dense_cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'dense_cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'dense_cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
All three solvers support both dense and sparse data.
tol: float
Precision of the solution.
Returns
-------
coef: array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
Notes
-----
This function won't compute the intercept.
"""
n_samples, n_features = X.shape
has_sw = isinstance(sample_weight, np.ndarray) or sample_weight != 1.0
if solver == 'auto':
# cholesky if it's a dense array and cg in
# any other case
if hasattr(X, '__array__'):
solver = 'dense_cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
solver = 'dense_cholesky'
if solver == 'sparse_cg':
        # conjugate gradient solver
X1 = sp_linalg.aslinearoperator(X)
if y.ndim == 1:
y1 = np.reshape(y, (-1, 1))
else:
y1 = y
        coefs = np.empty((y1.shape[1], n_features))
from __future__ import print_function, division
from keras.layers import Concatenate, RepeatVector, TimeDistributed, Reshape, Permute
from keras.layers import Add, Lambda, Flatten, BatchNormalization, Activation
from keras.layers import Input, LSTM, Dense, GRU, Bidirectional, CuDNNLSTM
from keras.layers.merge import _Merge
from keras.initializers import Zeros
from keras.models import Model
from keras.models import load_model
from keras.optimizers import RMSprop, Adam
from functools import partial
from keras.utils import print_summary, plot_model
from keras.utils import to_categorical
from keras import backend as K
from keras.engine.topology import Layer
import functools
import tensorflow as tf
import gpustat
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import sys, os
import numpy as np
import progressbar
import time
import itertools
import math, random
import json
from queue import Queue
import threading
import pickle
import pypianoroll as pproll
import config, encoders, decoders, discriminators
from scipy.stats import pearsonr
import pprint
pp = pprint.PrettyPrinter(indent=4)
class RandomWeightedAverage(_Merge):
def _merge_function(self, inputs):
batch_size = K.shape(inputs[0])[0]
weights = K.random_uniform((batch_size, 1))
return (weights * inputs[0]) + ((1 - weights) * inputs[1])
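# RandomWeightedAverage draws one uniform weight per sample and returns a random
# point on the line segment between a real and a fake sample; the WGAN-GP
# gradient penalty below is evaluated at exactly these interpolated points.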
class MusAE():
def __init__(self, **kwargs):
# setting params as class attributes
self.__dict__.update(kwargs)
print("n_cropped_notes: ", self.n_cropped_notes)
        # using GPU with most memory available
self.set_gpu()
print("Initialising encoder...")
self.encoder = encoders.build_encoder_sz()
print("Initialising decoder...")
self.decoder = decoders.build_decoder_sz_flat()
print("Initialising z discriminator...")
self.z_discriminator = discriminators.build_gaussian_discriminator()
print("Initialising s discriminator...")
self.s_discriminator = discriminators.build_bernoulli_discriminator()
#print("Initialising infomax network...")
#self.infomax_net = discriminators.build_infomax_network()
path = os.path.join(self.plots_path, self.name, "models")
if not os.path.exists(path):
os.makedirs(path)
print("Saving model plots..")
plot_model(self.encoder, os.path.join(path, "encoder.png"), show_shapes=True)
plot_model(self.decoder, os.path.join(path, "decoder.png"), show_shapes=True)
plot_model(self.z_discriminator, os.path.join(path, "z_discriminator.png"), show_shapes=True)
plot_model(self.s_discriminator, os.path.join(path, "s_discriminator.png"), show_shapes=True)
#plot_model(self.infomax_net, os.path.join(path, "infomax_net.png"), show_shapes=True)
#-------------------------------
# Construct Computational Graph
# for the Adversarial Autoencoder
#-------------------------------
print("Building reconstruction phase's computational graph...")
self.encoder.trainable = True
self.decoder.trainable = True
self.z_discriminator.trainable = False
self.s_discriminator.trainable = False
#self.infomax_net.trainable = False
X = Input(shape=(self.phrase_size, self.n_cropped_notes, self.n_tracks), name="X_recon")
s_recon, z_recon = self.encoder(X)
Y_drums, Y_bass, Y_guitar, Y_strings = self.decoder([s_recon, z_recon])
self.reconstruction_phase = Model(
inputs=X,
outputs=[Y_drums, Y_bass, Y_guitar, Y_strings],
name="autoencoder"
)
plot_model(self.reconstruction_phase, os.path.join(path, "reconstruction_phase.png"), show_shapes=True)
#-------------------------------
# Construct Computational Graph
# for the z discriminator
#-------------------------------
print("Building z regularisation phase's computational graph...")
self.encoder.trainable = False
self.decoder.trainable = False
self.z_discriminator.trainable = True
self.s_discriminator.trainable = False
#self.infomax_net.trainable = False
X = Input(shape=(self.phrase_size, self.n_cropped_notes, self.n_tracks), name="X_z_reg")
z_real = Input(shape=(self.z_length,), name="z_reg")
_, z_fake = self.encoder(X)
z_int = RandomWeightedAverage(name="weighted_avg_z")([z_real, z_fake])
z_valid_real = self.z_discriminator(z_real)
z_valid_fake = self.z_discriminator(z_fake)
z_valid_int = self.z_discriminator(z_int)
self.z_regularisation_phase = Model(
[z_real, X],
[z_valid_real, z_valid_fake, z_valid_int, z_int],
name="z_regularisation_phase"
)
plot_model(self.z_regularisation_phase, os.path.join(path, "z_regularisation_phase.png"), show_shapes=True)
#-------------------------------
# Construct Computational Graph
# for the s discriminator
#-------------------------------
print("Building s regularisation phase's computational graph...")
self.encoder.trainable = False
self.decoder.trainable = False
self.z_discriminator.trainable = False
self.s_discriminator.trainable = True
#self.infomax_net.trainable = False
X = Input(shape=(self.phrase_size, self.n_cropped_notes, self.n_tracks), name="X_s_reg")
s_real = Input(shape=(self.s_length,), name="s_reg")
s_fake, _ = self.encoder(X)
s_int = RandomWeightedAverage(name="weighted_avg_s")([s_real, s_fake])
s_valid_real = self.s_discriminator(s_real)
s_valid_fake = self.s_discriminator(s_fake)
s_valid_int = self.s_discriminator(s_int)
self.s_regularisation_phase = Model(
[s_real, X],
[s_valid_real, s_valid_fake, s_valid_int, s_int],
name="s_regularisation_phase"
)
plot_model(self.s_regularisation_phase, os.path.join(path, "s_regularisation_phase.png"), show_shapes=True)
#-------------------------------
# Construct Computational Graph
# for the generator (encoder)
#-------------------------------
print("Building generator regularisation phase's computational graph...")
self.encoder.trainable = True
self.decoder.trainable = False
self.z_discriminator.trainable = False
self.s_discriminator.trainable = False
#self.infomax_net.trainable = False
X = Input(shape=(self.phrase_size, self.n_cropped_notes, self.n_tracks), name="X_gen_reg")
s_gen, z_gen = self.encoder(X)
z_valid_gen = self.z_discriminator(z_gen)
s_valid_gen = self.s_discriminator(s_gen)
self.gen_regularisation_phase = Model(
inputs=X,
outputs=[s_valid_gen, z_valid_gen],
name="gen_regularisation_phase"
)
plot_model(self.gen_regularisation_phase, os.path.join(path, "gen_regularisation_phase.png"), show_shapes=True)
#-------------------------------
# Construct Computational Graph
# for the supervised phase
#-------------------------------
print("Building supervised phase's computational graph...")
self.encoder.trainable = True
self.decoder.trainable = False
self.z_discriminator.trainable = False
self.s_discriminator.trainable = False
#self.infomax_net.trainable = False
X = Input(shape=(self.phrase_size, self.n_cropped_notes, self.n_tracks), name="X_sup")
s_pred, _ = self.encoder(X)
self.supervised_phase = Model(
inputs=X,
outputs=s_pred,
name="supervised_phase"
)
plot_model(self.supervised_phase, os.path.join(path, "supervised_phase.png"), show_shapes=True)
print("Building infomax phase's computational graph...")
self.encoder.trainable = True
self.decoder.trainable = True
self.z_discriminator.trainable = False
self.s_discriminator.trainable = False
#self.infomax_net.trainable = True
z_info = Input(shape=(self.z_length,), name="z_info")
s_info = Input(shape=(self.s_length,), name="s_info")
Y_drums_info, Y_bass_info, Y_guitar_info, Y_strings_info = self.decoder([s_info, z_info])
Y = Concatenate(axis=-1, name="concat")([Y_drums_info, Y_bass_info, Y_guitar_info, Y_strings_info])
s_info_pred, _ = self.encoder(Y)
#s_info_pred = self.infomax_net([Y_drums_info, Y_bass_info, Y_guitar_info, Y_strings_info])
self.infomax_phase = Model(
inputs=[s_info, z_info],
outputs=s_info_pred,
name="infomax_phase"
)
plot_model(self.infomax_phase, os.path.join(path, "infomax_phase.png"), show_shapes=True)
#-------------------------------
# Construct Computational Graph
# for the generator (encoder)
#-------------------------------
print("Building adversarial autoencoder's computational graph...")
self.encoder.trainable = True
self.decoder.trainable = True
self.z_discriminator.trainable = True
self.s_discriminator.trainable = True
X = Input(shape=(self.phrase_size, self.n_cropped_notes, self.n_tracks), name="X")
z_real = Input(shape=(self.z_length,), name="z")
s_real = Input(shape=(self.s_length,), name="s")
Y_drums, Y_bass, Y_guitar, Y_strings = self.reconstruction_phase(X)
z_valid_real, z_valid_fake, z_valid_int, z_int = self.z_regularisation_phase([z_real, X])
s_valid_real, s_valid_fake, s_valid_int, s_int = self.s_regularisation_phase([s_real, X])
s_valid_gen, z_valid_gen = self.gen_regularisation_phase(X)
s_pred = self.supervised_phase(X)
s_infomax = self.infomax_phase([s_real, z_real])
self.adversarial_autoencoder = Model(
inputs=[s_real, z_real, X],
outputs=[
Y_drums, Y_bass, Y_guitar, Y_strings,
s_valid_real, s_valid_fake, s_valid_int,
z_valid_real, z_valid_fake, z_valid_int,
s_valid_gen, z_valid_gen,
s_pred,
s_infomax
],
name="adversarial_autoencoder"
)
# prepare gp losses
self.s_gp_loss = partial(self.gradient_penalty_loss, averaged_samples=s_int)
self.s_gp_loss.__name__ = "gradient_penalty_s"
self.z_gp_loss = partial(self.gradient_penalty_loss, averaged_samples=z_int)
self.z_gp_loss.__name__ = "gradient_penalty_z"
self.adversarial_autoencoder.compile(
loss=[
"categorical_crossentropy", "categorical_crossentropy", "categorical_crossentropy", "categorical_crossentropy",
self.wasserstein_loss, self.wasserstein_loss, self.s_gp_loss,
self.wasserstein_loss, self.wasserstein_loss, self.z_gp_loss,
self.wasserstein_loss, self.wasserstein_loss,
"binary_crossentropy",
"binary_crossentropy"
],
loss_weights=[
self.reconstruction_weight, self.reconstruction_weight, self.reconstruction_weight, self.reconstruction_weight,
self.regularisation_weight, self.regularisation_weight, self.regularisation_weight * self.s_lambda,
self.regularisation_weight, self.regularisation_weight, self.regularisation_weight * self.z_lambda,
self.regularisation_weight, self.regularisation_weight,
self.supervised_weight,
self.infomax_weight
],
optimizer=self.aae_optim,
metrics=[
"categorical_accuracy",
"binary_accuracy",
self.output
]
)
plot_model(self.adversarial_autoencoder, os.path.join(path, "adversarial_autoencoder.png"), show_shapes=True)
def set_gpu(self):
stats = gpustat.GPUStatCollection.new_query()
ids = map(lambda gpu: int(gpu.entry['index']), stats)
ratios = map(lambda gpu: float(gpu.entry['memory.used']) / float(gpu.entry['memory.total']), stats)
bestGPU = min(zip(ids, ratios), key=lambda x: x[1])[0]
print("Setting GPU to: {}".format(bestGPU))
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = str(bestGPU)
# Just report the mean output of the model (useful for WGAN)
def output(self, y_true, y_pred):
return K.mean(y_pred)
# wrapper for using tensorflow metrics in keras
def as_keras_metric(self, method):
@functools.wraps(method)
def wrapper(self, args, **kwargs):
""" Wrapper for turning tensorflow metrics into keras metrics """
value, update_op = method(self, args, **kwargs)
K.get_session().run(tf.local_variables_initializer())
with tf.control_dependencies([update_op]):
value = tf.identity(value)
return value
return wrapper
def precision(self, y_true, y_pred):
# true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
# predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
# precision = true_positives / (predicted_positives + K.epsilon())
# return precision
precision = self.as_keras_metric(tf.metrics.precision)
return precision(y_true, y_pred)
def recall(self, y_true, y_pred):
# true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
# possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
# recall = true_positives / (possible_positives + K.epsilon())
recall = self.as_keras_metric(tf.metrics.recall)
return recall(y_true, y_pred)
def f1_score(self, y_true, y_pred):
precision = self.as_keras_metric(tf.metrics.precision)
recall = self.as_keras_metric(tf.metrics.recall)
p = precision(y_true, y_pred)
r = recall(y_true, y_pred)
return (2 * p * r) / (p + r + K.epsilon())
# dummy loss
def no_loss(self, y_true, y_pred):
return K.zeros(shape=(1,))
def wasserstein_loss(self, y_true, y_pred):
return K.mean(y_true * y_pred)
def gradient_penalty_loss(self, y_true, y_pred, averaged_samples):
def _compute_gradients(tensor, var_list):
grads = tf.gradients(tensor, var_list)
return [grad if grad is not None else tf.zeros_like(var) for var, grad in zip(var_list, grads)]
#gradients = K.gradients(y_pred, averaged_samples)[0]
gradients = _compute_gradients(y_pred, [averaged_samples])[0]
gradients_sqr = K.square(gradients)
gradients_sqr_sum = K.sum(gradients_sqr, axis=np.arange(1, len(gradients_sqr.shape)))
gradient_l2_norm = K.sqrt(gradients_sqr_sum)
gradient_penalty = K.square(1 - gradient_l2_norm)
return K.mean(gradient_penalty)
def train_v2(self, dataset):
epsilon_std = self.encoder_params["epsilon_std"]
# create checkpoint and plots folder
now = str(int(round(time.time())))
paths = {
"interpolations": os.path.join(self.interpolations_path, self.name),
"autoencoded": os.path.join(self.autoencoded_path, self.name),
"checkpoints": os.path.join(self.checkpoints_path, self.name),
"plots": os.path.join(self.plots_path, self.name),
"sampled": os.path.join(self.sampled_path, self.name),
"style_transfers": os.path.join(self.style_transfers_path, self.name),
"latent_sweeps": os.path.join(self.latent_sweeps_path, self.name)
}
for key in paths:
if not os.path.exists(paths[key]):
os.makedirs(paths[key])
print("Splitting training set and validation set...")
batches_path = os.path.join(self.dataset_path, "batches", "X")
_, _, files = next(os.walk(batches_path))
self.len_dataset = len(files)
tr_set, vl_set = train_test_split(files, test_size=self.test_size)
del files
self.len_tr_set = len(tr_set)
self.len_vl_set = len(vl_set)
# storing losses over time
tr_log = {
"iteration": [],
"AE_loss_drums": [],
"AE_loss_bass": [],
"AE_loss_guitar": [],
"AE_loss_strings": [],
"AE_loss_tot": [],
"AE_accuracy_drums": [],
"AE_accuracy_bass": [],
"AE_accuracy_guitar": [],
"AE_accuracy_strings": [],
"AE_accuracy_tot": [],
"s_score_real": [],
"s_score_fake": [],
"s_gradient_penalty": [],
"z_score_real": [],
"z_score_fake": [],
"z_gradient_penalty": [],
"supervised_loss": [],
"supervised_accuracy": [],
"infomax_loss": [],
"infomax_accuracy": []
}
vl_log = {
"epoch": [],
# "AE_loss_drums": [],
# "AE_loss_bass": [],
# "AE_loss_guitar": [],
# "AE_loss_strings": [],
"VL_AE_accuracy_drums": [],
"VL_AE_accuracy_bass": [],
"VL_AE_accuracy_guitar": [],
"VL_AE_accuracy_strings": [],
"VL_AE_accuracy_tot":[],
# "s_score_real": [],
# "s_score_fake": [],
# "s_gradient_penalty": [],
# "z_score_real": [],
# "z_score_fake": [],
# "z_gradient_penalty": [],
# "supervised_loss": [],
# "supervised_accuracy": []
"VL_infomax_loss": [],
"VL_infomax_accuracy": []
}
#... let the training begin!
bar = progressbar.ProgressBar(max_value=(self.n_epochs * self.len_dataset))
pbc = 0
pbc_tr = 0
pbc_vl = 0
annealing_first_stage = False
annealing_second_stage = False
annealing_third_stage = False
#bar.update(0)
for epoch in range(self.n_epochs):
print("- Epoch", epoch+1, "of", self.n_epochs)
print("-- Number of TR batches:", self.len_tr_set)
print("-- Number of VL batches:", self.len_vl_set)
epoch_pbc = pbc
print("Generating training batches...")
tr_queue = Queue(maxsize=128)
def async_batch_generator_tr():
#training_set = dataset.generate_batches(pianorolls_path, tr_set, batch_size=self.batch_size)
tr_batches = list(range(self.len_tr_set))
random.shuffle(tr_batches)
for i in tr_batches:
tr_queue.put(dataset.select_batch(i), block=True)
training_batch_thread = threading.Thread(target=async_batch_generator_tr)
training_batch_thread.start()
print("Training on training set...")
# train on the training set
for _ in range(self.len_tr_set):
bar.update(pbc)
X, Y, label = tr_queue.get(block=True)
label = label[:, :self.s_length]
n_chunks = X.shape[0]
# Adversarial ground truth (wasserstein)
real_gt = -np.ones((n_chunks, 1))
fake_gt = np.ones((n_chunks, 1))
dummy_gt = np.zeros((n_chunks, 1)) # Dummy gt for gradient penalty (not actually used)
# draw z from N(0,epsilon_std)
z_real = np.random.normal(0, epsilon_std, (n_chunks, self.z_length))
# draw s from B(s_length)
# s is a k-hot vector of tags
s_real = np.random.binomial(1, 0.5, size=(n_chunks, self.s_length))
#Y_split = [ Y[:, :, : , t] for t in range(self.n_tracks) ]
Y_drums = Y[:, :, : , 0]
Y_bass = Y[:, :, : , 1]
Y_guitar = Y[:, :, : , 2]
Y_strings = Y[:, :, : , 3]
aae_loss = self.adversarial_autoencoder.train_on_batch(
[s_real, z_real, X],
[
Y_drums, Y_bass, Y_guitar, Y_strings,
real_gt, fake_gt, dummy_gt,
real_gt, fake_gt, dummy_gt,
real_gt, real_gt,
label,
s_real
]
)
tr_log["AE_loss_drums"].append(aae_loss[1])
tr_log["AE_loss_bass"].append(aae_loss[2])
tr_log["AE_loss_guitar"].append(aae_loss[3])
tr_log["AE_loss_strings"].append(aae_loss[4])
tr_log["AE_loss_tot"].append(np.array([aae_loss[1], aae_loss[2], aae_loss[3], aae_loss[4]]).mean())
tr_log["AE_accuracy_drums"].append(aae_loss[15])
tr_log["AE_accuracy_bass"].append(aae_loss[18])
tr_log["AE_accuracy_guitar"].append(aae_loss[21])
tr_log["AE_accuracy_strings"].append(aae_loss[24])
tr_log["AE_accuracy_tot"].append(np.array([aae_loss[15], aae_loss[18], aae_loss[21], aae_loss[24]]).mean())
tr_log["s_score_real"].append(aae_loss[29])
tr_log["s_score_fake"].append(aae_loss[32])
tr_log["s_gradient_penalty"].append(aae_loss[7])
tr_log["z_score_real"].append(aae_loss[38])
tr_log["z_score_fake"].append(aae_loss[41])
tr_log["z_gradient_penalty"].append(aae_loss[10])
tr_log["supervised_loss"].append(aae_loss[47])
tr_log["supervised_accuracy"].append(aae_loss[50])
tr_log["infomax_loss"].append(aae_loss[14])
tr_log["infomax_accuracy"].append(aae_loss[55])
if pbc_tr % 500 == 0:
print("\nPlotting stats...")
print("Regularisation weight:", K.get_value(self.regularisation_weight))
self.plot(paths["plots"], tr_log)
if pbc_tr % 5000 == 0:
print("\nSaving checkpoint...")
self.save_checkpoint(paths["checkpoints"], pbc_tr)
# annealing the regularisation part
if pbc_tr > 1000 and not annealing_first_stage:
K.set_value(self.regularisation_weight, 0.0)
print("Regularisation weight annealed to ", K.get_value(self.regularisation_weight))
annealing_first_stage = True
elif pbc_tr > 10000 and not annealing_second_stage:
K.set_value(self.regularisation_weight, 0.1)
print("Regularisation weight annealed to ", K.get_value(self.regularisation_weight))
annealing_second_stage = True
elif pbc_tr > 15000 and not annealing_third_stage:
K.set_value(self.regularisation_weight, 0.2)
print("Regularisation weight annealed to ", K.get_value(self.regularisation_weight))
annealing_third_stage = True
pbc += 1
pbc_tr += 1
# at the end of each epoch, we evaluate on the validation set
print("Generating validation batches...")
vl_queue = Queue(maxsize=128)
def async_batch_generator_vl():
#training_set = dataset.generate_batches(pianorolls_path, tr_set, batch_size=self.batch_size)
vl_batches = range(self.len_vl_set)
for i in vl_batches:
#tr_queue.put(dataset.preprocess(next(training_set)), block=True)
vl_queue.put(dataset.select_batch(i), block=True)
validation_batch_thread = threading.Thread(target=async_batch_generator_vl)
validation_batch_thread.start()
print("\nEvaluating on validation set...")
# evaluating on validation set
pbc_vl0 = pbc_vl
vl_log_tmp = {
"VL_AE_accuracy_drums": [],
"VL_AE_accuracy_bass": [],
"VL_AE_accuracy_guitar": [],
"VL_AE_accuracy_strings": [],
"VL_infomax_loss": [],
"VL_infomax_accuracy": []
}
for _ in range(self.len_vl_set):
bar.update(pbc)
# try:
X, Y, label = vl_queue.get(block=True)
label = label[:, :self.s_length]
#print("batch get")
n_chunks = X.shape[0]
# Adversarial ground truths (wasserstein)
real_gt = -np.ones((n_chunks, 1))
                fake_gt = np.ones((n_chunks, 1))
"""A module for reading, parsing, and preprocessing trodes data
collected during robot calibration routines.
"""
import numpy as np
import pandas as pd
from scipy import ndimage
from . import readTrodesExtractedDataFile3 as read_trodes
def get_trodes_files(data_dir, trodes_name):
"""Generate names of all the trodes files from a calibration recording.
Assumes data is saved in the default trodes filesystem and channels are
named appropriately in the trodes configuration file.
Parameters
----------
data_dir : str
Parent directory where the trodes data lives
trodes_name : str
Name of original *.rec trodes file
Returns
-------
trodes_files : dict
The file names for each channel of a calibration recording. More
specifically, `x_push_file` is the *.dat file for the `x` actuator
`push` valve command recording. Similarly, `y_pot_file` is the
*.dat for the `y` actuator potentiometer recording.
"""
trodes_files = {
'time_file': data_dir + '/%s.analog/%s.timestamps.dat' % ( trodes_name, trodes_name),
'x_push_file': data_dir + '/%s.DIO/%s.dio_xPush.dat' % ( trodes_name, trodes_name),
'x_pull_file': data_dir + '/%s.DIO/%s.dio_xPull.dat' % (trodes_name, trodes_name),
'y_push_file': data_dir + '/%s.DIO/%s.dio_yPush.dat' % ( trodes_name, trodes_name),
'y_pull_file': data_dir + '/%s.DIO/%s.dio_yPull.dat' % ( trodes_name, trodes_name),
'z_push_file': data_dir + '/%s.DIO/%s.dio_zPush.dat' % ( trodes_name, trodes_name),
'z_pull_file': data_dir + '/%s.DIO/%s.dio_zPull.dat' % ( trodes_name, trodes_name),
'x_pot_file': data_dir + '/%s.analog/%s.analog_potX.dat' % ( trodes_name, trodes_name),
'y_pot_file': data_dir + '/%s.analog/%s.analog_potY.dat' % ( trodes_name, trodes_name),
'z_pot_file': data_dir + '/%s.analog/%s.analog_potZ.dat' % ( trodes_name, trodes_name)
}
return trodes_files
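# Illustrative usage (directory and recording names are placeholders):
#   files = get_trodes_files('/data/calibration', 'calib_rec')
#   files['x_pot_file']
#   # -> '/data/calibration/calib_rec.analog/calib_rec.analog_potX.dat'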
def read_data(trodes_files, sampling_rate=3000):
"""Read all the trodes file data using the SpikeGadgets
`readTrodesExtractedDataFile` script.
Parameters
----------
trodes_files : dict
The file names for each channel of a calibration recording. For
example, as returned by get_trodes_files().
sampling_rate : int
Specifying a rate (Hz) lower than the SpikeGadgets MCU clock rate of
30 kHz will downsample the data to speed up parsing.
Returns
-------
calibration_data : dict
All of the digital (DIO) and analog data corresponding to the trodes
        files for a calibration recording.
"""
# clockrate = np.float_(read_trodes.readTrodesExtractedDataFile(trodes_files['time_file'])['clock rate'])
clockrate = 30000
ds = int(clockrate / sampling_rate)
calibration_data = {
'clockrate': clockrate,
'sampling_rate': sampling_rate,
'time': {
'units': 'samples',
'time': read_trodes.readTrodesExtractedDataFile(trodes_files['time_file'])['data'][0:-1:ds]
},
'DIO': {
'x_push': read_trodes.readTrodesExtractedDataFile(trodes_files['x_push_file'])['data'],
'x_pull': read_trodes.readTrodesExtractedDataFile(trodes_files['x_pull_file'])['data'],
'y_push': read_trodes.readTrodesExtractedDataFile(trodes_files['y_push_file'])['data'],
'y_pull': read_trodes.readTrodesExtractedDataFile(trodes_files['y_pull_file'])['data'],
'z_push': read_trodes.readTrodesExtractedDataFile(trodes_files['z_push_file'])['data'],
'z_pull': read_trodes.readTrodesExtractedDataFile(trodes_files['z_pull_file'])['data']
},
'analog': {
'x_pot': read_trodes.readTrodesExtractedDataFile(trodes_files['x_pot_file'])['data']['voltage'][0:-1:ds],
'y_pot': read_trodes.readTrodesExtractedDataFile(trodes_files['y_pot_file'])['data']['voltage'][0:-1:ds],
'z_pot': read_trodes.readTrodesExtractedDataFile(trodes_files['z_pot_file'])['data']['voltage'][0:-1:ds]
}
}
return calibration_data
def to_numpy(calibration_data):
"""Convert the calibration data to numpy arrays
Parameters
----------
calibration_data : dict
All of the digital (DIO) and analog data corresponding to the trodes
        files for a calibration recording. For example, as returned by
read_data().
Returns
-------
calibration_data : dict
Numpy-converted calibration data.
"""
calibration_data['time']['time'] = np.array(
[t[0] for t in calibration_data['time']['time']],
dtype='float_'
)
for key in calibration_data['DIO'].keys():
calibration_data['DIO'][key] = np.array(
[i[0] for i in calibration_data['DIO'][key]],
dtype='float_'
)
return calibration_data
def to_seconds(calibration_data, start_at_zero=True):
"""Convert the calibration data time units to seconds.
Parameters
----------
calibration_data : dict
All of the digital (DIO) and analog data corresponding to the trodes
        files for a calibration recording. For example, as returned by
read_data().
start_at_zero : bool
If True, the start time will be set to 0.
Returns
-------
calibration_data : dict
Seconds-converted calibration data
"""
    if calibration_data['time']['units'] != 'seconds':
if start_at_zero:
for key in calibration_data['DIO'].keys():
calibration_data['DIO'][key] = (
calibration_data['DIO'][key] - calibration_data['time']['time'][
0]
) / calibration_data['clockrate']
calibration_data['time']['time'] = (
calibration_data['time']['time'] -
calibration_data['time']['time'][0]
) / calibration_data['clockrate']
else:
for key in calibration_data['DIO'].keys():
calibration_data['DIO'][key] = calibration_data['DIO'][key] / calibration_data['clockrate']
calibration_data['time']['time'] = calibration_data['time']['time'] / calibration_data['clockrate']
else:
pass
return calibration_data
def pots_to_cm(calibration_data, supply_voltage=3.3, pot_range=5.0):
"""Convert the potentiometer data units to cm.
Parameters
----------
calibration_data : dict
All of the digital (DIO) and analog data corresponding to the trodes
        files for a calibration recording. For example, as returned by
read_data().
supply_voltage : float
Maximum voltage for the potentiometers
pot_range : float
Potentiometer maximum travel range in cm
Returns
-------
calibration_data : dict
Calibration data with potentiometer data convert to cm
"""
trodes_max_bits = 32767.0
trodes_max_volts = 10.0
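    # Conversion chain: raw Trodes ADC bits -> volts at the ADC (10 V full scale)
    # -> fraction of the potentiometer supply voltage -> cm of actuator travel.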
for key in calibration_data['analog'].keys():
calibration_data['analog'][key] = (
calibration_data['analog'][key] / trodes_max_bits * trodes_max_volts / supply_voltage * pot_range
)
return calibration_data
def median_filter_pots(calibration_data, width):
"""Apply a median filter to the potentiometer series.
Parameters
----------
calibration_data : dict
All of the digital (DIO) and analog data corresponding to the trodes
        files for a calibration recording. For example, as returned by
read_data().
width : int
Width (in samples) of window used for median filter. Should be odd.
If not, one is added.
Returns
-------
calibration_data : dict
Calibration data with median-filtered potentiometers
"""
# convert width units to samples
    if width == 0:
        pass
    else:
        if (width % 2) == 0:
            # the median filter window must be odd, so widen even windows by one
            width += 1
        for key in calibration_data['analog'].keys():
            calibration_data['analog'][key] = ndimage.median_filter(
                calibration_data['analog'][key], size=width
            )
return calibration_data
def pots_to_volts(calibration_data):
"""Convert the potentiometer data units to volts.
Parameters
----------
calibration_data : dict
All of the digital (DIO) and analog data corresponding to the trodes
        files for a calibration recording. For example, as returned by
read_data().
Returns
-------
calibration_data : dict
Calibration data with potentiometer data convert to volts
"""
trodes_max_bits = 32767.0
trodes_max_volts = 10.0
for key in calibration_data['analog'].keys():
calibration_data['analog'][key] = (
calibration_data['analog'][key] / trodes_max_bits * trodes_max_volts
)
return calibration_data
def pots_to_bits(calibration_data, supply_voltage=3.3, controller_max_bits=1023):
"""Convert the potentiometer data units to microcontroller bits.
Parameters
----------
calibration_data : dict
All of the digital (DIO) and analog data corresponding to the trodes
        files for a calibration recording. For example, as returned by
read_data().
supply_voltage : float
Maximum voltage for the potentiometers
controller_max_bits : int
Maximum bits for the microcontroller
Returns
-------
calibration_data : dict
Calibration data with potentiometer data convert to microcontroller
bits
"""
trodes_max_bits = 32767.0
trodes_max_volts = 10.0
for key in calibration_data['analog'].keys():
calibration_data['analog'][key] = np.round(
calibration_data['analog'][key] / trodes_max_bits * trodes_max_volts /
supply_voltage * controller_max_bits
)
return calibration_data
def get_valve_transitions(calibration_data):
"""Get the valve start and stop times.
Parameters
----------
calibration_data : dict
All of the digital (DIO) and analog data corresponding to the trodes
        files for a calibration recording. For example, as returned by
read_data().
Returns
-------
start_times : dict
Times at which each of the valves transitioned from closed to open
stop_times : dict
Times at which each of the valves transitioned from open to closed
"""
start_times = {
'x_push': calibration_data['DIO']['x_push'][1::2],
'x_pull': calibration_data['DIO']['x_pull'][1::2],
'y_push': calibration_data['DIO']['y_push'][1::2],
'y_pull': calibration_data['DIO']['y_pull'][1::2],
'z_push': calibration_data['DIO']['z_push'][1::2],
'z_pull': calibration_data['DIO']['z_pull'][1::2]
}
stop_times = {
'x_push': calibration_data['DIO']['x_push'][2::2],
'x_pull': calibration_data['DIO']['x_pull'][2::2],
'y_push': calibration_data['DIO']['y_push'][2::2],
'y_pull': calibration_data['DIO']['y_pull'][2::2],
'z_push': calibration_data['DIO']['z_push'][2::2],
'z_pull': calibration_data['DIO']['z_pull'][2::2]
}
return start_times, stop_times
def get_calibration_frame(
data_dir,
trodes_name,
sampling_rate=3000,
medfilter_width=11,
pot_units='cm'
):
"""Generate a data frame for estimating robot calibration parameters.
State variables include the starting positions and valve open
durations for each actuator. The response variable is displacement.
Durations and displacements are assumed to be negative for the pull
valves by convention.
Parameters
----------
data_dir : str
Parent directory where the trodes data lives
trodes_name : str
Name of original .rec trodes file
sampling_rate : int
Specifying a rate (Hz) lower than the SpikeGadgets MCU clock rate of
30 kHz will downsample the data and speed up the parsing.
medfilter_width : int
        Width, in samples, of the median filter applied to the potentiometer
recordings.
pot_units : str
Units to return potentiometer recordings. Can be `cm`, `volts`, or
`bits`.
Returns
-------
data_frame : pandas.core.frame.DataFrame
A pandas data frame with columns `start_time`, `x_position`,
`x_duration`, `x_displacement`, `y_position`, `y_duration`,
`y_displacement`, `z_position`, `z_duration`, and `z_displacement`.
"""
trodes_files = get_trodes_files(data_dir, trodes_name)
calibration_data = read_data(trodes_files, sampling_rate)
calibration_data = to_numpy(calibration_data)
calibration_data = median_filter_pots(calibration_data, width=medfilter_width)
calibration_data = to_seconds(calibration_data)
if pot_units == 'cm':
calibration_data = pots_to_cm(calibration_data)
elif pot_units == 'volts':
calibration_data = pots_to_volts(calibration_data)
elif pot_units == 'bits':
calibration_data = pots_to_bits(calibration_data)
start_times, stop_times = get_valve_transitions(calibration_data)
num_events = start_times['x_push'].size + start_times['x_pull'].size
# sort start times
x_start_times = np.concatenate([start_times['x_push'], start_times['x_pull']])
y_start_times = np.concatenate([start_times['y_push'], start_times['y_pull']])
z_start_times = np.concatenate([start_times['z_push'], start_times['z_pull']])
x_order = np.argsort(x_start_times)
y_order = np.argsort(y_start_times)
z_order = np.argsort(z_start_times)
# estimate valve period
valve_period = np.median(np.diff(x_start_times[x_order]))
# match start times to position indices
start_indices = np.searchsorted(calibration_data['time']['time'], x_start_times[x_order])
stop_indices = start_indices + int(valve_period * sampling_rate)
# make data frame
data_frame = pd.DataFrame(
data={
'start_time': x_start_times[x_order],
'x_position': calibration_data['analog']['x_pot'][start_indices],
'x_duration': np.concatenate([
stop_times['x_push'] - start_times['x_push'],
                start_times['x_pull'] - stop_times['x_pull']  # negative by convention for pull valves
])[x_order],
'x_displacement': (
calibration_data['analog']['x_pot'][stop_indices] -
calibration_data['analog']['x_pot'][start_indices]
),
'y_position': calibration_data['analog']['y_pot'][start_indices],
'y_duration': np.concatenate([
stop_times['y_push'] - start_times['y_push'],
start_times['y_pull'] - stop_times['y_pull'] # scipy.ndimagen convention
])[y_order],
'y_displacement': (
calibration_data['analog']['y_pot'][stop_indices] -
calibration_data['analog']['y_pot'][start_indices]
),
'z_position': calibration_data['analog']['z_pot'][start_indices],
'z_duration': np.concatenate([
stop_times['z_push'] - start_times['z_push'],
                start_times['z_pull'] - stop_times['z_pull']  # negative by convention for pull valves
])[z_order],
'z_displacement': (
calibration_data['analog']['z_pot'][stop_indices] -
calibration_data['analog']['z_pot'][start_indices]
)
}
)
return data_frame
def get_traces_frame(
data_dir,
trodes_name,
sampling_rate=3000,
medfilter_width=11,
pot_units='cm'
):
"""Generate a data frame containing the position trajectories of
each actuator in response to each of the valve duration commands.
Similar to get_calibration_frame(), but returns the entire
trajectory instead of just the starting positions and
displacements.
Parameters
----------
data_dir : str
Parent directory where the trodes data lives
trodes_name : str
Name of original .rec trodes file
sampling_rate : int
Specifying a rate (Hz) lower than the SpikeGadgets MCU clock rate of
30 kHz will downsample the data, speed up parsing, and return
a smaller data frame.
medfilter_width : int
        Width, in samples, of the median filter applied to the potentiometer
recordings.
pot_units : str
Units to return potentiometer recordings. Can be `cm`, `volts`, or
`bits`.
Returns
-------
data_frame : pandas.core.frame.DataFrame
A pandas data frame with columns `trace_time`, `start_time`,
`x_start_position`, `x_duration`, `x_displacement`,
`y_start_position`, `y_duration`, `y_displacement`,
`z_start_position`, `z_duration`, and `z_displacement`.
"""
trodes_files = get_trodes_files(data_dir, trodes_name)
calibration_data = read_data(trodes_files, sampling_rate)
calibration_data = to_numpy(calibration_data)
calibration_data = median_filter_pots(calibration_data, width=medfilter_width)
calibration_data = to_seconds(calibration_data)
if pot_units == 'cm':
calibration_data = pots_to_cm(calibration_data)
elif pot_units == 'volts':
calibration_data = pots_to_volts(calibration_data)
elif pot_units == 'bits':
calibration_data = pots_to_bits(calibration_data)
start_times, stop_times = get_valve_transitions(calibration_data)
num_events = start_times['x_push'].size + start_times['x_pull'].size
# sort start times
x_start_times = np.concatenate([start_times['x_push'], start_times['x_pull']])
y_start_times = np.concatenate([start_times['y_push'], start_times['y_pull']])
z_start_times = np.concatenate([start_times['z_push'], start_times['z_pull']])
x_order = np.argsort(x_start_times)
y_order = | np.argsort(y_start_times) | numpy.argsort |
"""Thomson Problem solver"""
import itertools
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from scipy.spatial import ConvexHull
# Plot creation and parameters
fig = plt.figure()
ax = Axes3D(fig)
ax.set_aspect("equal")
ax.axis("off")
ax.set_xlim([-1, 1])
ax.set_ylim([-1, 1])
ax.set_zlim([-1, 1])
def random_uniform_sphere(N):
"""Create N random points on a unit sphere using a uniform distribution"""
points = []
for _ in itertools.repeat(None, N):
theta = np.random.uniform(0, np.pi)
phi = np.random.uniform(0, 2*np.pi)
points.append([np.cos(theta), np.sin(theta)*np.cos(phi), np.sin(theta)*np.sin(phi)])
return points
def random_gaussian_sphere(N, theta, phi, variance):
"""Create N random points on a unit sphere centered using a gaussian distribution"""
points = []
for _ in itertools.repeat(None, N):
bm_rand1 = np.random.uniform(0, 1)
bm_rand2 = np.random.uniform(0, 1)
theta_gaus = np.sqrt(-2*np.log(bm_rand1))*np.cos(2*np.pi*bm_rand2)*np.sqrt(variance)+theta
phi_gaus = np.sqrt(-2*np.log(bm_rand1))*np.sin(2*np.pi*bm_rand2)*np.sqrt(2*variance)+phi
points.append([np.cos(theta_gaus), np.sin(theta_gaus)*np.cos(phi_gaus), np.sin(theta_gaus)*np.sin(phi_gaus)])
return points
def distance(point1, point2):
"""Distance between 2 points"""
return np.sqrt((point2[0]-point1[0])**2+(point2[1]-point1[1])**2+(point2[2]-point1[2])**2)
def metropolis(points, iterations, temperature, method, variance):
"""Apply the Metropolis algorithm to a set of points"""
system_energy = 0
for i in range(0, len(points)):
for j in range(0, len(points)):
if j <= i:
continue
else:
system_energy += 1/(distance(points[i], points[j]))
print("starting energy = %f" % system_energy)
for _ in itertools.repeat(None, iterations):
i = np.random.randint(0, len(points)-1) # Pick a random point from the pointlist
if method == "uniform": # Generates the compared point by a uniform random distribution
random_point = random_uniform_sphere(1)[0]
elif method == "gaussian": # Generates the compared point by a local gaussian distribution centered on the chosen existing point
theta = np.arccos(points[i][0])
phi = np.arctan2(points[i][2], points[i][1])
random_point = random_gaussian_sphere(1, theta, phi, variance)[0]
else:
raise ValueError("Invalid method")
old_point_energy = 0
new_point_energy = 0
for j in range(0, len(points)): # Compare the energies of the old and new point
if i == j:
continue
else:
old_point_energy += 1/distance(points[i], points[j])
new_point_energy += 1/distance(random_point, points[j])
if old_point_energy > new_point_energy: # The new point is improved so replaces the old point
points[i] = random_point
system_energy += (new_point_energy - old_point_energy)
print("energy down -> current energy = %f, energy change = %f" % (system_energy, 2*(new_point_energy - old_point_energy)))
else: # If the new point is not an improvement it still may be chosen according to its boltzmann probability
j = np.random.uniform(0, 1)
if j <= np.exp((old_point_energy-new_point_energy)/(1.3806503*(10**-23)*temperature)):
# print "exp(delta(e)/kt = %f)" % np.exp((new_point_energy-old_point_energy)/(1.3806503*(10**-23)*temperature))
points[i] = random_point
system_energy -= (old_point_energy-new_point_energy)
print("energy up -> current energy = %f, energy change = %f" % (system_energy, 2*(new_point_energy - old_point_energy)))
print("final energy = %f" % system_energy)
return points
def pointplot(points):
"""Display a set of points in 3D"""
# Draws a sphere
phi = np.linspace(0, 2*np.pi, 200)
    theta = np.linspace(0, np.pi, 200)
#from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import astropy.io.fits as pf
import matplotlib.cm as cm
import warnings
import multiprocess as mtp
from numpy import linalg as LA
from scipy import signal as scp
import scipy.ndimage.filters as med
from SLIT import Lens
from SLIT import tools
warnings.simplefilter("ignore")
##SLIT: Sparse Lens Inversion Technique
def SLIT(Y, Fkappa, kmax, niter, size, PSF, PSFconj, S0 = [0], levels = [0], scheme = 'FB',
mask = [0], lvl = 0, weightS = 1, noise = 'gaussian', tau = 0, verbosity = 0, nweights = 1):
##DESCRIPTION:
## Function that estimates the source light profile from an image of a lensed source given the mass density profile.
##
##INPUTS:
## -img: a 2-D image of a lensed source given as n1xn2 numpy array.
## -Fkappa: an array giving the mapping between lens and source. This array is calculated from the lens mass density
## using tools from SLIT.Lens
    ## -kmax: the detection threshold in units of noise levels. We usually set this value to 5 to get a 5 sigma
    ## detection threshold.
    ## -niter: maximal number of iterations of the algorithm.
    ## -size: resolution factor between lens and source grids such that the size of the output source
    ## will be n1*size x n2*size
    ## -PSF: the point spread function of the observation provided as a 2D array.
    ## -PSFconj: The conjugate of the PSF. Usually computed via np.real(np.fft.ifft2(np.conjugate(np.fft.fft2(PSF0[:-1,:-1]))))
    ## but the user has to make sure that the conjugate is well centered.
##
##OPTIONS:
## -levels: an array that contains the noise levels at each band of the wavelet decomposition of the source.
## If not provided, the routine will compute the levels and save them in a fits file 'Noise_levels.fits'
## so that they can be used at a later time. This option allows to save time when running the same
## experiment several times.
    ## -mask: an array of zeros and ones with size ns1xns2. The zeros will stand for masked data.
##
##OUTPUTS:
## -S: the source light profile.
## -FS: the lensed version of the estimated source light profile
##
##EXAMPLE:
## S,FS = SLIT(img, Fkappa, 5, 100, 1, PSF, PSFconj)
n1,n2 = np.shape(Y)
PSFconj = PSF.T
#Size of the source
ns1,ns2 = int(n1*size), int(n2*size)
#Number of starlet scales in source plane
if lvl ==0:
lvl = np.int(np.log2(ns2))
else:
lvl = np.min([lvl,np.int(np.log2(ns2))])
lvlg = np.int(np.log2(n2))
#Masking if required
if np.sum(mask) == 0:
        mask = np.ones((n1,n2))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
2D linear elasticity example
Solve the equilibrium equation -\nabla \cdot \sigma(x) = f(x) for x\in\Omega
with the strain-displacement equation:
\epsilon = 1/2(\nabla u + \nabla u^T)
and the constitutive law:
\sigma = 2*\mu*\epsilon + \lambda*(\nabla\cdot u)I,
where \mu and \lambda are Lame constants, I is the identity tensor.
Dirichlet boundary conditions: u(x)=\hat{u} for x\in\Gamma_D
Neumann boundary conditions: \sigma n = \hat{t} for x\in \Gamma_N,
where n is the normal vector.
For this example:
\Omega is a quarter annulus in the 1st quadrant, centered at origin
with inner radius 1, outer radius 4
Symmetry (Dirichlet) boundary conditions on the bottom and left
u_x(x,y) = 0 for x=0
u_y(x,y) = 0 for y=0
and pressure boundary conditions for the curved boundaries:
\sigma n = P_int n on the interior boundary with P_int = 10 MPa
\sigma n = P_ext n on the exterior boundary with P_ext = 0 MPa.
Use DEM
"""
import tensorflow as tf
import numpy as np
import time
from utils.tfp_loss import tfp_function_factory
from utils.Geom_examples import QuarterAnnulus
from utils.Solvers import Elasticity2D_DEM_dist
from utils.Plotting import plot_field_2d
import tensorflow_probability as tfp
import matplotlib.pyplot as plt
#make figures bigger on HiDPI monitors
import matplotlib as mpl
mpl.rcParams['figure.dpi'] = 200
np.random.seed(42)
tf.random.set_seed(42)
class Elast_ThickCylinder(Elasticity2D_DEM_dist):
'''
Class including the symmetry boundary conditions for the thick cylinder problem
'''
def __init__(self, layers, train_op, num_epoch, print_epoch, model_data, data_type):
super().__init__(layers, train_op, num_epoch, print_epoch, model_data, data_type)
@tf.function
def dirichletBound(self, X, xPhys, yPhys):
# multiply by x,y for strong imposition of boundary conditions
u_val = X[:,0:1]
v_val = X[:,1:2]
u_val = xPhys*u_val
v_val = yPhys*v_val
return u_val, v_val
#define the model properties
model_data = dict()
model_data["radius_int"] = 1.
model_data["radius_ext"] = 4.
model_data["E"] = 1e2
model_data["nu"] = 0.3
model_data["state"] = "plane strain"
model_data["inner_pressure"] = 10.
model_data["outer_pressure"] = 0.
# generate the model geometry
geomDomain = QuarterAnnulus(model_data["radius_int"], model_data["radius_ext"])
# define the input and output data set
numElemU = 10
numElemV = 10
numGauss = 5
#xPhys, yPhys = myQuad.getRandomIntPts(numPtsU*numPtsV)
xPhys, yPhys, Wint = geomDomain.getQuadIntPts(numElemU, numElemV, numGauss)
data_type = "float32"
Xint = np.concatenate((xPhys,yPhys),axis=1).astype(data_type)
Wint = np.array(Wint).astype(data_type)
# prepare boundary points in the format Xbnd = [Xcoord, Ycoord, norm_x, norm_y] and
# Wbnd for boundary integration weights and
# Ybnd = [trac_x, trac_y], where Xcoord, Ycoord are the x and y coordinates of the point,
# norm_x, norm_y are the x and y components of the unit normals
# trac_x, trac_y are the x and y components of the traction vector at each point
# inner curved boundary, include both x and y directions
xPhysBnd, yPhysBnd , xNorm, yNorm, Wbnd = geomDomain.getQuadEdgePts(numElemV, numGauss, 4)
Xbnd = np.concatenate((xPhysBnd, yPhysBnd), axis=1).astype(data_type)
Wbnd = np.array(Wbnd).astype(data_type)
plt.scatter(xPhys, yPhys, s=0.1)
plt.scatter(xPhysBnd, yPhysBnd, s=1, c='red')
plt.title("Boundary and interior integration points")
plt.show()
# define loading
Ybnd_x = -model_data["inner_pressure"]*xNorm
Ybnd_y = -model_data["inner_pressure"]*yNorm
Ybnd = np.concatenate((Ybnd_x, Ybnd_y), axis=1).astype(data_type)
#define the model
tf.keras.backend.set_floatx(data_type)
l1 = tf.keras.layers.Dense(20, "swish")
l2 = tf.keras.layers.Dense(20, "swish")
l3 = tf.keras.layers.Dense(20, "swish")
l4 = tf.keras.layers.Dense(2, None)
train_op = tf.keras.optimizers.Adam()
train_op2 = "TFP-BFGS"
num_epoch = 1000
print_epoch = 100
pred_model = Elast_ThickCylinder([l1, l2, l3, l4], train_op, num_epoch,
print_epoch, model_data, data_type)
#convert the training data to tensors
Xint_tf = tf.convert_to_tensor(Xint)
Wint_tf = tf.convert_to_tensor(Wint)
Xbnd_tf = tf.convert_to_tensor(Xbnd)
Wbnd_tf = tf.convert_to_tensor(Wbnd)
Ybnd_tf = tf.convert_to_tensor(Ybnd)
#training
t0 = time.time()
print("Training (ADAM)...")
pred_model.network_learn(Xint_tf, Wint_tf, Xbnd_tf, Wbnd_tf, Ybnd_tf)
t1 = time.time()
print("Time taken (ADAM)", t1-t0, "seconds")
print("Training (TFP-BFGS)...")
loss_func = tfp_function_factory(pred_model, Xint_tf, Wint_tf, Xbnd_tf, Wbnd_tf, Ybnd_tf)
# convert initial model parameters to a 1D tf.Tensor
init_params = tf.dynamic_stitch(loss_func.idx, pred_model.trainable_variables)
# train the model with L-BFGS solver
results = tfp.optimizer.bfgs_minimize(
value_and_gradients_function=loss_func, initial_position=init_params,
max_iterations=10000, tolerance=1e-14)
# after training, the final optimized parameters are still in results.position
# so we have to manually put them back to the model
loss_func.assign_new_model_parameters(results.position)
t2 = time.time()
print("Time taken (BFGS)", t2-t1, "seconds")
print("Time taken (all)", t2-t0, "seconds")
def cart2pol(x, y):
rho = np.sqrt(np.array(x)**2 + np.array(y)**2)
phi = np.arctan2(y, x)
return rho, phi
# define the exact displacements
def exact_disp(x,y,model):
nu = model["nu"]
r = np.hypot(x,y)
a = model["radius_int"]
b = model["radius_ext"]
mu = model["E"]/(2*(1+nu))
p1 = model["inner_pressure"]
p0 = model["outer_pressure"]
dispxy = 1/(2*mu*(b**2-a**2))*((1-2*nu)*(p1*a**2-p0*b**2)+(p1-p0)*a**2*b**2/r**2)
ux = x*dispxy
uy = y*dispxy
return ux, uy
#define the exact stresses
def exact_stresses(x,y,model):
r = np.hypot(x,y)
a = model["radius_int"]
b = model["radius_ext"]
p1 = model["inner_pressure"]
p0 = model["outer_pressure"]
term_fact = a**2*b**2/(b**2-a**2)
term_one = p1/b**2 - p0/a**2 + (p1-p0)/r**2
term_two = 2*(p1-p0)/r**4
sigma_xx = term_fact*(term_one - term_two*x**2)
sigma_yy = term_fact*(term_one - term_two*y**2)
sigma_xy = term_fact*(-term_two*x*y)
return sigma_xx, sigma_yy, sigma_xy
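# Note: exact_disp and exact_stresses above implement the classical Lame solution for a
# pressurized thick-walled cylinder, sigma_rr = A - B/r^2 and sigma_tt = A + B/r^2 with
# A = (p1*a^2 - p0*b^2)/(b^2 - a^2) and B = (p1 - p0)*a^2*b^2/(b^2 - a^2),
# written here in Cartesian components for direct comparison with the network output.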
print("Testing...")
numPtsUTest = 2*numElemU*numGauss
numPtsVTest = 2*numElemV*numGauss
xPhysTest, yPhysTest = geomDomain.getUnifIntPts(numPtsUTest, numPtsVTest, [1,1,1,1])
XTest = np.concatenate((xPhysTest,yPhysTest),axis=1).astype(data_type)
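# NOTE: the source file is truncated at this point.  A typical continuation (assumed,
# not verbatim -- the prediction call on pred_model is a guess) would evaluate the
# trained network on XTest and compare it with the exact solution, e.g.:
#   YTest = pred_model(tf.convert_to_tensor(XTest)).numpy()
#   ux_exact, uy_exact = exact_disp(xPhysTest, yPhysTest, model_data)
#   err = np.sqrt(np.mean((YTest[:, 0:1] - ux_exact)**2 + (YTest[:, 1:2] - uy_exact)**2))
#   print("RMS displacement error:", err)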
import numpy as np
from collections import OrderedDict
import torch
from .basemetric import DiscreetMetric
class IoU(DiscreetMetric):
def __init__(self, seg_classes, *args, **kwargs):
self.seg_labels = []
self.seg_classes = {}
self.seg_label_to_cat = {} # {0:Airplane, 1:Airplane, ...49:Table}
for cat in seg_classes.keys():
self.seg_classes[cat] = seg_classes[cat]
for label in self.seg_classes[cat]:
self.seg_label_to_cat[label] = cat
self.seg_labels.append(label)
self.min_label = min(self.seg_labels)
self.num_labels = max(self.seg_labels) - self.min_label + 1
self.names = ['global_iou', 'avg_iou']
self.perclass_names = [f'iou_{cat}' for cat in self.seg_classes.keys()]
super(IoU, self).__init__(*args, **kwargs)
def update(self, true, pred):
self.true += list(true)
self.pred += list(pred)
def reset(self):
self.true = []
self.pred = []
def compute(self):
shape_ious = {cat:[] for cat in self.seg_classes.keys()}
pred_iou = []
for i in range(len(self.true)):
segp = self.pred[i]
segl = self.true[i]
cat = self.seg_label_to_cat[segl[0].item()]
part_ious = [0.0 for _ in range(len(self.seg_classes[cat]))]
for l in self.seg_classes[cat]:
if (segl==l).sum() == 0 and (segp==l).sum() == 0:
part_ious[l-self.seg_classes[cat][0]] = torch.tensor(1.0)
else:
part_ious[l-self.seg_classes[cat][0]] = ((segl==l) & (segp==l)).sum() / float(((segl==l) | (segp==l)).sum())
part_ious = np.mean(part_ious)
shape_ious[cat].append(part_ious)
pred_iou.append(part_ious)
        with np.errstate(divide='ignore', invalid='ignore'):
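            # The original file is truncated here; the lines below are a hedged
            # reconstruction (assumed, not verbatim) of a typical finish: average the
            # per-shape IoUs globally and per category (empty categories give NaN,
            # hence the errstate guard above).
            global_iou = float(np.mean(pred_iou)) if len(pred_iou) else float('nan')
            cat_means = [float(np.mean(shape_ious[cat])) if len(shape_ious[cat]) else float('nan')
                         for cat in self.seg_classes.keys()]
            avg_iou = float(np.nanmean(cat_means))
        # return layout matching self.names + self.perclass_names (assumed contract)
        return [global_iou, avg_iou] + cat_means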
from __future__ import division, print_function
if __name__ == '__main__':
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import pdb
import argparse
import re
import datetime
import sys
import numpy as np
from scipy.stats import sigmaclip
from scipy.ndimage.filters import median_filter
import fitsio
from astropy.io import fits as fits_astropy
from astropy.table import Table, vstack
from astropy import units
from astropy.coordinates import SkyCoord
from photutils import (CircularAperture, CircularAnnulus,
aperture_photometry, DAOStarFinder)
# Sphinx build would crash
try:
from astrometry.util.file import trymakedirs
from astrometry.util.starutil_numpy import hmsstring2ra, dmsstring2dec
from astrometry.util.util import wcs_pv2sip_hdr
from astrometry.util.ttime import Time
from astrometry.util.fits import fits_table, merge_tables
from astrometry.libkd.spherematch import match_radec
from astrometry.libkd.spherematch import match_xy
from tractor.splinesky import SplineSky
import legacypipe
from legacypipe.ps1cat import ps1cat
from legacypipe.gaiacat import GaiaCatalog
from legacypipe.survey import radec_at_mjd, get_git_version
from legacypipe.image import validate_procdate_plver
except ImportError:
#pass
raise
CAMERAS=['decam','mosaic','90prime','megaprime']
def ptime(text,t0):
tnow=Time()
print('TIMING:%s ' % text,tnow-t0)
return tnow
def read_lines(fn):
fin=open(fn,'r')
lines=fin.readlines()
fin.close()
if len(lines) < 1: raise ValueError('lines not read properly from %s' % fn)
return np.array( list(np.char.strip(lines)) )
def dobash(cmd):
print('UNIX cmd: %s' % cmd)
if os.system(cmd): raise ValueError
def astropy_to_astrometry_table(t):
T = fits_table()
for c in t.colnames:
T.set(c, t[c])
return T
def _ccds_table(camera='decam'):
'''Initialize the CCDs table.
Description and Units at:
https://github.com/legacysurvey/legacyzpts/blob/master/DESCRIPTION_OF_OUTPUTS.md
'''
max_camera_length = max([len(c) for c in CAMERAS])
cols = [
('err_message', 'S30'),
('image_filename', 'S120'),
('image_hdu', '>i2'),
('camera', 'S%i' % max_camera_length),
('expnum', '>i8'),
('plver', 'S8'),
('procdate', 'S19'),
('plprocid', 'S7'),
('ccdname', 'S5'),
('ccdnum', '>i2'),
('expid', 'S17'),
('object', 'S35'),
('propid', 'S10'),
('filter', 'S1'),
('exptime', '>f4'),
('date_obs', 'S26'),
('mjd_obs', '>f8'),
('ut', 'S15'),
('ha', 'S13'),
('airmass', '>f4'),
('fwhm', '>f4'),
('fwhm_cp', '>f4'),
('gain', '>f4'),
('width', '>i2'),
('height', '>i2'),
('ra_bore', '>f8'),
('dec_bore', '>f8'),
('crpix1', '>f4'),
('crpix2', '>f4'),
('crval1', '>f8'),
('crval2', '>f8'),
('cd1_1', '>f4'),
('cd1_2', '>f4'),
('cd2_1', '>f4'),
('cd2_2', '>f4'),
('pixscale', 'f4'),
('zptavg', '>f4'),
('yshift', 'bool'),
# -- CCD-level quantities --
('ra', '>f8'),
('dec', '>f8'),
('skymag', '>f4'),
('skycounts', '>f4'),
('skyrms', '>f4'),
('sig1', '>f4'),
('nmatch_photom', '>i2'),
('nmatch_astrom', '>i2'),
('goodps1', '>i2'),
('goodps1_wbadpix5', '>i2'),
('phoff', '>f4'),
('phrms', '>f4'),
('zpt', '>f4'),
('zpt_wbadpix5', '>f4'),
('transp', '>f4'),
('raoff', '>f4'),
('decoff', '>f4'),
('rarms', '>f4'),
('decrms', '>f4'),
('rastddev', '>f4'),
('decstddev', '>f4')
]
ccds = Table(np.zeros(1, dtype=cols))
return ccds
def _stars_table(nstars=1):
'''Initialize the stars table.
Description and Units at:
https://github.com/legacysurvey/legacyzpts/blob/master/DESCRIPTION_OF_OUTPUTS.md
'''
cols = [('image_filename', 'S100'),('image_hdu', '>i2'),
('expid', 'S16'), ('filter', 'S1'),('nmatch', '>i2'),
('x', 'f4'), ('y', 'f4'), ('expnum', '>i8'),
('plver', 'S8'), ('procdate', 'S19'), ('plprocid', 'S7'),
('gain', 'f4'),
('ra', 'f8'), ('dec', 'f8'), ('apmag', 'f4'),('apflux', 'f4'),('apskyflux', 'f4'),('apskyflux_perpix', 'f4'),
('radiff', 'f8'), ('decdiff', 'f8'),
('ps1_mag', 'f4'),
('gaia_g','f8'),('ps1_g','f8'),('ps1_r','f8'),('ps1_i','f8'),('ps1_z','f8'),
('exptime', '>f4')]
stars = Table(np.zeros(nstars, dtype=cols))
return stars
def get_pixscale(camera='decam'):
return {'decam':0.262,
'mosaic':0.262,
'90prime':0.470,
'megaprime':0.185}[camera]
def cols_for_survey_table(which='all'):
"""Return list of -survey.fits table colums
Args:
which: all, numeric,
nonzero_diff (numeric and expect non-zero diff with reference
when compute it)
"""
assert(which in ['all','numeric','nonzero_diff'])
martins_keys = ['airmass', 'ccdskymag']
gods_keys = ['plver', 'procdate', 'plprocid']
if which == 'all':
need_arjuns_keys= ['ra','dec','ra_bore','dec_bore',
'image_filename','image_hdu','expnum','ccdname','object',
'filter','exptime','camera','width','height','propid',
'mjd_obs','ccdnmatch',
'fwhm','zpt','ccdzpt','ccdraoff','ccddecoff',
'ccdrarms', 'ccddecrms', 'ccdskycounts',
'ccdphrms',
'cd1_1','cd2_2','cd1_2','cd2_1',
'crval1','crval2','crpix1','crpix2']
dustins_keys= ['skyrms', 'sig1', 'yshift']
elif which == 'numeric':
need_arjuns_keys= ['ra','dec','ra_bore','dec_bore',
'expnum',
'exptime','width','height',
'mjd_obs','ccdnmatch',
'fwhm','zpt','ccdzpt','ccdraoff','ccddecoff',
'cd1_1','cd2_2','cd1_2','cd2_1',
'crval1','crval2','crpix1','crpix2']
dustins_keys= ['skyrms']
elif which == 'nonzero_diff':
need_arjuns_keys= ['ra','dec','ccdnmatch',
'fwhm','zpt','ccdzpt','ccdraoff','ccddecoff']
dustins_keys= ['skyrms']
return need_arjuns_keys + dustins_keys + martins_keys + gods_keys
def create_survey_table(T, surveyfn, camera=None, psf=False, bad_expid=None):
"""input _ccds_table fn
output a table formatted for legacypipe/runbrick
"""
assert(camera in CAMERAS)
need_keys = cols_for_survey_table(which='all')
# Rename
rename_keys= [('zpt','ccdzpt'),
('zptavg','zpt'),
('raoff','ccdraoff'),
('decoff','ccddecoff'),
('skycounts', 'ccdskycounts'),
('skymag', 'ccdskymag'),
('rarms', 'ccdrarms'),
('decrms', 'ccddecrms'),
('phrms', 'ccdphrms'),
('nmatch_photom','ccdnmatch')]
for old,new in rename_keys:
T.rename(old,new)
# Delete
del_keys= list( set(T.get_columns()).difference(set(need_keys)) )
for key in del_keys:
T.delete_column(key)
# precision
T.width = T.width.astype(np.int16)
T.height = T.height.astype(np.int16)
T.cd1_1 = T.cd1_1.astype(np.float32)
T.cd1_2 = T.cd1_2.astype(np.float32)
T.cd2_1 = T.cd2_1.astype(np.float32)
T.cd2_2 = T.cd2_2.astype(np.float32)
if psf:
from legacyzpts.psfzpt_cuts import add_psfzpt_cuts
add_psfzpt_cuts(T, camera, bad_expid)
writeto_via_temp(surveyfn, T)
print('Wrote %s' % surveyfn)
def create_annotated_table(leg_fn, ann_fn, camera, survey, psf=False):
from legacypipe.annotate_ccds import annotate, init_annotations
T = fits_table(leg_fn)
T = survey.cleanup_ccds_table(T)
init_annotations(T)
annotate(T, survey, mzls=(camera == 'mosaic'), bass=(camera == '90prime'),
normalizePsf=psf, carryOn=True)
writeto_via_temp(ann_fn, T)
print('Wrote %s' % ann_fn)
def cols_for_converted_star_table(star_table=None,
which=None):
assert(star_table in ['photom','astrom'])
assert(which in ['all','numeric','nonzero_diff'])
# which
if which == 'all':
need_arjuns_keys= ['filename','expnum','extname',
'ccd_x','ccd_y','ccd_ra','ccd_dec',
'ccd_mag','ccd_sky',
'raoff','decoff',
'magoff',
'nmatch',
'gmag','ps1_g','ps1_r','ps1_i','ps1_z']
# If want it in star- table, add it here
extra_keys= ['image_hdu','filter','ccdname']
elif which == 'numeric':
need_arjuns_keys= ['expnum',
'ccd_x','ccd_y','ccd_ra','ccd_dec',
'ccd_mag','ccd_sky',
'raoff','decoff',
'magoff',
'nmatch',
'gmag','ps1_g','ps1_r','ps1_i','ps1_z']
extra_keys= []
elif which == 'nonzero_diff':
need_arjuns_keys= ['ccd_x','ccd_y','ccd_ra','ccd_dec',
'ccd_mag','ccd_sky',
'raoff','decoff',
'magoff',
'nmatch']
extra_keys= []
# star_table
if star_table == 'photom':
for key in ['raoff','decoff']:
need_arjuns_keys.remove(key)
elif star_table == 'astrom':
for key in ['magoff']:
need_arjuns_keys.remove(key)
# Done
return need_arjuns_keys + extra_keys
def getrms(x):
return np.sqrt( np.mean( np.power(x,2) ) )
def get_bitmask_fn(imgfn):
if 'ooi' in imgfn:
fn= imgfn.replace('ooi','ood')
elif 'oki' in imgfn:
fn= imgfn.replace('oki','ood')
else:
raise ValueError('bad imgfn? no ooi or oki: %s' % imgfn)
return fn
def get_weight_fn(imgfn):
if 'ooi' in imgfn:
fn= imgfn.replace('ooi','oow')
elif 'oki' in imgfn:
fn= imgfn.replace('oki','oow')
else:
raise ValueError('bad imgfn? no ooi or oki: %s' % imgfn)
return fn
class Measurer(object):
"""Main image processing functions for all cameras.
Args:
aprad: Aperture photometry radius in arcsec
skyrad_inner,skyrad_outer: sky annulus in arcsec
det_thresh: minimum S/N for matched filter
match_radius: arcsec matching to gaia/ps1
    sn_min,sn_max: if not None then the {min,max} S/N will be enforced from
        aperture photometry, where S/N = apflux/sqrt(skyflux)
aper_sky_sub: do aperture sky subtraction instead of splinesky
"""
def __init__(self, fn, image_dir='images', aprad=3.5, skyrad_inner=7.0,
skyrad_outer=10.0, det_thresh=8., match_radius=3., sn_min=None,
sn_max=None, aper_sky_sub=False, calibrate=False, quiet=False,
**kwargs):
# Set extra kwargs
self.ps1_pattern= kwargs['ps1_pattern']
self.zptsfile= kwargs.get('zptsfile')
self.prefix= kwargs.get('prefix')
self.verboseplots= kwargs.get('verboseplots')
self.fn = os.path.join(image_dir, fn)
self.fn_base = fn
self.debug= kwargs.get('debug')
self.outdir= kwargs.get('outdir')
self.calibdir = kwargs.get('calibdir')
self.aper_sky_sub = aper_sky_sub
self.calibrate = calibrate
self.aprad = aprad
self.skyrad = (skyrad_inner, skyrad_outer)
self.det_thresh = det_thresh # [S/N]
self.match_radius = match_radius
self.sn_min = sn_min
self.sn_max = sn_max
# Tractor fitting of final star sample (when not doing --psf fitting)
self.stampradius= 4. # [arcsec] Should be a bit bigger than radius=3.5'' aperture
self.tractor_nstars= 30 # Tractorize at most this many stars, saves CPU time
# Set the nominal detection FWHM (in pixels) and detection threshold.
# Read the primary header and the header for this extension.
self.nominal_fwhm = 5.0 # [pixels]
try:
self.primhdr = read_primary_header(self.fn)
except ValueError:
# astropy can handle it
tmp= fits_astropy.open(self.fn)
self.primhdr= tmp[0].header
tmp.close()
del tmp
# CP WCS succeed?
self.goodWcs=True
if not ('WCSCAL' in self.primhdr.keys() and
'success' in self.primhdr['WCSCAL'].strip().lower()):
self.goodWcs=False
# Camera-agnostic primary header cards
try:
self.propid = self.primhdr['PROPID']
except KeyError:
self.propid = self.primhdr.get('DTPROPID')
self.exptime = self.primhdr['EXPTIME']
self.date_obs = self.primhdr['DATE-OBS']
self.mjd_obs = self.primhdr['MJD-OBS']
# Add more attributes.
for key, attrkey in zip(['AIRMASS','HA', 'DATE', 'PLVER', 'PLPROCID'],
['AIRMASS','HA', 'PROCDATE', 'PLVER', 'PLPROCID']):
val = self.primhdr[key]
if type(val) == str:
val = val.strip()
if len(val) == 0:
raise ValueError('Empty header card: %s' % key)
setattr(self, attrkey.lower(), val)
self.expnum = self.get_expnum(self.primhdr)
if not quiet:
print('CP Header: EXPNUM = ',self.expnum)
print('CP Header: PROCDATE = ',self.procdate)
print('CP Header: PLVER = ',self.plver)
print('CP Header: PLPROCID = ',self.plprocid)
self.obj = self.primhdr['OBJECT']
def get_good_image_subregion(self):
'''
Returns x0,x1,y0,y1 of the good region of this chip,
or None if no cut should be applied to that edge; returns
(None,None,None,None) if the whole chip is good.
This cut is applied in addition to any masking in the mask or
invvar map.
'''
return None,None,None,None
def get_expnum(self, primhdr):
return self.primhdr['EXPNUM']
def zeropoint(self, band):
return self.zp0[band]
def extinction(self, band):
return self.k_ext[band]
def set_hdu(self,ext):
self.ext = ext.strip()
self.ccdname= ext.strip()
self.expid = '{:08d}-{}'.format(self.expnum, self.ccdname)
hdulist= fitsio.FITS(self.fn)
self.image_hdu= hdulist[ext].get_extnum() #NOT ccdnum in header!
# use header
self.hdr = fitsio.read_header(self.fn, ext=ext)
# Sanity check
assert(self.ccdname.upper() == self.hdr['EXTNAME'].strip().upper())
self.ccdnum = np.int(self.hdr.get('CCDNUM', 0))
self.gain= self.get_gain(self.hdr)
# WCS
self.wcs = self.get_wcs()
# Pixscale is assumed CONSTANT! per camera
# From CP Header
hdrVal={}
# values we want
for ccd_col in ['width','height','fwhm_cp']:
# Possible keys in hdr for these values
for key in self.cp_header_keys[ccd_col]:
if key in self.hdr.keys():
hdrVal[ccd_col]= self.hdr[key]
break
for ccd_col in ['width','height','fwhm_cp']:
if ccd_col in hdrVal.keys():
#print('CP Header: %s = ' % ccd_col,hdrVal[ccd_col])
setattr(self, ccd_col, hdrVal[ccd_col])
else:
warning='Could not find %s, keys not in cp header: %s' % \
(ccd_col,self.cp_header_keys[ccd_col])
if ccd_col == 'fwhm_cp':
print('WARNING: %s' % warning)
self.fwhm_cp = np.nan
else:
raise KeyError(warning)
x0,x1,y0,y1 = self.get_good_image_subregion()
if x0 is None and x1 is None and y0 is None and y1 is None:
slc = None
else:
x0 = x0 or 0
x1 = x1 or self.width
y0 = y0 or 0
y1 = y1 or self.height
slc = slice(y0,y1),slice(x0,x1)
self.slc = slc
def read_bitmask(self):
dqfn= get_bitmask_fn(self.fn)
if self.slc is not None:
mask = fitsio.FITS(dqfn)[self.ext][self.slc]
else:
mask = fitsio.read(dqfn, ext=self.ext)
mask = self.remap_bitmask(mask)
return mask
def remap_bitmask(self, mask):
return mask
def read_weight(self, clip=True, clipThresh=0.1, scale=True, bitmask=None):
fn = get_weight_fn(self.fn)
if self.slc is not None:
wt = fitsio.FITS(fn)[self.ext][self.slc]
else:
wt = fitsio.read(fn, ext=self.ext)
if scale:
wt = self.scale_weight(wt)
if bitmask is not None:
# Set all masked pixels to have weight zero.
# bitmask value 1 = bad
wt[bitmask > 0] = 0.
if clip and np.sum(wt > 0) > 0:
# Additionally clamp near-zero (incl negative!) weight to zero,
# which arise due to fpack.
if clipThresh > 0.:
thresh = clipThresh * np.median(wt[wt > 0])
else:
thresh = 0.
wt[wt < thresh] = 0
assert(np.all(wt >= 0.))
assert(np.all(np.isfinite(wt)))
return wt
def read_image(self):
'''Read the image and header; scale the image.'''
f = fitsio.FITS(self.fn)[self.ext]
if self.slc is not None:
img = f[self.slc]
else:
img = f.read()
hdr = f.read_header()
img = self.scale_image(img)
return img, hdr
def scale_image(self, img):
return img
def scale_weight(self, img):
return img
def remap_invvar(self, invvar, primhdr, img, dq):
# By default, *do not* remap
return invvar
# A function that can be called by a subclasser's remap_invvar() method
def remap_invvar_shotnoise(self, invvar, primhdr, img, dq):
#
# All three cameras scale the image and weight to units of electrons.
# (actually, not DECam any more! But DECamMeasurer doesn't use this
# function.)
#
print('Remapping weight map for', self.fn)
const_sky = primhdr['SKYADU'] # e/s, Recommended sky level keyword from Frank
expt = primhdr['EXPTIME'] # s
with np.errstate(divide='ignore'):
var_SR = 1./invvar # e**2
print('median img:', np.median(img), 'vs sky estimate * exptime', const_sky*expt)
var_Astro = np.abs(img - const_sky * expt) # img in electrons; Poisson process so variance = mean
wt = 1./(var_SR + var_Astro) # 1/(e**2)
# Zero out NaNs and masked pixels
wt[np.isfinite(wt) == False] = 0.
wt[dq != 0] = 0.
return wt
def create_zero_one_mask(self,bitmask,good=[]):
"""Return zero_one_mask array given a bad pixel map and good pix values
bitmask: ood image
good: list of values to treat as good in the bitmask
"""
# 0 == good, 1 == bad
zero_one_mask= bitmask.copy()
for val in good:
zero_one_mask[zero_one_mask == val]= 0
zero_one_mask[zero_one_mask > 0]= 1
return zero_one_mask
def get_zero_one_mask(self,bitmask,good=[]):
"""Convert bitmask into a zero and ones mask, 1 = bad, 0 = good
bitmask: ood image
good: (optional) list of values to treat as good in the bitmask
            default is to use appropriate values for the camera
"""
# Defaults
if len(good) == 0:
if self.camera == 'decam':
# 7 = transient
good=[7]
elif self.camera == 'mosaic':
# 5 is truly a cosmic ray
good=[]
elif self.camera == '90prime':
# 5 can be really bad for a good image because these are subtracted
# and interpolated stats
good= []
return self.create_zero_one_mask(bitmask,good=good)
def sensible_sigmaclip(self, arr, nsigma = 4.0):
'''sigmaclip returns unclipped pixels, lo,hi, where lo,hi are the
mean(goodpix) +- nsigma * sigma
'''
goodpix, lo, hi = sigmaclip(arr, low=nsigma, high=nsigma)
meanval = np.mean(goodpix)
sigma = (meanval - lo) / nsigma
return meanval, sigma
def get_sky_and_sigma(self, img, nsigma=3):
        '''returns the 2d sky image, the median sky level, and the sky rms'''
splinesky= False
if splinesky:
skyobj = SplineSky.BlantonMethod(img, None, 256)
skyimg = np.zeros_like(img)
skyobj.addTo(skyimg)
mnsky, skystd = self.sensible_sigmaclip(img - skyimg,nsigma=nsigma)
skymed= np.median(skyimg)
else:
#sky, sig1 = self.sensible_sigmaclip(img[1500:2500, 500:1000])
if self.camera in ['decam', 'megaprime']:
slc=[slice(1500,2500),slice(500,1500)]
elif self.camera in ['mosaic','90prime']:
slc=[slice(500,1500),slice(500,1500)]
else:
raise RuntimeError('unknown camera %s' % self.camera)
clip_vals,_,_ = sigmaclip(img[tuple(slc)],low=nsigma,high=nsigma)
skymed= np.median(clip_vals)
skystd= np.std(clip_vals)
skyimg= np.zeros(img.shape) + skymed
# MAD gives 10% larger value
# sig1= 1.4826 * np.median(np.abs(clip_vals))
return skyimg, skymed, skystd
def remove_sky_gradients(self, img):
# Ugly removal of sky gradients by subtracting median in first x and then y
H,W = img.shape
meds = np.array([np.median(img[:,i]) for i in range(W)])
meds = median_filter(meds, size=5)
img -= meds[np.newaxis,:]
meds = np.array([np.median(img[i,:]) for i in range(H)])
meds = median_filter(meds, size=5)
img -= meds[:,np.newaxis]
def match_ps1_stars(self, px, py, fullx, fully, radius, stars):
#print('Matching', len(px), 'PS1 and', len(fullx), 'detected stars with radius', radius)
I,J,d = match_xy(px, py, fullx, fully, radius)
#print(len(I), 'matches')
dx = px[I] - fullx[J]
dy = py[I] - fully[J]
return I,J,dx,dy
def fitstars(self, img, ierr, xstar, ystar, fluxstar):
'''Fit each star using a Tractor model.'''
import tractor
H, W = img.shape
fwhms = []
radius_pix = self.stampradius / self.pixscale
for ii, (xi, yi, fluxi) in enumerate(zip(xstar, ystar, fluxstar)):
#print('Fitting source', i, 'of', len(Jf))
ix = int(np.round(xi))
iy = int(np.round(yi))
xlo = int( max(0, ix-radius_pix) )
xhi = int( min(W, ix+radius_pix+1) )
ylo = int( max(0, iy-radius_pix) )
yhi = int( min(H, iy+radius_pix+1) )
xx, yy = np.meshgrid(np.arange(xlo, xhi), np.arange(ylo, yhi))
r2 = (xx - xi)**2 + (yy - yi)**2
keep = (r2 < radius_pix**2)
pix = img[ylo:yhi, xlo:xhi].copy()
ie = ierr[ylo:yhi, xlo:xhi].copy()
#print('fitting source at', ix,iy)
#print('number of active pixels:', np.sum(ie > 0), 'shape', ie.shape)
psf = tractor.NCircularGaussianPSF([4.0], [1.0])
tim = tractor.Image(data=pix, inverr=ie, psf=psf)
src = tractor.PointSource(tractor.PixPos(xi-xlo, yi-ylo),
tractor.Flux(fluxi))
tr = tractor.Tractor([tim], [src])
#print('Posterior before prior:', tr.getLogProb())
src.pos.addGaussianPrior('x', 0.0, 1.0)
#print('Posterior after prior:', tr.getLogProb())
tim.freezeAllBut('psf')
psf.freezeAllBut('sigmas')
# print('Optimizing params:')
# tr.printThawedParams()
#print('Parameter step sizes:', tr.getStepSizes())
optargs = dict(priors=False, shared_params=False)
for step in range(50):
dlnp, x, alpha = tr.optimize(**optargs)
#print('dlnp', dlnp)
#print('src', src)
#print('psf', psf)
if dlnp == 0:
break
# Now fit only the PSF size
tr.freezeParam('catalog')
# print('Optimizing params:')
# tr.printThawedParams()
for step in range(50):
dlnp, x, alpha = tr.optimize(**optargs)
#print('dlnp', dlnp)
#print('src', src)
#print('psf', psf)
if dlnp == 0:
break
fwhms.append(2.35 * psf.sigmas[0]) # [pixels]
#model = tr.getModelImage(0)
return np.array(fwhms)
def isolated_radec(self,ra,dec,nn=2,minsep=1./3600):
'''return indices of ra,dec for which the ra,dec points are
AT LEAST a distance minsep away from their nearest neighbor point'''
cat1 = SkyCoord(ra=ra*units.degree, dec=dec*units.degree)
cat2 = SkyCoord(ra=ra*units.degree, dec=dec*units.degree)
idx, d2d, d3d = cat1.match_to_catalog_3d(cat2,nthneighbor=nn)
b= np.array(d2d) >= minsep
return b
def get_ps1_cuts(self,ps1):
"""Returns bool of PS1 sources to keep
ps1: catalogue with ps1 data
"""
gicolor= ps1.median[:,0] - ps1.median[:,2]
return ((ps1.nmag_ok[:, 0] > 0) &
(ps1.nmag_ok[:, 1] > 0) &
(ps1.nmag_ok[:, 2] > 0) &
(gicolor > 0.4) &
(gicolor < 2.7))
def return_on_error(self,err_message='',
ccds=None, stars_photom=None, stars_astrom=None):
"""Sets ccds table err message, zpt to nan, and returns appropriately for self.run()
Args:
err_message: length <= 30
ccds, stars_photom, stars_astrom: (optional) tables partially filled by run()
"""
        assert(len(err_message) > 0 and len(err_message) <= 30)
if ccds is None:
ccds= _ccds_table(self.camera)
ccds['image_filename'] = self.fn_base
ccds['err_message']= err_message
ccds['zpt']= np.nan
return ccds, stars_photom, stars_astrom
def run(self, ext=None, save_xy=False, psfex=False, splinesky=False, survey=None):
"""Computes statistics for 1 CCD
Args:
ext: ccdname
save_xy: save daophot x,y and x,y after various cuts to dict and save
to json
Returns:
ccds, stars_photom, stars_astrom
"""
self.set_hdu(ext)
#
t0= Time()
t0= ptime('Measuring CCD=%s from image=%s' % (self.ccdname,self.fn),t0)
# Initialize
ccds = _ccds_table(self.camera)
# FIXME -- could clean up paths here??
ccds['image_filename'] = self.fn_base
ccds['image_hdu'] = self.image_hdu
ccds['ccdnum'] = self.ccdnum
ccds['camera'] = self.camera
ccds['expnum'] = self.expnum
ccds['plver'] = self.plver
ccds['procdate'] = self.procdate
ccds['plprocid'] = self.plprocid
ccds['ccdname'] = self.ccdname
ccds['expid'] = self.expid
ccds['object'] = self.obj
ccds['propid'] = self.propid
ccds['filter'] = self.band
ccds['exptime'] = self.exptime
ccds['date_obs'] = self.date_obs
ccds['mjd_obs'] = self.mjd_obs
ccds['ut'] = self.ut
ccds['ra_bore'] = self.ra_bore
ccds['dec_bore'] = self.dec_bore
ccds['ha'] = self.ha
ccds['airmass'] = self.airmass
ccds['gain'] = self.gain
ccds['pixscale'] = self.pixscale
ccds['yshift'] = 'YSHIFT' in self.primhdr
ccds['width'] = self.width
ccds['height'] = self.height
ccds['fwhm_cp'] = self.fwhm_cp
hdr_fwhm = self.fwhm_cp
notneeded_cols= ['avsky']
for ccd_col in ['avsky', 'crpix1', 'crpix2', 'crval1', 'crval2',
'cd1_1','cd1_2', 'cd2_1', 'cd2_2']:
if ccd_col.upper() in self.hdr.keys():
#print('CP Header: %s = ' % ccd_col,self.hdr[ccd_col])
ccds[ccd_col]= self.hdr[ccd_col]
else:
if ccd_col in notneeded_cols:
ccds[ccd_col]= np.nan
else:
raise KeyError('Could not find %s, keys not in cp header:' \
% ccd_col,ccd_col)
exptime = ccds['exptime'].data[0]
airmass = ccds['airmass'].data[0]
print('Band {}, Exptime {}, Airmass {}'.format(self.band, exptime, airmass))
# WCS: 1-indexed so pixel pixelxy2radec(1,1) corresponds to img[0,0]
H = ccds['height'].data[0]
W = ccds['width'].data[0]
print('Image size:', W,H)
ccdra, ccddec = self.wcs.pixelxy2radec((W+1) / 2.0, (H+1) / 2.0)
ccds['ra'] = ccdra # [degree]
ccds['dec'] = ccddec # [degree]
t0= ptime('header-info',t0)
if not self.goodWcs:
print('WCS Failed on CCD {}'.format(self.ccdname))
return self.return_on_error(err_message='WCS Failed', ccds=ccds)
if self.exptime == 0:
print('Exptime = 0 on CCD {}'.format(self.ccdname))
return self.return_on_error(err_message='Exptime = 0', ccds=ccds)
self.bitmask = self.read_bitmask()
weight = self.read_weight(bitmask=self.bitmask, scale=False)
if np.all(weight == 0):
txt = 'All weight-map pixels are zero on CCD {}'.format(self.ccdname)
print(txt)
return self.return_on_error(txt,ccds=ccds)
# bizarro image CP20151119/k4m_151120_040715_oow_zd_v1.fits.fz
if np.all(np.logical_or(weight == 0, weight == 1)):
txt = 'All weight-map pixels are zero or one'
print(txt)
return self.return_on_error(txt,ccds=ccds)
weight = self.scale_weight(weight)
if psfex:
# Quick check for PsfEx file
psf = self.get_psfex_model()
if psf.psfex.sampling == 0.:
print('PsfEx model has SAMPLING=0')
nacc = psf.header.get('ACCEPTED')
print('PsfEx model number of stars accepted:', nacc)
return self.return_on_error(err_message='Bad PSF model', ccds=ccds)
self.img,hdr = self.read_image()
# Per-pixel error -- weight is 1/sig*2, scaled by scale_weight()
medweight = np.median(weight[(weight > 0) * (self.bitmask == 0)])
# Undo the weight scaling to get sig1 back into native image units
wscale = self.scale_weight(1.)
ccds['sig1'] = 1. / np.sqrt(medweight / wscale)
self.invvar = self.remap_invvar(weight, self.primhdr, self.img, self.bitmask)
t0= ptime('read image',t0)
# Measure the sky brightness and (sky) noise level.
zp0 = self.zeropoint(self.band)
#print('Computing the sky background.')
sky_img, skymed, skyrms = self.get_sky_and_sigma(self.img)
img_sub_sky= self.img - sky_img
# Bunch of sky estimates
# Median of absolute deviation (MAD), std dev = 1.4826 * MAD
print('sky from median of image= %.2f' % skymed)
skybr = zp0 - 2.5*np.log10(skymed / self.pixscale / self.pixscale / exptime)
print('Sky brightness: {:.3f} mag/arcsec^2 (assuming nominal zeropoint)'.format(skybr))
ccds['skyrms'] = skyrms / exptime # e/sec
ccds['skycounts'] = skymed / exptime # [electron/pix]
ccds['skymag'] = skybr # [mag/arcsec^2]
t0= ptime('measure-sky',t0)
# Load PS1 & Gaia catalogues
# We will only used detected sources that have PS1 or Gaia matches
# So cut to this super set immediately
ps1 = None
try:
ps1 = ps1cat(ccdwcs=self.wcs,
pattern= self.ps1_pattern).get_stars(magrange=None)
except OSError:
print('No PS1 stars found for this image -- outside the PS1 footprint, or in the Galactic plane?')
if ps1 is not None and len(ps1) == 0:
ps1 = None
# PS1 cuts
if ps1 is not None and len(ps1):
ps1.cut( self.get_ps1_cuts(ps1) )
if len(ps1) == 0:
ps1 = None
else:
# Convert to Legacy Survey mags
ps1.legacy_survey_mag = self.ps1_to_observed(ps1)
print(len(ps1), 'PS1 stars')
gaia = GaiaCatalog().get_catalog_in_wcs(self.wcs)
assert(gaia is not None)
assert(len(gaia) > 0)
gaia = GaiaCatalog.catalog_nantozero(gaia)
assert(gaia is not None)
print(len(gaia), 'Gaia stars')
# Move Gaia stars to the epoch of this image.
gaia.ra_orig = gaia.ra.copy()
gaia.dec_orig = gaia.dec.copy()
ra,dec = radec_at_mjd(gaia.ra, gaia.dec, gaia.ref_epoch.astype(float),
gaia.pmra, gaia.pmdec, gaia.parallax, self.mjd_obs)
gaia.ra = ra
gaia.dec = dec
if not psfex:
ccds,photom,astrom = self.run_apphot(ccds, ps1, gaia, skyrms, hdr_fwhm,
sky_img, ext=ext, save_xy=save_xy)
# yuck!
photom = astropy_to_astrometry_table(photom)
astrom = astropy_to_astrometry_table(astrom)
return ccds,photom,astrom
return self.run_psfphot(ccds, ps1, gaia, zp0, exptime, airmass, sky_img,
splinesky, survey)
def run_apphot(self, ccds, ps1, gaia, skyrms, hdr_fwhm, sky_img,
ext=None, save_xy=False):
t0= Time()
img_sub_sky = self.img - sky_img
# badpix5 test, all good PS1
if self.camera in ['90prime','mosaic']:
_, ps1_x, ps1_y = self.wcs.radec2pixelxy(ps1.ra_ok,ps1.dec_ok)
ps1_x-= 1.
ps1_y-= 1.
ap_for_ps1 = CircularAperture((ps1_x, ps1_y), 5.)
# special mask, only gt 0 where badpix eq 5
img_mask_5= np.zeros(self.bitmask.shape, dtype=self.bitmask.dtype)
img_mask_5[self.bitmask == 5]= 1
phot_for_mask_5 = aperture_photometry(img_mask_5, ap_for_ps1)
flux_for_mask_5 = phot_for_mask_5['aperture_sum']
ccds['goodps1']= len(ps1)
ccds['goodps1_wbadpix5']= len(ps1[flux_for_mask_5.data > 0])
# Detect stars on the image.
# 10 sigma, sharpness, roundness all same as IDL zeropoints (also the defaults)
# Exclude_border=True removes the stars with centroid on or out of ccd edge
# Good, but we want to remove with aperture touching ccd edge too
print('det_thresh = %d' % self.det_thresh)
#threshold=self.det_thresh * stddev_mad,
dao = DAOStarFinder(fwhm= hdr_fwhm,
threshold=self.det_thresh * skyrms,
sharplo=0.2, sharphi=1.0, roundlo=-1.0, roundhi=1.0,
exclude_border=False)
obj= dao(self.img)
if len(obj) < self.minstar:
dao.threshold /= 2.
obj= dao(self.img)
if len(obj) < self.minstar:
return self.return_on_error('dao found < %d sources' % self.minstar,ccds=ccds)
t0= ptime('detect-stars',t0)
# We for sure know that sources near edge could be bad
edge_sep = 1. + self.skyrad[1]
edge_sep_px = edge_sep/self.pixscale
ht,wid = self.img.shape
away_from_edge= (
(obj['xcentroid'] > edge_sep_px) &
(obj['xcentroid'] < wid - edge_sep_px) &
(obj['ycentroid'] > edge_sep_px) &
(obj['ycentroid'] < ht - edge_sep_px))
obj= obj[away_from_edge]
objra, objdec = self.wcs.pixelxy2radec(obj['xcentroid']+1, obj['ycentroid']+1)
nobj = len(obj)
print('{} sources detected with detection threshold {}-sigma minus edge sources'.format(nobj, self.det_thresh))
ccds['nstarfind']= nobj
if nobj < self.minstar:
return self.return_on_error('after edge cuts < %d sources' % self.minstar,ccds=ccds)
if save_xy:
# Arrays of length number of all daophot found sources
all_xy= fits_table()
all_xy.set('x', obj['xcentroid'].data)
all_xy.set('y', obj['ycentroid'].data)
all_xy.set('ra', objra)
all_xy.set('dec', objdec)
all_xy.writeto('%s_%s_all_xy.fits' %
(os.path.basename(self.fn).replace('.fits','').replace('.fz',''),
ext))
# Matching
matched= {}
# Photometry
matched['photom_obj'], matched['photom_ref'], _ = \
match_radec(objra, objdec, ps1.ra_ok, ps1.dec_ok,
self.match_radius/3600.0,
nearest=True)
t0= ptime('matching-for-photometer',t0)
if len(matched['photom_obj']) < self.minstar:
return self.return_on_error('photom matched < %d sources' % self.minstar,ccds=ccds)
stars_photom,err= self.do_Photometry(obj[matched['photom_obj']],
ps1[matched['photom_ref']],
ccds=ccds, save_xy=save_xy)
if len(err) > 0:
return self.return_on_error(err,ccds=ccds,
stars_photom=stars_photom)
t0= ptime('photutils-photometry',t0)
# Astrometry
matched['astrom_obj'], matched['astrom_ref'], _ = \
match_radec(objra, objdec, gaia.ra, gaia.dec,
self.match_radius/3600.0,
nearest=True)
t0= ptime('matching-for-astrometry',t0)
# Use gaia
if len(matched['astrom_obj']) < self.minstar:
return self.return_on_error('astrom gaia matched < %d sources' % self.minstar,ccds=ccds,stars_photom=stars_photom)
stars_astrom,err= self.do_Astrometry(
obj[matched['astrom_obj']],
ref_ra= gaia.ra[matched['astrom_ref']],
ref_dec= gaia.dec[matched['astrom_ref']],
ccds=ccds)
if len(err) > 0:
return self.return_on_error(err,ccds=ccds,
stars_photom=stars_photom,
stars_astrom=stars_astrom)
t0= ptime('did-astrometry',t0)
# FWHM
# Tractor on specific SN sources
ap = CircularAperture((stars_photom['x'], stars_photom['y']),
self.aprad / self.pixscale)
skyphot = aperture_photometry(sky_img, ap)
skyflux = skyphot['aperture_sum'].data
star_SN= stars_photom['apflux'].data / np.sqrt(stars_photom['apflux'].data + skyflux)
t0= ptime('photutils-photometry-SN',t0)
# Brightest N stars
sn_cut= ((star_SN >= 10.) &
(star_SN <= 100.))
if len(star_SN[sn_cut]) < 10.:
sn_cut= star_SN >= 10.
if len(star_SN[sn_cut]) < 10.:
sn_cut= np.ones(len(star_SN),bool)
i_low_hi= np.argsort(star_SN)[sn_cut]
# brightest stars in sample, at most self.tractor_nstars
sample=dict(x= stars_photom['x'][i_low_hi][-self.tractor_nstars:],
y= stars_photom['y'][i_low_hi][-self.tractor_nstars:],
apflux= stars_photom['apflux'][i_low_hi][-self.tractor_nstars:],
sn= star_SN[i_low_hi][-self.tractor_nstars:])
#ivar = np.zeros_like(img) + 1.0/sig1**2
# Hack! To avoid 1/0 and sqrt(<0) just considering Poisson Stats due to sky
ierr = 1.0/np.sqrt(sky_img)
fwhms = self.fitstars(img_sub_sky, ierr, sample['x'], sample['y'], sample['apflux'])
ccds['fwhm'] = np.median(fwhms) # fwhms= 2.35 * psf.sigmas
print('FWHM med=%f, std=%f, std_med=%f' % (np.median(fwhms),np.std(fwhms),np.std(fwhms)/len(sample['x'])))
#ccds['seeing'] = self.pixscale * np.median(fwhms)
t0= ptime('Tractor fit FWHM to %d/%d stars' % (len(sample['x']),len(stars_photom)), t0)
# RESULTS
print("RESULTS %s" % ext)
print('Photometry: %d stars' % ccds['nmatch_photom'])
        print('Offset (mag) =%.4f, rms=%.4f' % (ccds['phoff'],ccds['phrms']))
print('Zeropoint %.4f' % (ccds['zpt'],))
print('Transparency %.4f' % (ccds['transp'],))
print('Astrometry: %d stars' % ccds['nmatch_astrom'])
print('Offsets (arcsec) RA=%.6f, Dec=%.6f' % (ccds['raoff'], ccds['decoff']))
t0= ptime('all-computations-for-this-ccd',t0)
# Plots for comparing to Arjuns zeropoints*.ps
if self.verboseplots:
self.make_plots(stars,dmag,ccds['zpt'],ccds['transp'])
t0= ptime('made-plots',t0)
return ccds, stars_photom, stars_astrom
def run_psfphot(self, ccds, ps1, gaia, zp0, exptime, airmass, sky_img,
splinesky, survey):
t0= Time()
# Now put Gaia stars into the image and re-fit their centroids
# and fluxes using the tractor with the PsfEx PSF model.
# assume that the CP WCS has gotten us to within a few pixels
# of the right answer. Find Gaia stars, initialize Tractor
# sources there, optimize them and see how much they want to
# move.
psf = self.get_psfex_model()
# Just keep the CP FWHM measurement!!
ccds['fwhm'] = ccds['fwhm_cp']
#ccds['fwhm'] = psf.fwhm
if splinesky:
sky = self.get_splinesky()
print('Instantiating and subtracting sky model')
skymod = np.zeros_like(self.img)
sky.addTo(skymod)
# Apply the same transformation that was applied to the image...
skymod = self.scale_image(skymod)
#print('Old sky_img: avg', np.mean(sky_img), 'min/max', np.min(sky_img), np.max(sky_img))
#print('Skymod: avg', np.mean(skymod), 'min/max', skymod.min(), skymod.max())
fit_img = self.img - skymod
else:
fit_img = self.img - sky_img
with np.errstate(invalid='ignore'):
# sqrt(0.) can trigger complaints; https://github.com/numpy/numpy/issues/11448
ierr = np.sqrt(self.invvar)
# Gaia
ra,dec = radec_at_mjd(gaia.ra, gaia.dec, gaia.ref_epoch.astype(float),
gaia.pmra, gaia.pmdec, gaia.parallax, self.mjd_obs)
gaia.rename('source_id', 'gaia_sourceid')
gaia.ra_now = ra
gaia.dec_now = dec
gaia.rename('ra', 'ra_gaia')
gaia.rename('dec', 'dec_gaia')
for b in ['g', 'bp', 'rp']:
mag = gaia.get('phot_%s_mean_mag' % b)
sn = gaia.get('phot_%s_mean_flux_over_error' % b)
magerr = np.abs(2.5/np.log(10.) * 1./np.maximum(1., sn))
gaia.set('phot_%s_mean_mag_error' % b, magerr)
# FIXME -- NaNs?
gaia.flux0 = np.ones(len(gaia), np.float32)
# we set 'astrom' and omit 'photom'; it will get filled in with zeros.
gaia.astrom = np.ones(len(gaia), bool)
refs = [gaia]
if ps1 is not None:
# PS1 for photometry
# Initial flux estimate, from nominal zeropoint
ps1.flux0 = (10.**((zp0 - ps1.legacy_survey_mag) / 2.5) * exptime).astype(np.float32)
# we don't have/use proper motions for PS1 stars
ps1.rename('ra_ok', 'ra_now')
ps1.rename('dec_ok', 'dec_now')
ps1.ra_ps1 = ps1.ra_now.copy()
ps1.dec_ps1 = ps1.dec_now.copy()
ps1.ps1_objid = ps1.obj_id
for band in 'grizY':
i = ps1cat.ps1band.get(band, None)
if i is None:
print('No band', band, 'in PS1 catalog')
continue
ps1.set('ps1_'+band.lower(), ps1.median[:,i].astype(np.float32))
# we set 'photom' and omit 'astrom'; it will get filled in with zeros.
ps1.photom = np.ones (len(ps1), bool)
# Match PS1 to Gaia stars within 1".
I,J,d = match_radec(gaia.ra_gaia, gaia.dec_gaia,
ps1.ra_ps1, ps1.dec_ps1, 1./3600.,
nearest=True)
print(len(I), 'of', len(gaia), 'Gaia and', len(ps1), 'PS1 stars matched')
# Merged = PS1 + unmatched Gaia
if len(I):
# Merge columns for the matched stars
for c in gaia.get_columns():
G = gaia.get(c)
# If column exists in both (eg, ra_now, dec_now), override
# the PS1 value with the Gaia value; except for "photom".
if c in ps1.get_columns():
X = ps1.get(c)
else:
X = np.zeros(len(ps1), G.dtype)
X[J] = G[I]
ps1.set(c, X)
# unmatched Gaia stars
unmatched = np.ones(len(gaia), bool)
unmatched[I] = False
gaia.cut(unmatched)
del unmatched
refs.append(ps1)
if len(refs) == 1:
refs = refs[0]
else:
refs = merge_tables(refs, columns='fillzero')
cols = [('ra_gaia', np.double),
('dec_gaia', np.double),
('gaia_sourceid', np.int64),
('phot_g_mean_mag', np.float32),
('phot_g_mean_mag_error', np.float32),
('phot_bp_mean_mag', np.float32),
('phot_bp_mean_mag_error', np.float32),
('phot_rp_mean_mag', np.float32),
('phot_rp_mean_mag_error', np.float32),
('ra_ps1', np.double),
('dec_ps1', np.double),
('ps1_objid', np.int64),
('ps1_g', np.float32),
('ps1_r', np.float32),
('ps1_i', np.float32),
('ps1_z', np.float32),
('ps1_y', np.float32),
('ra_now', np.double),
('dec_now', np.double),
('flux0', np.float32),
('legacy_survey_mag', np.float32),
('astrom', bool),
('photom', bool),
]
refcols = refs.get_columns()
for c,dt in cols:
if not c in refcols:
refs.set(c, np.zeros(len(refs), dt))
refcols = refs.get_columns()
wantcols = dict(cols)
for c in refcols:
if not c in wantcols:
refs.delete_column(c)
continue
# dt = wantcols[c]
# rdt = refs.get(c).dtype
# if rdt != dt:
# print('Warning: column', c, 'has type', rdt, 'not', dt)
# print('(Cleaned) reference stars:')
# refs.about()
if False:
from astrometry.util.plotutils import PlotSequence
ps = PlotSequence('astromfit')
plt.clf()
plt.hist((fit_img * ierr).ravel(), range=(-5,5), bins=100)
plt.xlabel('Image pixel S/N')
ps.savefig()
# Run tractor fitting on the ref stars, using the PsfEx model.
phot = self.tractor_fit_sources(refs.ra_now, refs.dec_now, refs.flux0,
fit_img, ierr, psf)
print('Got photometry results for', len(phot), 'reference stars')
if len(phot) == 0:
return self.return_on_error('No photometry available',ccds=ccds)
# Cut to ref stars that were photometered
refs.cut(phot.iref)
phot.delete_column('iref')
refs.delete_column('flux0')
phot.raoff = (refs.ra_now - phot.ra_fit ) * 3600. * np.cos(np.deg2rad(refs.dec_now))
phot.decoff = (refs.dec_now - phot.dec_fit) * 3600.
dra = phot.raoff [refs.astrom]
ddec = phot.decoff[refs.astrom]
nastrom = len(dra)
raoff = np.median(dra)
decoff = np.median(ddec)
rastd = np.std(dra)
decstd = np.std(ddec)
ra_clip, _, _ = sigmaclip(dra, low=3., high=3.)
rarms = getrms(ra_clip)
dec_clip, _, _ = sigmaclip(ddec, low=3., high=3.)
decrms = getrms(dec_clip)
print('RA, Dec offsets (arcsec): %.4f, %.4f' % (raoff, decoff))
print('RA, Dec stddev (arcsec): %.4f, %.4f' % (rastd, decstd))
print('RA, Dec RMS (arcsec): %.4f, %.4f' % (rarms, decrms))
ok, = np.nonzero(phot.flux > 0)
phot.instpsfmag = np.zeros(len(phot), np.float32)
phot.instpsfmag[ok] = -2.5*np.log10(phot.flux[ok] / exptime)
# Uncertainty on psfmag
phot.dpsfmag = np.zeros(len(phot), np.float32)
phot.dpsfmag[ok] = np.abs((-2.5 / np.log(10.)) * phot.dflux[ok] / phot.flux[ok])
H,W = self.bitmask.shape
phot.bitmask = self.bitmask[np.clip(phot.y1, 0, H-1).astype(int),
np.clip(phot.x1, 0, W-1).astype(int)]
phot.psfmag = np.zeros(len(phot), np.float32)
dmag = (refs.legacy_survey_mag - phot.instpsfmag)[refs.photom]
if len(dmag):
dmag = dmag[np.isfinite(dmag)]
print('Zeropoint: using', len(dmag), 'good stars')
dmag, _, _ = sigmaclip(dmag, low=2.5, high=2.5)
nphotom = len(dmag)
print('Zeropoint: using', nphotom, 'stars after sigma-clipping')
zptstd = np.std(dmag)
zptmed = np.median(dmag)
dzpt = zptmed - zp0
kext = self.extinction(self.band)
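            # transparency: throughput relative to the nominal zeropoint, corrected to
            # unit airmass (dzpt < 0, i.e. shallower than nominal, gives transp < 1)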
transp = 10.**(-0.4 * (-dzpt - kext * (airmass - 1.0)))
print('Number of stars used for zeropoint median %d' % nphotom)
print('Zeropoint %.4f' % zptmed)
print('Offset from nominal: %.4f' % dzpt)
print('Scatter: %.4f' % zptstd)
print('Transparency %.4f' % transp)
ok = (phot.instpsfmag != 0)
phot.psfmag[ok] = phot.instpsfmag[ok] + zptmed
else:
nphotom = 0
dzpt = 0.
zptmed = 0.
zptstd = 0.
transp = 0.
for c in ['x0','y0','x1','y1','flux','raoff','decoff', 'psfmag',
'dflux','dx','dy']:
phot.set(c, phot.get(c).astype(np.float32))
phot.rename('x0', 'x_ref')
phot.rename('y0', 'y_ref')
phot.rename('x1', 'x_fit')
phot.rename('y1', 'y_fit')
phot.add_columns_from(refs)
# Save CCD-level information in the per-star table.
phot.ccd_raoff = np.zeros(len(phot), np.float32) + raoff
phot.ccd_decoff = np.zeros(len(phot), np.float32) + decoff
phot.ccd_phoff = np.zeros(len(phot), np.float32) + dzpt
phot.ccd_zpt = np.zeros(len(phot), np.float32) + zptmed
phot.expnum = np.zeros(len(phot), np.int64) + self.expnum
phot.ccdname = np.array([self.ccdname] * len(phot))
phot.filter = np.array([self.band] * len(phot))
# ugh, pad ccdname to 3 characters for DECam
if self.camera == 'decam' and len(self.ccdname) < 3:
phot.ccdname = phot.ccdname.astype('S3')
phot.exptime = np.zeros(len(phot), np.float32) + self.exptime
phot.gain = np.zeros(len(phot), np.float32) + self.gain
phot.airmass = np.zeros(len(phot), np.float32) + airmass
import photutils
apertures_arcsec_diam = [6, 7, 8]
for arcsec_diam in apertures_arcsec_diam:
ap = photutils.CircularAperture(np.vstack((phot.x_fit, phot.y_fit)).T,
arcsec_diam / 2. / self.pixscale)
with np.errstate(divide='ignore'):
err = 1./ierr
apphot = photutils.aperture_photometry(fit_img, ap, error=err, mask=(ierr==0))
phot.set('apflux_%i' % arcsec_diam, apphot.field('aperture_sum').data.astype(np.float32))
phot.set('apflux_%i_err' % arcsec_diam, apphot.field('aperture_sum_err').data.astype(np.float32))
# Add to the zeropoints table
ccds['raoff'] = raoff
ccds['decoff'] = decoff
ccds['rastddev'] = rastd
ccds['decstddev'] = decstd
ccds['rarms'] = rarms
ccds['decrms'] = decrms
ccds['phoff'] = dzpt
ccds['phrms'] = zptstd
ccds['zpt'] = zptmed
ccds['transp'] = transp
ccds['nmatch_photom'] = nphotom
ccds['nmatch_astrom'] = nastrom
# .ra,.dec = Gaia else PS1
phot.ra = phot.ra_gaia
phot.dec = phot.dec_gaia
I, = np.nonzero(phot.ra == 0)
phot.ra [I] = phot.ra_ps1 [I]
phot.dec[I] = phot.dec_ps1[I]
stars_astrom = phot
# Create subset table for Eddie's ubercal
stars_photom = phot.copy()
cols = ['ra', 'dec', 'flux', 'dflux', 'chi2', 'fracmasked', 'instpsfmag',
'dpsfmag',
'bitmask', 'x_fit', 'y_fit', 'gaia_sourceid', 'ra_gaia', 'dec_gaia',
'phot_g_mean_mag', 'phot_bp_mean_mag', 'phot_rp_mean_mag',
'phot_g_mean_mag_error', 'phot_bp_mean_mag_error',
'phot_rp_mean_mag_error',
'ps1_objid', 'ra_ps1', 'dec_ps1',
'ps1_g', 'ps1_r', 'ps1_i', 'ps1_z', 'ps1_y', 'legacy_survey_mag',
'expnum', 'ccdname', 'exptime', 'gain', 'airmass', 'filter',
'apflux_6', 'apflux_7', 'apflux_8',
'apflux_6_err', 'apflux_7_err', 'apflux_8_err',
'ra_now', 'dec_now', 'ra_fit', 'dec_fit', 'x_ref', 'y_ref'
]
for c in stars_photom.get_columns():
if not c in cols:
stars_photom.delete_column(c)
t0= ptime('all-computations-for-this-ccd',t0)
# Plots for comparing to Arjuns zeropoints*.ps
if self.verboseplots:
self.make_plots(stars,dmag,ccds['zpt'],ccds['transp'])
t0= ptime('made-plots',t0)
return ccds, stars_photom, stars_astrom
def ps1_to_observed(self, ps1):
colorterm = self.colorterm_ps1_to_observed(ps1.median, self.band)
ps1band = ps1cat.ps1band[self.band]
return ps1.median[:, ps1band] + np.clip(colorterm, -1., +1.)
def get_splinesky_merged_filename(self):
expstr = '%08i' % self.expnum
fn = os.path.join(self.calibdir, self.camera, 'splinesky-merged', expstr[:5],
'%s-%s.fits' % (self.camera, expstr))
return fn
def get_splinesky_unmerged_filename(self):
expstr = '%08i' % self.expnum
return os.path.join(self.calibdir, self.camera, 'splinesky', expstr[:5], expstr,
'%s-%s-%s.fits' % (self.camera, expstr, self.ext))
def get_splinesky(self):
# Find splinesky model file and read it
import tractor
from tractor.utils import get_class_from_name
# Look for merged file
fn = self.get_splinesky_merged_filename()
#print('Looking for file', fn)
if os.path.exists(fn):
print('Reading splinesky-merged {}'.format(fn))
T = fits_table(fn)
if validate_procdate_plver(fn, 'table', self.expnum, self.plver,
self.procdate, self.plprocid, data=T):
I, = np.nonzero((T.expnum == self.expnum) *
np.array([c.strip() == self.ext for c in T.ccdname]))
if len(I) == 1:
Ti = T[I[0]]
# Remove any padding
h,w = Ti.gridh, Ti.gridw
Ti.gridvals = Ti.gridvals[:h, :w]
Ti.xgrid = Ti.xgrid[:w]
Ti.ygrid = Ti.ygrid[:h]
skyclass = Ti.skyclass.strip()
clazz = get_class_from_name(skyclass)
fromfits = getattr(clazz, 'from_fits_row')
sky = fromfits(Ti)
return sky
# Look for single-CCD file
fn = self.get_splinesky_unmerged_filename()
#print('Reading file', fn)
if not os.path.exists(fn):
return None
print('Reading splinesky {}'.format(fn))
hdr = read_primary_header(fn)
if not validate_procdate_plver(fn, 'primaryheader', self.expnum, self.plver,
self.procdate, self.plprocid, data=hdr):
return None
try:
skyclass = hdr['SKY']
except NameError:
raise NameError('SKY not in header: skyfn={}'.format(fn))
clazz = get_class_from_name(skyclass)
if getattr(clazz, 'from_fits', None) is not None:
fromfits = getattr(clazz, 'from_fits')
sky = fromfits(fn, hdr)
else:
fromfits = getattr(clazz, 'fromFitsHeader')
sky = fromfits(hdr, prefix='SKY_')
return sky
def tractor_fit_sources(self, ref_ra, ref_dec, ref_flux, img, ierr,
psf, normalize_psf=True):
import tractor
plots = False
#plot_this = np.hypot(x - 118, y - 1276) < 5
plot_this = False
if plots:
from astrometry.util.plotutils import PlotSequence
ps = PlotSequence('astromfit')
print('Fitting positions & fluxes of %i stars' % len(ref_ra))
cal = fits_table()
# These x0,y0,x1,y1 are zero-indexed coords.
cal.x0 = []
cal.y0 = []
cal.x1 = []
cal.y1 = []
cal.flux = []
cal.dx = []
cal.dy = []
cal.dflux = []
cal.psfsum = []
cal.iref = []
cal.chi2 = []
cal.fracmasked = []
for istar in range(len(ref_ra)):
ok,x,y = self.wcs.radec2pixelxy(ref_ra[istar], ref_dec[istar])
x -= 1
y -= 1
# Fitting radius
R = 10
H,W = img.shape
xlo = int(x - R)
ylo = int(y - R)
if xlo < 0 or ylo < 0:
continue
xhi = xlo + R*2
yhi = ylo + R*2
if xhi >= W or yhi >= H:
continue
subimg = img[ylo:yhi+1, xlo:xhi+1]
# FIXME -- check that ierr is correct
subie = ierr[ylo:yhi+1, xlo:xhi+1]
subpsf = psf.constantPsfAt(x, y)
psfsum = np.sum(subpsf.img)
if normalize_psf:
# print('Normalizing PsfEx model with sum:', s)
subpsf.img /= psfsum
if np.all(subie == 0):
#print('Inverse-variance map is all zero')
continue
#print('PSF model:', subpsf)
#print('PSF image sum:', subpsf.img.sum())
tim = tractor.Image(data=subimg, inverr=subie, psf=subpsf)
flux0 = ref_flux[istar]
#print('Zp0', zp0, 'mag', ref.mag[istar], 'flux', flux0)
x0 = x - xlo
y0 = y - ylo
src = tractor.PointSource(tractor.PixPos(x0, y0),
tractor.Flux(flux0))
tr = tractor.Tractor([tim], [src])
tr.freezeParam('images')
optargs = dict(priors=False, shared_params=False)
# The initial flux estimate doesn't seem to work too well,
# so just for plotting's sake, fit flux first
src.freezeParam('pos')
tr.optimize(**optargs)
src.thawParam('pos')
#print('Optimizing position of Gaia star', istar)
if plots and plot_this:
plt.clf()
plt.subplot(2,2,1)
plt.imshow(subimg, interpolation='nearest', origin='lower')
plt.colorbar()
plt.subplot(2,2,2)
mod = tr.getModelImage(0)
plt.imshow(mod, interpolation='nearest', origin='lower')
plt.colorbar()
plt.subplot(2,2,3)
plt.imshow((subimg - mod) * subie, interpolation='nearest', origin='lower')
plt.colorbar()
plt.suptitle('Before')
ps.savefig()
#print('Initial flux', flux0)
for step in range(50):
dlnp, x, alpha = tr.optimize(**optargs)
#print('delta position', src.pos.x - x0, src.pos.y - y0,
# 'flux', src.brightness, 'dlnp', dlnp)
if dlnp == 0:
break
#print('Getting variance estimate: thawed params:')
#tr.printThawedParams()
variance = tr.optimize(variance=True, just_variance=True, **optargs)
# Yuck -- if inverse-variance is all zero, weird-shaped result...
if len(variance) == 4 and variance[3] is None:
print('No variance estimate available')
continue
mod = tr.getModelImage(0)
chi = (subimg - mod) * subie
psfimg = mod / mod.sum()
# profile-weighted chi-squared
cal.chi2.append(np.sum(chi**2 * psfimg))
# profile-weighted fraction of masked pixels
#cal.fracmasked.append(np.sum(psfimg * (ierr == 0)))
cal.fracmasked.append(np.sum(psfimg * (subie == 0)))
cal.psfsum.append(psfsum)
cal.x0.append(x0 + xlo)
cal.y0.append(y0 + ylo)
cal.x1.append(src.pos.x + xlo)
cal.y1.append(src.pos.y + ylo)
cal.flux.append(src.brightness.getValue())
cal.iref.append(istar)
std = np.sqrt(variance)
cal.dx.append(std[0])
cal.dy.append(std[1])
cal.dflux.append(std[2])
if plots and plot_this:
plt.clf()
plt.subplot(2,2,1)
plt.imshow(subimg, interpolation='nearest', origin='lower')
plt.colorbar()
plt.subplot(2,2,2)
mod = tr.getModelImage(0)
plt.imshow(mod, interpolation='nearest', origin='lower')
plt.colorbar()
plt.subplot(2,2,3)
plt.imshow((subimg - mod) * subie, interpolation='nearest', origin='lower')
plt.colorbar()
plt.suptitle('After')
ps.savefig()
cal.to_np_arrays()
cal.ra_fit,cal.dec_fit = self.wcs.pixelxy2radec(cal.x1 + 1, cal.y1 + 1)
return cal
def get_psfex_merged_filename(self):
expstr = '%08i' % self.expnum
fn = os.path.join(self.calibdir, self.camera, 'psfex-merged', expstr[:5],
'%s-%s.fits' % (self.camera, expstr))
return fn
def get_psfex_model(self):
import tractor
# Look for merged PsfEx file
fn = self.get_psfex_merged_filename()
expstr = '%08i' % self.expnum
#print('Looking for PsfEx file', fn)
if os.path.exists(fn):
print('Reading psfex-merged {}'.format(fn))
T = fits_table(fn)
if validate_procdate_plver(fn, 'table', self.expnum, self.plver,
self.procdate, self.plprocid, data=T):
I, = np.nonzero((T.expnum == self.expnum) *
np.array([c.strip() == self.ext for c in T.ccdname]))
if len(I) == 1:
Ti = T[I[0]]
# Remove any padding
degree = Ti.poldeg1
# number of terms in polynomial
ne = (degree + 1) * (degree + 2) // 2
Ti.psf_mask = Ti.psf_mask[:ne, :Ti.psfaxis1, :Ti.psfaxis2]
psfex = tractor.PsfExModel(Ti=Ti)
psf = tractor.PixelizedPsfEx(None, psfex=psfex)
psf.fwhm = Ti.psf_fwhm
psf.header = {}
return psf
# Look for single-CCD PsfEx file
fn = os.path.join(self.calibdir, self.camera, 'psfex', expstr[:5], expstr,
'%s-%s-%s.fits' % (self.camera, expstr, self.ext))
#print('Reading PsfEx file', fn)
if not os.path.exists(fn):
return None
print('Reading psfex {}'.format(fn))
hdr = read_primary_header(fn)
if not validate_procdate_plver(fn, 'primaryheader', self.expnum, self.plver,
self.procdate, self.plprocid, data=hdr):
return None
hdr = fitsio.read_header(fn, ext=1)
psf = tractor.PixelizedPsfEx(fn)
psf.header = hdr
psf.fwhm = hdr['PSF_FWHM']
return psf
def do_Photometry(self, obj,ps1, ccds,
save_xy=False):
"""Measure zeropoint relative to PS1
Args:
obj: ps1-matched sources detected with dao phot
ps1: ps1 source matched to obj
ccds: partially filled _ccds_table
save_xy: if True save a fits table containing
                ps1_mag and apmag for matched sources and the associated
photometric cuts
Returns:
stars_photom: fits table for stars
err_message: '' if okay, 'some error text' otherwise, this will end up being
stored in ccds['err_message']
"""
print('Photometry on %s stars' % len(ps1))
objra, objdec = self.wcs.pixelxy2radec(obj['xcentroid']+1, obj['ycentroid']+1)
cuts,phot= self.get_photometric_cuts(obj,cuts_only=False)
assert(len(phot['apflux']) == len(obj))
final_cut= ((cuts['good_flux_and_mag']) &
(cuts['no_badpix_in_ap_0']) &
(cuts['is_iso']))
if len(obj[final_cut]) == 0:
return _stars_table(),'photometry failed, no stars after cuts'
# Stars table
ccds['nmatch_photom'] = len(obj[final_cut])
print('Photometry %s stars after obj cuts' % ccds['nmatch_photom'])
stars_photom = _stars_table(nstars=ccds['nmatch_photom'])
stars_photom['apmag'] = phot['apmags'][final_cut]
stars_photom['ps1_mag'] = ps1.legacy_survey_mag[final_cut]
if save_xy:
# Save ps1_mag and apmag for every matched source
all_stars=fits_table()
all_stars.set('apmag', phot['apmags'].data)
all_stars.set('ps1_mag', ps1.legacy_survey_mag)
all_stars.set('match_x', obj['xcentroid'].data)
all_stars.set('match_y', obj['ycentroid'].data)
all_stars.set('match_ra', objra)
all_stars.set('match_dec', objdec)
# Then bool cuts for the above arrays
for key in cuts.keys():
all_stars.set(key, cuts[key])
# Avoid memoryview write error
for col in all_stars.get_columns():
all_stars.set(col,np.array(all_stars.get(col)))
all_stars.writeto('%s_%s_all_stars.fits' %
(os.path.basename(self.fn).replace('.fits','').replace('.fz',''),
self.ccdname))
# Add additional info
stars_photom['nmatch']= ccds['nmatch_photom']
self.add_ccd_info_to_stars_table(stars_photom, ccds)
star_kwargs= {"keep": final_cut,
"obj":obj,
"objra":objra,
"objdec":objdec,
"apflux":phot['apflux'],
"apskyflux":phot['apskyflux'],
"apskyflux_perpix":phot['apskyflux_perpix']}
self.add_obj_info_to_stars_table(stars_photom,**star_kwargs)
for ps1_band,ps1_iband in zip(['g','r','i','z'],[0,1,2,3]):
stars_photom['ps1_%s' % ps1_band]= ps1.median[final_cut, ps1_iband]
# Zeropoint
stars_photom['dmagall'] = stars_photom['ps1_mag'] - stars_photom['apmag']
dmag, _, _ = sigmaclip(stars_photom['dmagall'], low=2.5, high=2.5)
dmagmed = np.median(dmag)
dmagsig = np.std(dmag) # agrees with IDL codes, they just compute std
zp0 = self.zeropoint(self.band)
kext = self.extinction(self.band)
zptmed = zp0 + dmagmed
transp = 10.**(-0.4 * (zp0 - zptmed - kext * (self.airmass - 1.0)))
ccds['phoff'] = dmagmed
ccds['phrms'] = dmagsig
ccds['zpt'] = zptmed
ccds['transp'] = transp
# Badpix 5 test
if self.camera in ['90prime','mosaic']:
# good sources but treat badpix=5 as OK
final_cut= ((cuts['good_flux_and_mag']) &
(cuts['no_badpix_in_ap_0_5']) &
(cuts['is_iso']))
dmagall= ps1.legacy_survey_mag[final_cut] - phot['apmags'][final_cut]
dmag, _, _ = sigmaclip(dmagall, low=2.5, high=2.5)
dmagmed = np.median(dmag)
zp0 = self.zeropoint(self.band)
kext = self.extinction(self.band)
zptmed = zp0 + dmagmed
ccds['zpt_wbadpix5'] = zptmed
# star,empty string tuple if succeeded
return stars_photom,''
def do_Astrometry(self, obj,ref_ra,ref_dec, ccds):
"""Measure ra,dec offsets from Gaia or PS1
Args:
obj: ps1-matched sources detected with dao phot
            ref_ra,ref_dec: ra and dec of the ps1 or gaia sources matched to obj
ccds: partially filled _ccds_table
Returns:
stars_astrom: fits table for stars
err_message: '' if okay, 'some error text' otherwise, this will end up being
stored in ccds['err_message']
"""
print('Astrometry on %s stars' % len(obj))
# Cut to obj with good photometry
cuts= self.get_photometric_cuts(obj,cuts_only=True)
final_cut= ((cuts['good_flux_and_mag']) &
(cuts['no_badpix_in_ap_0']) &
(cuts['is_iso']))
if len(obj[final_cut]) == 0:
            return _stars_table(),'Astrometry failed, no stars after cuts'
ccds['nmatch_astrom'] = len(obj[final_cut])
print('Astrometry: matched %s sources within %.1f arcsec' %
(ccds['nmatch_astrom'], self.match_radius))
# Initialize
stars_astrom = _stars_table(nstars= ccds['nmatch_astrom'])
stars_astrom['nmatch']= ccds['nmatch_astrom']
self.add_ccd_info_to_stars_table(stars_astrom,
ccds)
# Fill
objra, objdec = self.wcs.pixelxy2radec(obj[final_cut]['xcentroid']+1,
obj[final_cut]['ycentroid']+1)
stars_astrom['radiff'] = (ref_ra[final_cut] - objra) * \
np.cos(np.deg2rad( objdec )) * 3600.0
stars_astrom['decdiff'] = (ref_dec[final_cut] - objdec) * 3600.0
ccds['raoff'] = np.median(stars_astrom['radiff'])
ccds['decoff'] = np.median(stars_astrom['decdiff'])
ccds['rastddev'] = np.std(stars_astrom['radiff'])
ccds['decstddev'] = np.std(stars_astrom['decdiff'])
ra_clip, _, _ = sigmaclip(stars_astrom['radiff'], low=3., high=3.)
ccds['rarms'] = getrms(ra_clip)
dec_clip, _, _ = sigmaclip(stars_astrom['decdiff'], low=3., high=3.)
ccds['decrms'] = getrms(dec_clip)
return stars_astrom,''
def get_photometric_cuts(self,obj,cuts_only):
"""Do aperture photometry and create a photometric cut base on those measurements
Args:
obj: sources detected with dao phot
cuts_only: if True, skip computing the extra photometry products;
the final photometric cut is returned in either case
Returns:
two dicts, cuts and phot
cuts: keys are ['good_flux_and_mag',
'no_badpix_in_ap_0','no_badpix_in_ap_0_5','is_iso']
phot: keys are ["apflux","apmags","apskyflux","apskyflux_perpix"]
"""
print('Performing aperture photometry')
cuts,phot= {},{}
ap = CircularAperture((obj['xcentroid'], obj['ycentroid']), self.aprad / self.pixscale)
if self.aper_sky_sub:
print('**WARNING** using sky apertures for local sky subtraction')
skyap = CircularAnnulus((obj['xcentroid'], obj['ycentroid']),
r_in=self.skyrad[0] / self.pixscale,
r_out=self.skyrad[1] / self.pixscale)
# Use skyap to subtract local sky
apphot = aperture_photometry(img, ap)
#skyphot = aperture_photometry(img, skyap)
skyphot = aperture_photometry(img, skyap, mask= img_mask > 0)
apskyflux= skyphot['aperture_sum'] / skyap.area() * ap.area()
apskyflux_perpix= skyphot['aperture_sum'] / skyap.area()
apflux = apphot['aperture_sum'] - apskyflux
else:
# ON image not sky subtracted image
apphot = aperture_photometry(self.img, ap)
apflux = apphot['aperture_sum']
# Placeholders
#apskyflux= apflux.copy()
#apskyflux.fill(0.)
#apskyflux_perpix= apskyflux.copy()
# Get close enough sky/pixel in sky annulus
# Take cutout of size ~ rout x rout, use same pixels in this slice for sky level
rin,rout= self.skyrad[0]/self.pixscale, self.skyrad[1]/self.pixscale
rad= int(np.ceil(rout)) #
box= 2*rad + 1 # Odd integer so source exactly in center
use_for_sky= np.zeros((box,box),bool)
x,y= np.meshgrid(range(box),range(box)) # array values are the indices
ind_of_center= rad
r= np.sqrt((x - ind_of_center)**2 + (y - ind_of_center)**2)
use_for_sky[(r > rin)*(r <= rout)]= True
# Get cutout around each source
apskyflux,apskyflux_perpix=[],[]
for x,y in zip(obj['xcentroid'].data,obj['ycentroid'].data):
xc,yc= int(x),int(y)
x_sl= slice(xc-rad,xc+rad+1)
y_sl= slice(yc-rad,yc+rad+1)
cutout= self.img[y_sl,x_sl]
assert(cutout.shape == use_for_sky.shape)
from astropy.stats import sigma_clipped_stats
mean, median, std = sigma_clipped_stats(cutout[use_for_sky], sigma=3.0, iters=5)
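# Pearson's empirical relation (mode ~ 3*median - 2*mean) on the sigma-clipped
# annulus pixels gives a robust estimate of the local sky level per source.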
mode_est= 3*median - 2*mean
apskyflux_perpix.append( mode_est )
apskyflux_perpix = np.array(apskyflux_perpix)
import os
import csv
import logging
import random
import pdb
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from torch.utils.data import Dataset
import matplotlib.pyplot as plt
from model.heatmap_coder import (
gaussian_radius,
draw_umich_gaussian,
draw_gaussian_1D,
draw_ellip_gaussian,
draw_umich_gaussian_2D,
)
from structures.params_3d import ParamsList
from data.augmentations import get_composed_augmentations
from .kitti_utils import Calibration, read_label, approx_proj_center, refresh_attributes, show_heatmap, show_image_with_boxes, show_edge_heatmap
from config import TYPE_ID_CONVERSION
class KITTIDataset(Dataset):
def __init__(self, cfg, root, is_train=True, transforms=None, augment=True):
super(KITTIDataset, self).__init__()
self.root = root
self.image_dir = os.path.join(root, "image_2")
self.image_right_dir = os.path.join(root, "image_3")
self.label_dir = os.path.join(root, "label_2")
self.calib_dir = os.path.join(root, "calib")
self.split = cfg.DATASETS.TRAIN_SPLIT if is_train else cfg.DATASETS.TEST_SPLIT
self.is_train = is_train
self.transforms = transforms
self.imageset_txt = os.path.join(root, "ImageSets", "{}.txt".format(self.split))
assert os.path.exists(self.imageset_txt), "ImageSets file not exist, dir = {}".format(self.imageset_txt)
image_files = []
for line in open(self.imageset_txt, "r"):
base_name = line.replace("\n", "")
image_name = base_name + ".png"
image_files.append(image_name)
self.image_files = image_files
self.label_files = [i.replace(".png", ".txt") for i in self.image_files]
self.classes = cfg.DATASETS.DETECT_CLASSES
self.num_classes = len(self.classes)
self.num_samples = len(self.image_files)
# whether to use right-view image
self.use_right_img = cfg.DATASETS.USE_RIGHT_IMAGE & is_train
self.augmentation = get_composed_augmentations() if (self.is_train and augment) else None
# input and output shapes
self.input_width = cfg.INPUT.WIDTH_TRAIN
self.input_height = cfg.INPUT.HEIGHT_TRAIN
self.down_ratio = cfg.MODEL.BACKBONE.DOWN_RATIO
self.output_width = self.input_width // cfg.MODEL.BACKBONE.DOWN_RATIO
self.output_height = self.input_height // cfg.MODEL.BACKBONE.DOWN_RATIO
self.output_size = [self.output_width, self.output_height]
# maximal length of extracted feature map when applying edge fusion
self.max_edge_length = (self.output_width + self.output_height) * 2
self.max_objs = cfg.DATASETS.MAX_OBJECTS
# filter invalid annotations
self.filter_annos = cfg.DATASETS.FILTER_ANNO_ENABLE
self.filter_params = cfg.DATASETS.FILTER_ANNOS
# handling truncation
self.consider_outside_objs = cfg.DATASETS.CONSIDER_OUTSIDE_OBJS
self.use_approx_center = cfg.INPUT.USE_APPROX_CENTER # whether to use approximate representations for outside objects
self.proj_center_mode = cfg.INPUT.APPROX_3D_CENTER # the type of approximate representations for outside objects
# for edge feature fusion
self.enable_edge_fusion = cfg.MODEL.HEAD.ENABLE_EDGE_FUSION
# True
self.use_modify_keypoint_visible = cfg.INPUT.KEYPOINT_VISIBLE_MODIFY
PI = np.pi
self.orientation_method = cfg.INPUT.ORIENTATION
self.multibin_size = cfg.INPUT.ORIENTATION_BIN_SIZE
self.alpha_centers = np.array([0, PI / 2, PI, - PI / 2]) # centers for multi-bin orientation
# use '2D' or '3D' center for heatmap prediction
self.heatmap_center = cfg.INPUT.HEATMAP_CENTER
self.adjust_edge_heatmap = cfg.INPUT.ADJUST_BOUNDARY_HEATMAP # True
self.edge_heatmap_ratio = cfg.INPUT.HEATMAP_RATIO # radius / 2d box, 0.5
self.logger = logging.getLogger("monoflex.dataset")
self.logger.info("Initializing KITTI {} set with {} files loaded.".format(self.split, self.num_samples))
def __len__(self):
if self.use_right_img:
return self.num_samples * 2
else:
return self.num_samples
def get_image(self, idx):
img_filename = os.path.join(self.image_dir, self.image_files[idx])
img = Image.open(img_filename).convert('RGB')
return img
def get_right_image(self, idx):
img_filename = os.path.join(self.image_right_dir, self.image_files[idx])
img = Image.open(img_filename).convert('RGB')
return img
def get_calibration(self, idx, use_right_cam=False):
calib_filename = os.path.join(self.calib_dir, self.label_files[idx])
return Calibration(calib_filename, use_right_cam=use_right_cam)
def get_label_objects(self, idx):
if self.split != 'test':
label_filename = os.path.join(self.label_dir, self.label_files[idx])
return read_label(label_filename)
def get_edge_utils(self, image_size, pad_size, down_ratio=4):
img_w, img_h = image_size
x_min, y_min = np.ceil(pad_size[0] / down_ratio), np.ceil(pad_size[1] / down_ratio)
x_max, y_max = (pad_size[0] + img_w - 1) // down_ratio, (pad_size[1] + img_h - 1) // down_ratio
step = 1
# boundary idxs
edge_indices = []
# left
y = torch.arange(y_min, y_max, step)
x = torch.ones(len(y)) * x_min
edge_indices_edge = torch.stack((x, y), dim=1)
edge_indices_edge[:, 0] = torch.clamp(edge_indices_edge[:, 0], x_min)
edge_indices_edge[:, 1] = torch.clamp(edge_indices_edge[:, 1], y_min)
edge_indices_edge = torch.unique(edge_indices_edge, dim=0)
edge_indices.append(edge_indices_edge)
# bottom
x = torch.arange(x_min, x_max, step)
y = torch.ones(len(x)) * y_max
edge_indices_edge = torch.stack((x, y), dim=1)
edge_indices_edge[:, 0] = torch.clamp(edge_indices_edge[:, 0], x_min)
edge_indices_edge[:, 1] = torch.clamp(edge_indices_edge[:, 1], y_min)
edge_indices_edge = torch.unique(edge_indices_edge, dim=0)
edge_indices.append(edge_indices_edge)
# right
y = torch.arange(y_max, y_min, -step)
x = torch.ones(len(y)) * x_max
edge_indices_edge = torch.stack((x, y), dim=1)
edge_indices_edge[:, 0] = torch.clamp(edge_indices_edge[:, 0], x_min)
edge_indices_edge[:, 1] = torch.clamp(edge_indices_edge[:, 1], y_min)
edge_indices_edge = torch.unique(edge_indices_edge, dim=0).flip(dims=[0])
edge_indices.append(edge_indices_edge)
# top
x = torch.arange(x_max, x_min - 1, -step)
y = torch.ones(len(x)) * y_min
edge_indices_edge = torch.stack((x, y), dim=1)
edge_indices_edge[:, 0] = torch.clamp(edge_indices_edge[:, 0], x_min)
edge_indices_edge[:, 1] = torch.clamp(edge_indices_edge[:, 1], y_min)
edge_indices_edge = torch.unique(edge_indices_edge, dim=0).flip(dims=[0])
edge_indices.append(edge_indices_edge)
# concatenate
edge_indices = torch.cat([index.long() for index in edge_indices], dim=0)
return edge_indices
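# Note: the four segments above walk the feature-map boundary in the order
# left -> bottom -> right -> top, so the concatenated indices form a single
# closed loop of (x, y) positions consumed by the edge fusion module.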
def encode_alpha_multibin(self, alpha, num_bin=2, margin=1 / 6):
# encode alpha (-PI ~ PI) to 2 classes and 1 regression
encode_alpha = np.zeros(num_bin * 2)
bin_size = 2 * np.pi / num_bin # pi
margin_size = bin_size * margin # pi / 6
bin_centers = self.alpha_centers
range_size = bin_size / 2 + margin_size
offsets = alpha - bin_centers
offsets[offsets > np.pi] = offsets[offsets > np.pi] - 2 * np.pi
offsets[offsets < -np.pi] = offsets[offsets < -np.pi] + 2 * np.pi
for i in range(num_bin):
offset = offsets[i]
if abs(offset) < range_size:
encode_alpha[i] = 1
encode_alpha[i + num_bin] = offset
return encode_alpha
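# Illustrative example (not from the original code), using the defaults
# num_bin=2 and margin=1/6: alpha = -1.0 lies within range_size = pi/2 + pi/6
# of the first center (0) only, so encode_alpha = [1, 0, -1.0, 0], i.e. the
# per-bin flags followed by the per-bin angular offsets.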
def filtrate_objects(self, obj_list):
"""
Discard objects which are not in self.classes (or its similar classes)
:param obj_list: list
:return: list
"""
type_whitelist = self.classes
valid_obj_list = []
for obj in obj_list:
if obj.type not in type_whitelist:
continue
valid_obj_list.append(obj)
return valid_obj_list
def pad_image(self, image):
img = np.array(image)
h, w, c = img.shape
ret_img = np.zeros((self.input_height, self.input_width, c))
pad_y = (self.input_height - h) // 2
pad_x = (self.input_width - w) // 2
ret_img[pad_y: pad_y + h, pad_x: pad_x + w] = img
pad_size = np.array([pad_x, pad_y])
return Image.fromarray(ret_img.astype(np.uint8)), pad_size
def __getitem__(self, idx):
if idx >= self.num_samples:
# utilize right color image
idx = idx % self.num_samples
img = self.get_right_image(idx)
calib = self.get_calibration(idx, use_right_cam=True)
objs = None if self.split == 'test' else self.get_label_objects(idx)
use_right_img = True
# generate the bboxes for right color image
right_objs = []
img_w, img_h = img.size
for obj in objs:
corners_3d = obj.generate_corners3d()
corners_2d, _ = calib.project_rect_to_image(corners_3d)
obj.box2d = np.array([max(corners_2d[:, 0].min(), 0), max(corners_2d[:, 1].min(), 0),
min(corners_2d[:, 0].max(), img_w - 1), min(corners_2d[:, 1].max(), img_h - 1)], dtype=np.float32)
obj.xmin, obj.ymin, obj.xmax, obj.ymax = obj.box2d
right_objs.append(obj)
objs = right_objs
else:
# utilize left color image
img = self.get_image(idx)
calib = self.get_calibration(idx)
objs = None if self.split == 'test' else self.get_label_objects(idx)
use_right_img = False
original_idx = self.image_files[idx][:6]
objs = self.filtrate_objects(objs) # remove objects of irrelevant classes
# random horizontal flip
if self.augmentation is not None:
img, objs, calib = self.augmentation(img, objs, calib)
# pad image
img_before_aug_pad = np.array(img).copy()
img_w, img_h = img.size
img, pad_size = self.pad_image(img)
# for training visualize, use the padded images
ori_img = np.array(img).copy() if self.is_train else img_before_aug_pad
# the boundaries of the image after padding
x_min, y_min = int(np.ceil(pad_size[0] / self.down_ratio)), int(np.ceil(pad_size[1] / self.down_ratio))
x_max, y_max = (pad_size[0] + img_w - 1) // self.down_ratio, (pad_size[1] + img_h - 1) // self.down_ratio
if self.enable_edge_fusion:
# generate edge_indices for the edge fusion module
input_edge_indices = np.zeros([self.max_edge_length, 2], dtype=np.int64)
edge_indices = self.get_edge_utils((img_w, img_h), pad_size).numpy()
input_edge_count = edge_indices.shape[0]
input_edge_indices[:edge_indices.shape[0]] = edge_indices
input_edge_count = input_edge_count - 1 # explain ?
if self.split == 'test':
# for inference we parametrize with original size
target = ParamsList(image_size=img.size, is_train=self.is_train)
target.add_field("pad_size", pad_size)
target.add_field("calib", calib)
target.add_field("ori_img", ori_img)
if self.enable_edge_fusion:
target.add_field('edge_len', input_edge_count)
target.add_field('edge_indices', input_edge_indices)
if self.transforms is not None: img, target = self.transforms(img, target)
return img, target, original_idx
# heatmap
heat_map = np.zeros([self.num_classes, self.output_height, self.output_width], dtype=np.float32)
ellip_heat_map = np.zeros([self.num_classes, self.output_height, self.output_width], dtype=np.float32)
# classification
cls_ids = np.zeros([self.max_objs], dtype=np.int32)
# License: Apache-2.0
import pytest
import numpy as np
import xgboost
from xgboost import XGBClassifier
from gators.model_building.xgb_booster_builder import XGBBoosterBuilder
def test():
X_train = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y_train = np.array([0, 1, 1, 0])
#!/usr/bin/env python3
#
from __future__ import division, print_function
import numpy as np
class Transformation(object):
"""
Transforms from model to search space (and back).
"""
def transform(self, parameters, which_model, no_cells):
"""
Transform from model into search space.
"""
x = np.array([
np.log(parameters[0]),
parameters[1],
np.log(parameters[2]),
parameters[3],
np.log(parameters[4]),
parameters[5],
np.log(parameters[6]),
parameters[7],
np.log(parameters[8]),
parameters[9],
np.log(parameters[10]),
parameters[11],
np.log(parameters[12]),
parameters[13],
np.log(parameters[14]),
parameters[15],
np.log(parameters[16]),
parameters[17],
np.log(parameters[18]),
parameters[19],
np.log(parameters[20]),
parameters[21],
np.log(parameters[22]),
parameters[23]
])
self.n_params = len(x)
self.no_cells = no_cells
for i in range(self.no_cells):
x = np.append(x, parameters[self.n_params+i])
return x
def detransform(self, transformed_parameters, which_model, noise=False):
"""
Transform back from search space to model space.
"""
x = np.array([
np.exp(transformed_parameters[0]),
transformed_parameters[1],
np.exp(transformed_parameters[2]),
transformed_parameters[3],
np.exp(transformed_parameters[4]),
transformed_parameters[5],
np.exp(transformed_parameters[6]),
import numpy as np
import matplotlib.pyplot as plt
from pyroomacoustics import dB, all_combinations
from pyroomacoustics.directivities import cardioid_func
from pyroomacoustics.doa import spher2cart
azimuth = np.radians(np.linspace(start=0, stop=360, num=361, endpoint=True))
colatitude = np.radians(np.linspace(start=0, stop=180, num=180, endpoint=True))
lower_gain = -40
""" 2D """
# get cartesian coordinates
cart = spher2cart(azimuth=azimuth)
direction = spher2cart(azimuth=225, degrees=True)
# compute response
resp = cardioid_func(x=cart, direction=direction, coef=0.5, magnitude=True)
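# Assumed behaviour of cardioid_func: responses of this family are commonly
# written coef + (1 - coef) * cos(theta), with theta the angle between each
# point in `cart` and `direction`; coef=0.5 gives the classic cardioid, while
# coef=0.25 (used in the 3D example below) is closer to a hypercardioid.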
resp_db = dB(np.array(resp))
# plot
plt.figure()
plt.polar(azimuth, resp_db)
plt.ylim([lower_gain, 0])
ax = plt.gca()
ax.yaxis.set_ticks(np.arange(start=lower_gain, stop=5, step=10))
plt.tight_layout()
""" 3D """
# get cartesian coordinates
spher_coord = all_combinations(azimuth, colatitude)
cart = spher2cart(azimuth=spher_coord[:, 0], colatitude=spher_coord[:, 1])
direction = spher2cart(azimuth=0, colatitude=45, degrees=True)
# compute response
resp = cardioid_func(x=cart, direction=direction, coef=0.25, magnitude=True)
# plot (surface plot)
fig = plt.figure()
RESP_2D = resp.reshape(len(azimuth), len(colatitude))
AZI, COL = np.meshgrid(azimuth, colatitude)
# -*- coding: utf-8 -*-
"""
This program automatically extracts and analyses data from a database of AIDS Data.
Data is from:
US Department of Health and Human Services (US DHHS), Centers for Disease Control and Prevention (CDC), National Center for HIV, STD and TB Prevention (NCHSTP), AIDS Public Information Data Set (APIDS) US Surveillance Data for 1981-2002, CDC WONDER On-line Database, December 2005. Accessed at http://wonder.cdc.gov/aids-v2002.html on Mar 9, 2017 2:26:39 PM"
Program was written for analysis of the database and is provided as is.
"""
import pypyodbc
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from AIDSAnalysisProcedures import CreateDataGrid, contourplotVitalAge,contourplotVital,contourplotHIVExpByAgeLogNorm,surface3dAIDSByAgeGroup,contourplotAIDSByAgeGroup,contourplotAIDSByAgeGroupLogNorm,contourplotHIVExpByYear,contourplotHIVExpByYearLogNorm,contourplotHIVExpByAge
plt.close()
#using pypyodbc
#Connect to database (enter your own driver and Database path) and generate cursor
conn_str = r'DRIVER={};DBQ=;'
cnxn = pypyodbc.connect(conn_str)
crsr = cnxn.cursor()
#Extract Table Names
Table_details = []
for row in crsr.columns():
if 'MSys' not in row[2]: #ignore access default databases/tables
Table_details.append((row[2],row[3]))
np_tabledetails=np.array(Table_details)
Table_names = np.unique(np_tabledetails[:,0])
#This code currently assumes the first table in the database
TableChoice = Table_names[0]
#Extract all table column headings
Column_names = np_tabledetails[np_tabledetails[:,0]==TableChoice,1]
#Extract all the unique column entries and their frequency into a dataframe and save
df_BigCount=pd.DataFrame()
for name in Column_names[1:]:
#find all the unique values in the column, including nulls
sql = 'SELECT ' + str(name) + ', COUNT(*) FROM ' + str(TableChoice) + ' AS COUNT GROUP BY ' + str(name)
BigCount = crsr.execute(sql).fetchall()
df_interBigCount=pd.DataFrame(BigCount)
df_interBigCount['Column']=str(name)
df_BigCount=pd.concat([df_BigCount, df_interBigCount])
df_BigCount=df_BigCount[['Column',0,1]]
#DATA SETUP FOR ANALYSIS
#Set up a SQL string that extracts only complete data for analysis
sql = 'SELECT LOCATION, MONTH_DIAGNOSED_CODE, Age_at_Diagnosis_Code, HIV_Exposure_Category_Code, Vital_Status_Code, Cases FROM ' + str(TableChoice) + ' WHERE (LOCATION IS NOT NULL AND MONTH_DIAGNOSED_CODE IS NOT NULL AND Age_at_Diagnosis_Code IS NOT NULL AND HIV_Exposure_Category_Code IS NOT NULL AND Vital_Status_Code IS NOT NULL AND Cases IS NOT NULL)'
result = crsr.execute(sql).fetchall()
#Take the results and format them into a DataFrame with both string (location) and numeric (rest) data
df_result=pd.DataFrame(result)
#Replace hexadecimal age range code with numbers
df_result.iloc[:][2]=df_result.iloc[:][2].replace(['A','B','C'],['10','11','12'])
#Convert Month code to decimal number: from YYYY/MM (1995/01/02/03=1995/Jan/Feb/Mar)) to YYYY.MM (1995/0.0/0.083/0.0167 (Jan/Feb/Mar)
df_result.iloc[:][1]=df_result.iloc[:][1].replace(['/All Months','/01','/02','/03','/04','/05','/06','/07','/08','/09','/10','/11','/12'],['.000','.000','.083','.167','.250','.333','.417','.500','.583','.667','.750','.833','.917'],regex=True)
#convert numeric columns saved as strings to numbers
df_result.iloc[:][1]=df_result.iloc[:][1].apply(pd.to_numeric)#Year of diagnosis
df_result.iloc[:][2]=df_result.iloc[:][2].apply(pd.to_numeric)#Age at diagnosis code
df_result.iloc[:][3]=df_result.iloc[:][3].apply(pd.to_numeric)#HIV Exposure Category code
df_result.iloc[:][4]=df_result.iloc[:][4].apply(pd.to_numeric)#Vital Status Code
df_result.iloc[:][5]=df_result.iloc[:][5].apply(pd.to_numeric)#Number of cases
#Create the labels for any sort of plot, etc. The code number in the database acts as the index of the list, which is why "None" was added to the HIV Exposure category. There is no category 6, as well.
Vital_Code_Label = ['Alive Before 2001', 'Dead Before 2001']
Age_At_Diagnosis_Label = [ '< 1 Year Old',
'1-12 Years Old',
'13-19 Years Old',
'20-24 Years Old',
'25-29 Years Old',
'30-34 Years Old',
'35-39 Years Old \n or Age is Missing',
'40-44 Years Old',
'45-49 Years Old',
'50-54 Years Old',
'55-59 Years Old',
'60-64 Years Old',
'65+ Years Old']
HIV_Exposure_Category_Label = [
'Male homosexual/\nbisexual contact',
'IV drug use\n(female and hetero male)',
'Male homo/bisexual\nand IV drug use',
'Hemophilia/coagulation\ndisorder',
'Heterosexual contact\n with HIV',
'Receipt of blood, blood \ncomponents or tissue',
'Risk not reported\n or identified',
'Pediatric hemophilia',
'Mother with HIV\n or HIV risk',
'Pediatric receipt\n of blood',
'Pediatric risk not\n reported or identified']
#Data analysis and plots
#Bar plot of age at diagnosis for all years
np_AgeAtDiag=np.array([df_result.iloc[:][2], df_result.iloc[:][5]])
AgeResult=np.zeros((13,2))
for index in range(0,13):
AgeResult[index,0]=index
AgeResult[index,1]=sum(np_AgeAtDiag[1,np_AgeAtDiag[0,:]==index])
plt.close()
fig = plt.figure()
plt.bar(AgeResult[:,0],AgeResult[:,1])
plt.xticks(AgeResult[:,0],Age_At_Diagnosis_Label, rotation='vertical')
plt.ylabel('Number of Diagnoses')
plt.title('AIDS Diagnoses By Age Group: All Years')
plt.tight_layout()
plt.show()
Age_At_Diagnosis_Code=AgeResult[:,0]
#Surface and contour plot of age at diagnosis for per reporting year
#Separate the diagnoses into bins based on year and age bracket.
#Create the grid for plotting
#Take columns 1,2,5: Year of Diagnosis, Age at Diagnosis, and Cases
np_AgeAtDiagByYear=np.array([df_result.iloc[:][1], df_result.iloc[:][2], df_result.iloc[:][5]])
#create Datagrid
datagridAgeAtDiag=CreateDataGrid(np_AgeAtDiagByYear)
#plot results
x = np.unique(np_AgeAtDiagByYear[0,:])
y = np.unique(np_AgeAtDiagByYear[1,:])
z = datagridAgeAtDiag
surface3dAIDSByAgeGroup(x,y,z,Age_At_Diagnosis_Label)
contourplotAIDSByAgeGroup(x,y,z,Age_At_Diagnosis_Label,AgeResult[:,0])
contourplotAIDSByAgeGroupLogNorm(x,y,z,Age_At_Diagnosis_Label,AgeResult[:,0])
#Bar plot of all diagnoses by year
plt.close()
fig = plt.figure()
plt.bar(x,datagridAgeAtDiag.sum(axis=1),width=0.1)
plt.xlabel('Year')
plt.ylabel('Number of Diagnoses')
plt.title('AIDS Diagnoses By Year')
plt.tight_layout()
plt.xlim([1980,2005])
plt.show()
#Bar plot of cumulative diagnoses by year
plt.close()
fig = plt.figure()
plt.bar(x,np.cumsum(datagridAgeAtDiag.sum(axis=1)),width=0.1)
plt.xlabel('Year')
plt.ylabel('Cumulative Diagnoses')
plt.title('Cumulative AIDS Diagnoses By Year')
plt.tight_layout()
plt.xlim([1980,2005])
plt.show()
#Take columns 1,3,5: Year of Diagnosis, HIV Exposure Category, and Cases
#Bar plot of HIV Exposure code for all years
np_HIVExposeCat=np.array([df_result.iloc[:][3], df_result.iloc[:][5]])
HIVArray=np.zeros((13,2))
for index in range(0,13):
HIVArray[index,0]=index
HIVArray[index,1]=sum(np_HIVExposeCat[1,np_HIVExposeCat[0,:]==index])
#There are two category labels that are created but unused: 0 and 6. Remove them and re-index the rest
HIVArray = np.delete(HIVArray,6,axis=0)
HIVArray = np.delete(HIVArray,0,axis=0)
for index in range(len(HIVArray)):
HIVArray[index,0]=index
#Bar Plot
plt.close()
fig = plt.figure()
plt.bar(HIVArray[:,0],HIVArray[:,1])
plt.xticks(HIVArray[:,0],HIV_Exposure_Category_Label, rotation='vertical')
plt.ylabel('Number of Diagnoses')
plt.title('AIDS Diagnoses By HIV Exposure Category: All Years')
plt.tight_layout()
plt.show()
#Bar Plot log scale
plt.close()
fig = plt.figure()
plt.bar(HIVArray[:,0],HIVArray[:,1],log=True)
plt.xticks(HIVArray[:,0],HIV_Exposure_Category_Label, rotation='vertical')
plt.ylabel('Number of Diagnoses')
plt.title('AIDS Diagnoses By HIV Exposure Category: All Years')
plt.tight_layout()
plt.show()
np_HIVExposeCatByYear=np.array([df_result.iloc[:][1], df_result.iloc[:][3], df_result.iloc[:][5]])
#create Datagrid
datagridHIVExpByYear=CreateDataGrid(np_HIVExposeCatByYear)
#plot results
x = np.unique(np_HIVExposeCatByYear[0,:])
y = [0,1,2,3,4,5,6,7,8,9,10]
z = datagridHIVExpByYear
contourplotHIVExpByYear(x,y,z,HIV_Exposure_Category_Label,y)
z[z==0]=0.1 #replace all zeros with 0.1 in order to produce a prettier contour graph
contourplotHIVExpByYearLogNorm(x,y,z,HIV_Exposure_Category_Label,y)
#Take columns 2,3,5: Age at Diagnosis, HIV Exposure Category, and Cases
np_HIVExposeCatByAge=np.array([df_result.iloc[:][2], df_result.iloc[:][3], df_result.iloc[:][5]])
#create Datagrid
datagridHIVExpByAge=CreateDataGrid(np_HIVExposeCatByAge)
#plot results
x = np.unique(np_HIVExposeCatByAge[0,:])
y = [0,1,2,3,4,5,6,7,8,9,10]
z = datagridHIVExpByAge
contourplotHIVExpByAge(x,y,z,HIV_Exposure_Category_Label,y,Age_At_Diagnosis_Label,Age_At_Diagnosis_Code)
z[z==0]=0.1
contourplotHIVExpByAgeLogNorm(x,y,z,HIV_Exposure_Category_Label,y,Age_At_Diagnosis_Label,Age_At_Diagnosis_Code)
#Take columns 1,3,4,5: Year of Diagnosis, HIV Exposure, Vital Stats, and Cases
np_VitalYear=np.array([df_result.iloc[:][1], df_result.iloc[:][3], df_result.iloc[:][4], df_result.iloc[:][5]])
#Separate data based upon vital stats. Set cases to zero so all dates can be represented in subsequent analysis
np_VitalYearZero=np_VitalYear
np_VitalYearZero[3,np_VitalYear[2,:]==1]=0
np_VitalYearZero=np.delete(np_VitalYearZero,2,axis=0)
datagridVitalYearZero=CreateDataGrid(np_VitalYearZero)
#Have to repeat due to a subtle bug in which both vital years were affected by the zeroing command
np_VitalYear=np.array([df_result.iloc[:][1], df_result.iloc[:][3], df_result.iloc[:][4], df_result.iloc[:][5]])
np_VitalYearOne=np_VitalYear
np_VitalYearOne[3,np_VitalYear[2,:]==0]=0
np_VitalYearOne=np.delete(np_VitalYearOne,2,axis=0)
datagridVitalYearOne=CreateDataGrid(np_VitalYearOne)
totalVitalDataGrid=datagridVitalYearZero+datagridVitalYearOne
#Calculate percentage of diagnoses dead at 2001
PercentVitalYearOne = np.round(np.divide(datagridVitalYearOne,totalVitalDataGrid,out=np.zeros_like(datagridVitalYearOne), where=totalVitalDataGrid!=0),2)
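# The out=/where= arguments keep cells with zero total diagnoses at 0 instead
# of NaN/inf, so PercentVitalYearOne is the fraction of each (year, exposure
# category) cell recorded as dead before 2001.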
#plot results
x = np.unique(np_VitalYear[0,:])
y = [0,1,2,3,4,5,6,7,8,9,10]
z = PercentVitalYearOne
contourplotVital(x,y,z,HIV_Exposure_Category_Label,y)
#Take columns 1,2,4,5: Year of Diagnosis, Age At Exposure, Vital Stats, and Cases
np_VitalAgeYear=np.array([df_result.iloc[:][1], df_result.iloc[:][2], df_result.iloc[:][4], df_result.iloc[:][5]])
#Separate data based upon vital stats. Set cases to zero so all dates can be represented in subsequent analysis
np_VitalAgeYearZero=np_VitalAgeYear
np_VitalAgeYearZero[3,np_VitalAgeYear[2,:]==1]=0
np_VitalAgeYearZero=np.delete(np_VitalAgeYearZero,2,axis=0)
datagridVitalAgeYearZero=CreateDataGrid(np_VitalAgeYearZero)
#Have to repeat due to a subtle bug in which both vital years were affected by the zeroing command
np_VitalAgeYear=np.array([df_result.iloc[:][1], df_result.iloc[:][2], df_result.iloc[:][4], df_result.iloc[:][5]])
np_VitalAgeYearOne=np_VitalAgeYear
np_VitalAgeYearOne[3,np_VitalAgeYear[2,:]==0]=0
np_VitalAgeYearOne=np.delete(np_VitalAgeYearOne,2,axis=0)
datagridVitalAgeYearOne=CreateDataGrid(np_VitalAgeYearOne)
totalVitalAgeDataGrid=datagridVitalAgeYearZero+datagridVitalAgeYearOne
#Calculate percentage of diagnoses dead at 2001
PercentVitalAgeYearOne = np.round(np.divide(datagridVitalAgeYearOne,totalVitalAgeDataGrid,out=np.zeros_like(datagridVitalAgeYearOne), where=totalVitalAgeDataGrid!=0),2)
#plot results
x = np.unique(np_VitalAgeYear[0,:])
y = np.unique(np_VitalAgeYear[1,:])
z = PercentVitalAgeYearOne
contourplotVitalAge(x,y,z,Age_At_Diagnosis_Label,AgeResult[:,0])
#Bar chart showing total diagnoses and deaths by 2000
totalOne=datagridVitalAgeYearOne.sum(axis=1)
totalYear=totalVitalAgeDataGrid.sum(axis=1)
plt.close()
fig = plt.figure()
p1 = plt.bar(x,totalYear, width=0.1)
p2 = plt.bar(x,totalOne,width=0.1,color='#d62728')
plt.ylabel('Number of Diagnoses')
plt.xlabel('Year')
plt.title('AIDS Diagnoses By Year and Mortality by 2000')
plt.legend((p1[0],p2[0]),('Total Diagnoses','Dead by 2000'))
plt.xlim([1980,2005])
#Bar chart showing total cases and deaths by 2000
totalOne=datagridVitalAgeYearOne.sum(axis=1)
totalYear=totalVitalAgeDataGrid.sum(axis=1)
#create a fake data set to put on top of deaths from 2000 on because otherwise it fills to 2003 with a flat line.
yq= np.array(x)
"""
File: show_results.py
Author: <NAME>
TFG
"""
import argparse
import os
from itertools import cycle, product
import keras
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
from keras.models import load_model
from keras.preprocessing.image import ImageDataGenerator
from sklearn import metrics
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.metrics import roc_curve, auc
from vis.utils import utils
from vis.visualization import visualize_saliency
def get_class(y):
"""
Get the class name depending on y value
Parameters
----------
y: int [0,1,2]
Returns
-------
Name of class
"""
y_string = []
for i in range(len(y)):
if y[i] == 0:
y_string.append('AD')
if y[i] == 1:
y_string.append('non-AD/MCI')
if y[i] == 2:
y_string.append('MCI')
return y_string
def plot_roc_curve(y_score, y, fname):
"""
Plots ROC curve for each class
Parameters
----------
y_score: Predicted classes
y: True classes
fname: File name where the ROC curves will be stored
Returns
-------
"""
# Plot linewidth.
lw = 2
n_classes = 3
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
# Compute macro-average ROC curve and ROC area
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at this points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
classes = ['AD', 'non-AD/MCI', 'MCI']
# Plot all ROC curves
plt.figure(1)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
for i, color in zip(range(n_classes), colors):
plt.plot(1 - fpr[i], tpr[i], color=color, lw=lw,
label='ROC for {0}: AUC = {1:0.2f}'
''.format(classes[i], roc_auc[i]))
plt.plot([1.02, -0.02], [-0.02, 1.02], 'k--', lw=lw)
plt.xlim([1.02, -0.02])
plt.ylim([-0.02, 1.02])
plt.xlabel('Specificity')
plt.ylabel('Sensitivity')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.savefig(fname)
# plt.show()
def plot_saliency_map(model, x, y, fname):
"""
Plots the model's average saliency map on the test set
Parameters
----------
model: Deep-learning model
x: Test images
y: Test labels
fname: File name to store the saliency map
Returns
-------
"""
# Find the index of the to be visualized layer above
layer_index = utils.find_layer_idx(model, 'dense_3')
# Swap softmax with linear to get better results
model.layers[layer_index].activation = keras.activations.linear
model = utils.apply_modifications(model)
# Calculate saliency_map and visualize it
saliency = np.zeros((512, 512))
m = 50
for i in range(m): # Get input
print(i)
input_image = x[i]
input_class = y[i] # Matplotlib preparations
saliency += visualize_saliency(model, layer_index, filter_indices=input_class, seed_input=input_image)
saliency /= m
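# saliency now holds the mean attribution map over the first m (=50) test
# images; the rescaling to 0-255 below is purely for display.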
fig = plt.figure()
cax = plt.imshow((saliency / saliency.max() * 255).astype(np.uint8), cmap='jet')
cbar = fig.colorbar(cax, ticks=[0, 110, 220])
cbar.ax.set_yticklabels(['Low', 'Medium', 'High']) # horizontal colorbar
plt.savefig(fname)
# plt.show()
def plot_tsne(model, x, y, fname):
"""
Plots t-SNE graphic on the train set
Parameters
----------
model: deep-learning model
x: train images
y: train labels
fname: file name where the t-SNE plot will be saved
Returns
-------
"""
# First apply PCA to reduce to 30 dims
pca = PCA(n_components=30)
# Then TSNE to reduce to 2 dims with 1000 iterations and learning rate of 200
tsne = TSNE(n_components=2, n_iter=1000, learning_rate=200)
# Get the output of layer 'dense_1' (1024 features) to reduce the dimension of that output
layer_name = 'dense_1'
intermediate_output = model.get_layer(layer_name).output
intermediate_model = keras.Model(inputs=model.input, outputs=intermediate_output)
intermediate_model.compile(optimizer=keras.optimizers.Adam(lr=0.0001),
loss='categorical_crossentropy',
metrics=['acc'])
# Get the features generated when passing X data
features = intermediate_model.predict(x)
# Apply PCA and t-SNE
pca_result = pca.fit_transform(features)
tsne_result = tsne.fit_transform(pca_result)
# Prepare data to be visualized
tsne_data = dict()
tsne_data['tsne-2d-one'] = tsne_result[:, 0]
tsne_data['tsne-2d-two'] = tsne_result[:, 1]
tsne_data['y'] = get_class(y)
# Visualize the data reduced to 2 dimensions
plt.figure(figsize=(16, 10))
sns.scatterplot(
x="tsne-2d-one", y="tsne-2d-two",
hue="y",
palette=sns.hls_palette(3, l=.6, s=.7),
data=tsne_data,
legend="full",
alpha=0.3
)
plt.savefig(fname)
# plt.show()
def plot_cm(y_test, y_pred):
"""
Show Specificity, sensitivity, precision, f1-score, TP, TN, FP, FN of each predicted class
Parameters
----------
y_test: True classes
y_pred: Predicted classes
Returns
-------
"""
class_names = ['AD', 'CN', 'MCI']
n_classes = 3
y_prd = [np.argmax(y) for y in y_pred]
for i in range(n_classes):
y_score = [y == i for y in y_prd]
y_score = np.array(y_score).astype(int)
y_true = y_test[:, i]
tn, fp, fn, tp = metrics.confusion_matrix(y_true, y_score).ravel()
specificity = tn / (tn + fp)
sensitivity = metrics.recall_score(y_true, y_score) # tp / (tp + fn)
precision = metrics.precision_score(y_true, y_score)
f1_score = metrics.f1_score(y_true, y_score)
print('############################################')
print('Metrics for class {}'.format(class_names[i]))
print('Sensitivity: ', sensitivity)
print('Specificity: ', specificity)
print('Precision: ', precision)
print('F1-Score: ', f1_score)
print('TP: ', tp)
print('TN: ', tn)
print('FP: ', fp)
print('FN: ', fn)
print()
def main():
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--directory", default=None, help="path to the directory where the images are stored")
ap.add_argument("-m", "--model", default=None, help="path to the file where the model is stored")
args = ap.parse_args()
base_dir = None
model_file = None
if args.directory is not None:
if not os.path.isdir(args.directory):
print("Directory \'%s\' does not exist" % args.directory)
return
base_dir = args.directory
else:
print("You must specify the directory where the images are stored (see help).")
return
if args.model is not None:
if not os.path.isfile(args.model):
print("File \'%s\' does not exist" % args.model)
return
model_file = args.model
else:
print("You must specify the file where the model is stored (see help).")
return
# Load the model architecture and its weights
model = load_model(model_file)
train_datagen = ImageDataGenerator(
rotation_range=8,
shear_range=np.pi / 16,
width_shift_range=0.10,
height_shift_range=0.10,
zoom_range=0.08,
horizontal_flip=False,
vertical_flip=False,
)
test_datagen = ImageDataGenerator()
# Set the batch size and calculate the number of steps per epoch
input_size = 512
batch_size = 8
train_dir = os.path.join(base_dir, 'train')
test_dir = os.path.join(base_dir, 'test')
train_generator = train_datagen.flow_from_directory(
train_dir,
target_size=(input_size, input_size),
batch_size=batch_size,
class_mode='categorical',
shuffle=True
)
test_generator = test_datagen.flow_from_directory(
test_dir,
target_size=(input_size, input_size),
batch_size=batch_size,
class_mode='categorical',
shuffle=True
)
print(test_generator.class_indices)
nb_train_samples = len(train_generator.filenames)
nb_test_samples = len(test_generator.filenames)
x_train = []
y_train = []
x_test = []
y_test = []
batches = 0
for x_batch, y_batch in train_generator:
for i in range(len(y_batch)): # Get input
x_train.append(x_batch[i])
y_train.append(y_batch[i])
batches += 1
if batches >= nb_train_samples / batch_size:
# we need to break the loop by hand because
# the generator loops indefinitely
break
batches = 0
for x_batch, y_batch in test_generator:
for i in range(len(y_batch)): # Get input
x_test.append(x_batch[i])
y_test.append(y_batch[i])
batches += 1
if batches >= nb_test_samples / batch_size:
# we need to break the loop by hand because
# the generator loops indefinitely
break
print(test_generator.classes)
x_train = np.array(x_train)
x_test = np.array(x_test)
y_train = np.array(y_train)
y_test = np.array(y_test)
y_train2 = np.argmax(y_train, axis=1)
y_test2 = np.argmax(y_test, axis=1)
#C1949699
#
import math
import random
import re
import time
from turtle import numinput
from cv2 import INTER_AREA, INTER_BITS, INTER_CUBIC, INTER_LANCZOS4, INTER_LINEAR, INTER_LINEAR_EXACT, INTER_MAX, imread, imshow, waitKey
import numpy as np
import cv2
import os
from multiprocessing import Process, Manager
data_path = os.getcwd()
dataset_path = data_path+"\\cross_validation\\"
imageDataLocation = dataset_path+"\\images\\"
truthDataLocation = dataset_path+"\\truth\\"
LOG = []
datasetTrain = []
datasetVal = []
runTimeNumber = int(1)
def calculateSTD(evaluationList, evaluationMean):
n = 1
if len(evaluationList)!=1:
n = len(evaluationList)-1
sumX = 0
for score in evaluationList:
sumX+=(score-evaluationMean)**2
standardDeviation = sumX / n
standardDeviation = math.sqrt(standardDeviation)
return standardDeviation
def runTimeCount():
i = 1
fileList = os.listdir(data_path)
while "log"+str(i)+".txt" in fileList:
i+=1
return int(i)
def saveLog():
i = 1
fileList = os.listdir(data_path)
if "log1.txt" not in fileList:
with open("log"+str(i)+".txt","a") as f:
for line in LOG:
f.write(line+"\n")
else:
while "log"+str(i)+".txt" in fileList:
i+=1
with open("log"+str(i)+".txt","a") as f:
for line in LOG:
f.write(line+"\n")
LOG.clear()
def addToLog(line,varname):
# print("Line",line)
if varname == "BinaryMasks":
LOG.append(str(varname) + " " + str(line))
elif isinstance(line, list):
# log.append(varname)
LOG.append(str(varname+" "+f'{line}'.split('=')[0]))
elif isinstance(line, (str, int)):
# log.append(varname)
LOG.append(str(varname)+" "+str(line))
elif isinstance(line, float):
LOG.append(str(varname)+" "+str(line))
def calc_IoU(mask1, mask2): # From the question.
mask1_area = np.count_nonzero(mask1)
mask2_area = np.count_nonzero(mask2)
# print(mask1_area, " : ", mask2_area)
intersection = np.count_nonzero(np.logical_and( mask1, mask2))
# print("intersection",intersection)
iou = intersection/(mask1_area+mask2_area-intersection)
return iou
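# Illustrative usage (not part of the original script):
# m1 = np.zeros((4, 4), bool); m1[:2, :2] = True   # area 4
# m2 = np.zeros((4, 4), bool); m2[1:3, 1:3] = True # area 4, overlap 1
# calc_IoU(m1, m2)  # -> 1 / (4 + 4 - 1) ~= 0.143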
# def runTest():
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
return [ atoi(c) for c in re.split(r'(\d+)', text) ]
def select_new_dataset():
datasetTrain.clear()
datasetVal.clear()
files = os.listdir(imageDataLocation)
for i in range(14):
file = random.choice(files)
while file in datasetTrain:
file = random.choice(files)
datasetTrain.append(file)
for file in files:
if file not in datasetTrain:
datasetVal.append(file)
# datasetTrain.sort(key=natural_keys)
# datasetVal.sort(key=natural_keys)
interpolationType = [INTER_LINEAR,INTER_CUBIC,INTER_AREA,INTER_BITS,INTER_LANCZOS4,INTER_LINEAR_EXACT]
confidenceList = np.divide(np.subtract(np.arange(15,91,15),5),100)
def selectParameters3():#selecting the parameters that will be swapped for each iteration
PARAMETERS = list()
PARAMETERS.clear()
PARAMETERS.append(random.choice(confidenceList))
PARAMETERS.append(random.choice(interpolationType))
return PARAMETERS
net3 = cv2.dnn.readNetFromTensorflow("dnn/frozen_inference_graph_coco.pb","dnn/mask_rcnn_inception_v2_coco_2018_01_28.pbtxt")
def v3(img,numb,PARAMETERS,dictOfBinaryMask):
thres = PARAMETERS[0]
H, W, _ = img.shape
# Create black image with same dimensions as input image
black_image = np.zeros((H, W), np.uint8)
# Detect objects inside input image
blob = cv2.dnn.blobFromImage(img, swapRB=True)
net3.setInput(blob)
boxes, masks = net3.forward(["detection_out_final", "detection_masks"])
detection_count = boxes.shape[2]
for i in range(detection_count):
box = boxes[0, 0, i]
class_id = box[1]
confs = box[2]
if confs < thres:
continue
# Get box coordinates
box = boxes[0, 0, i, 3:7] * np.array([W, H, W, H])
#!/usr/bin/env python
"""
This script is used to develop SpecViewer only. It should not be used for
production.
It creates a JSON file from a dictionary to be used in the QAP SpecViewer
development.
"""
import json
import os
import numpy as np
from scipy.ndimage import gaussian_filter1d
def main():
# Create fake source
np.random.seed(0)
import numpy as np
from astropy.convolution import convolve
def smooth(x, window_len=9, window='hanning'):
"""
Smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) in both ends so that transient parts are minimized
in the begining and end part of the output signal.
Parameters
----------
x : array_like
the input signal
window_len : int
The length of the smoothing window
window : str
The type of window from 'flat', 'hanning', 'hamming', 'bartlett',
'blackman'
'flat' window will produce a moving average smoothing.
Returns
-------
out : The smoothed signal
Example
-------
>>> t=linspace(-2,2,0.1)
>>> x=sin(t)+randn(len(t))*0.1
>>> y=smooth(x)
See Also
--------
numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman,
numpy.convolve
scipy.signal.lfilter
TODO: the window parameter could be the window itself if an array
instead of a string
"""
if isinstance(x, list):
x = np.array(x)
if x.ndim != 1:
raise ValueError("smooth only accepts 1 dimension arrays.")
if len(x) < window_len:
print("length of x: ", len(x))
print("window_len: ", window_len)
raise ValueError("Input vector needs to be bigger than window size.")
if window_len < 3:
return x
if (window_len % 2) == 0:
window_len = window_len + 1
if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError(
"Window is on of 'flat', 'hanning', \
'hamming', 'bartlett', 'blackman'")
if window == 'flat': # moving average
w = np.ones(window_len, 'd')
else:
w = eval('np.'+window+'(window_len)')
y = convolve(x, w/w.sum(), normalize_kernel=False, boundary='extend')
# return the smoothed signal
return y
def blurImage(im, n, ny=None, ftype='boxcar'):
"""
Smooths a 2D image by convolving with a filter
Parameters
----------
im : array_like
The array to smooth
n, ny : int
The size of the smoothing kernel
ftype : str
The type of smoothing kernel. Either 'boxcar' or 'gaussian'
Returns
-------
res: array_like
The smoothed vector with shape the same as im
"""
from scipy import signal
n = int(n)
if not ny:
ny = n
else:
ny = int(ny)
# keep track of nans
nan_idx = np.isnan(im)
im[nan_idx] = 0
g = signal.boxcar(n) / float(n)
if 'box' in ftype:
if im.ndim == 1:
g = signal.boxcar(n) / float(n)
elif im.ndim == 2:
g = signal.boxcar(n) / float(n)
g = np.tile(g, (1, ny, 1))
g = g / g.sum()
g = np.squeeze(g) # extra dim introduced in np.tile above
elif im.ndim == 3: # mutlidimensional binning
g = signal.boxcar(n) / float(n)
g = np.tile(g, (1, ny, 1))
g = g / g.sum()
elif 'gaussian' in ftype:
x, y = np.mgrid[-n:n+1, 0-ny:ny+1]
g = np.exp(-(x**2/float(n) + y**2/float(ny)))
g = g / g.sum()
if np.ndim(im) == 1:
g = g[n, :]
if np.ndim(im) == 3:
g = np.tile(g, (1, ny, 1))
improc = signal.convolve(im, g, mode='same')
improc[nan_idx] = np.nan
return improc
def count_to(n):
"""By example:
# 0 1 2 3 4 5 6 7 8
n = [0, 0, 3, 0, 0, 2, 0, 2, 1]
res = [0, 1, 2, 0, 1, 0, 1, 0]
That is, it is equivalent to something like this :
hstack((arange(n_i) for n_i in n))
This version seems quite a bit faster, at least for some
possible inputs, and at any rate it encapsulates a task
in a function.
"""
if n.ndim != 1:
raise Exception("n is supposed to be 1d array.")
n_mask = n.astype(bool)
n_cumsum = np.cumsum(n)
ret = np.ones(n_cumsum[-1]+1, dtype=int)
ret[n_cumsum[n_mask]] -= n[n_mask]
ret[0] -= 1
return np.cumsum(ret)[:-1]
def repeat_ind(n: np.array):
"""
Examples
--------
>>> n = [0, 0, 3, 0, 0, 2, 0, 2, 1]
>>> res = repeat_ind(n)
>>> res = [2, 2, 2, 5, 5, 7, 7, 8]
That is the input specifies how many times to repeat the given index.
It is equivalent to something like this :
hstack((zeros(n_i,dtype=int)+i for i, n_i in enumerate(n)))
But this version seems to be faster, and probably scales better, at
any rate it encapsulates a task in a function.
"""
if n.ndim != 1:
raise Exception("n is supposed to be 1d array.")
res = [[idx]*a for idx, a in enumerate(n) if a != 0]
return np.concatenate(res)
def rect(r, w, deg=False):
"""
Convert from polar (r,w) to rectangular (x,y)
x = r cos(w)
y = r sin(w)
"""
# radian if deg=0; degree if deg=1
if deg:
w = np.pi * w / 180.0
return r * np.cos(w), r * np.sin(w)
"""
=============
Binary to EEG
=============
A transformer for Kafka that reads binary data and streams EEG data.
Binary -> Kafka-Transformer -> EEG
For examples and descriptions refers to documentation:
`Data storage handler <../A1-raw_cleaning.ipynb>`_
"""
import sys
import pickle
import struct
from functools import cached_property
import numpy as np
from datetime import datetime
# import rawutil
import logging
from kafka import KafkaConsumer, KafkaProducer
from typing import TypeVar, Dict, Tuple, Any
# from openbci_stream.utils import autokill_process
# autokill_process(name='binary_2_eeg')
DEBUG = ('--debug' in sys.argv)
if DEBUG:
logging.getLogger().setLevel(logging.DEBUG)
# logging.getLogger('kafka').setLevel(logging.WARNING)
KafkaStream = TypeVar('kafka-stream')
########################################################################
class BinaryToEEG:
"""Kafka transformer with parallel implementation for processing binary raw
data into EEG microvolts. This script requires the Kafka daemon running and
enables an `auto-kill process <openbci_stream.utils.pid_admin.rst#module-openbci_stream.utils.pid_admin>`_
"""
BIN_HEADER = 0xa0
LAST_AUX_SHAPE = 0
# ----------------------------------------------------------------------
def __init__(self, board_id: str = ''):
""""""
self.board_id = board_id
self.consumer_binary = KafkaConsumer(bootstrap_servers=['localhost:9092'],
value_deserializer=pickle.loads,
auto_offset_reset='latest',
)
self.consumer_binary.subscribe([f'binary{self.board_id}'])
self.producer_eeg = KafkaProducer(bootstrap_servers=['localhost:9092'],
compression_type='gzip',
value_serializer=pickle.dumps,
)
self.remnant = b''
self.offset = None, None
# ----------------------------------------------------------------------
@cached_property
def scale_factor_eeg(self) -> float:
"""Vector with the correct factors for scale eeg data samples."""
gain = 24
# vref = 4.5 # for V
vref = 4500000 # for uV
return vref / (gain * ((2 ** 23) - 1))
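# This is the usual ADS1299 LSB size in microvolts,
# uV_per_count = Vref / (gain * (2**23 - 1)) with Vref = 4.5 V and gain = 24,
# i.e. roughly 0.0224 uV per count.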
# ----------------------------------------------------------------------
def consume(self) -> None:
"""Infinite loop for read Kafka stream."""
while True:
for record in self.consumer_binary:
logging.debug(f"processing {len(record.value['data'])}")
self.process(record)
# ----------------------------------------------------------------------
def process(self, record: KafkaStream) -> None:
"""Prepare the binary package for a successful unpack and stream.
Parameters
----------
record
Kafka stream with binary data.
"""
buffer = record.value
context = buffer['context']
context['timestamp.binary.consume'] = datetime.now().timestamp()
# Deserialice data
logging.debug(
f'Aligning data: remnant({len(self.remnant)}), buffer({len(buffer["data"])})')
data, self.remnant = self.align_data(self.remnant + buffer['data'])
logging.debug('aligned')
if not data.shape[0]:
logging.debug('No data after alignment')
self.remnant = b'' # reset deserialicig
return
logging.debug(
f'Deserializing data: data({data.shape}), context({context})')
eeg_data, aux = self.deserialize(data, context)
logging.debug(
f'deserialized eeg_data({eeg_data.shape}), aux({aux.shape})')
# Stream
context['samples'] = eeg_data.shape[1]
context['timestamp.eeg'] = datetime.now().timestamp()
logging.debug(f'Streaming')
self.stream([eeg_data, aux], context)
# ----------------------------------------------------------------------
def align_data(self, binary: bytes) -> Tuple[np.ndarray, bytes]:
"""Align data following the headers and footers.
Parameters
----------
binary
Data raw from OpenBCI board.
Returns
-------
data_aligned
Numpy array of shape (`33, LENGTH`) with headers and footer aligned.
remnant
This bytes could be used for complete next binary input.
"""
logging.debug('Binary to np.ndarray')
data = np.array(list(binary))
# Search for the the first index with a `BIN_HEADER`
logging.debug('Looking for BIN_HEADER')
start = [np.median(np.roll(data, -i, axis=0)[::33])
== self.BIN_HEADER for i in range(33)].index(True)
if (start == 0) and (data.shape[0] % 33 == 0):
logging.debug('No alignment necessary')
data_aligned = data
remnant = b''
else:
# Fix the offset to complete 33 bytes divisible array
logging.debug('Aligning...')
end = (data.shape[0] - start) % 33
logging.debug(
f'Aligning data({len(data)}) at data({start}:-{end})')
data_aligned = data[start:-end]
logging.debug('Saving remnant')
remnant = binary[-end:]
logging.debug('Reshaping')
data_aligned = data_aligned.reshape(-1, 33)
return data_aligned, remnant
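# In short: `start` is the byte shift that lines the 0xA0 headers up every 33
# bytes, whole 33-byte packets become rows of `data_aligned`, and trailing
# bytes that cannot complete a packet are returned as `remnant` so they can be
# prepended to the next buffer.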
# ----------------------------------------------------------------------
def deserialize(self, data: np.ndarray, context: Dict[str, Any]) -> None:
"""From signed 24-bits integer to signed 32-bits integer.
Parameters
----------
data
Numpy array of shape (`33, LENGTH`)
context
Information from the acquisition side useful for deserializing and
that will be packaged back in the stream.
"""
# EGG
eeg_data = data[:, 2:26]
eeg_data = getattr(self, f'deserialize_eeg_{context["connection"]}')(
eeg_data, data[:, 1], context)
# Auxiliar
stop_byte = int((np.median(data[:, -1])))
aux = self.deserialize_aux(stop_byte, data[:, 26:32], context)
self.LAST_AUX_SHAPE = aux.shape
# Stream
channels = np.array(list(context['montage'].keys())) - 1
return eeg_data.T[channels], aux.T
# self.stream([eeg_data.T[channels], aux.T], eeg_data.shape[0], context)
# ----------------------------------------------------------------------
def deserialize_eeg_wifi(self, eeg: np.ndarray, ids: np.ndarray, context: Dict[str, Any]) -> np.ndarray:
"""From signed 24-bits integer to signed 32-bits integer by channels.
The `Cyton data format <https://docs.openbci.com/docs/02Cyton/CytonDataFormat>`_
says that only can send packages of 33 bits, when a Daisy board is
attached these same packages will be sent at double speed in favor to
keep the desired sample rate for 16 channels.
Parameters
----------
eeg
Numpy array in signed 24-bits integer (`8, LENGTH`)
ids
List of IDs for eeg data.
context
Information from the acquisition side useful for deserializing and
that will be packaged back in the stream.
Returns
-------
eeg_data
EEG data in microvolts, signed 32-bits integer, (`CHANNELS, LENGTH`),
if there is a Daisy board `CHANNELS` is 16, otherwise is 8.
"""
# eeg_data = np.array([[rawutil.unpack('>u', bytes(ch))[0]
# for ch in row.reshape(-1, 3).tolist()] for row in eeg])
eeg_data = np.array([struct.unpack('>i', (b'\0' if chunk[0] < 128 else b'\xff') + chunk)
for chunk in [bytes(ch.tolist()) for ch in eeg.reshape(-1, 3)]]).reshape(-1, 8)
eeg_data = eeg_data * self.scale_factor_eeg
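# The unpack above sign-extends each big-endian 24-bit sample to 32 bits by
# prepending 0x00 (MSB < 0x80, positive) or 0xFF (negative) before '>i', and
# the multiplication converts raw counts to microvolts.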
if context['daisy']:
# # If offset, the pair index condition must change
if np.array(self.offset[0]).any():
eeg_data = np.concatenate(
[[self.offset[0]], eeg_data], axis=0)
ids = np.concatenate([[self.offset[1]], ids], axis=0)
# pair = not pair
if ids[0] != ids[1]:
eeg_data = np.delete(eeg_data, 0, axis=0)
ids = np.delete(ids, 0, axis=0)
# if the number of samples is odd, create an offset
if eeg_data.shape[0] % 2:
self.offset = eeg_data[-1], ids[-1]
eeg_data = np.delete(eeg_data, -1, axis=0)
ids = np.delete(ids, -1, axis=0)
else:
self.offset = None, None
return eeg_data.reshape(-1, 16)
return eeg_data
# ----------------------------------------------------------------------
def deserialize_eeg_serial(self, eeg: np.ndarray, ids: np.ndarray, context: Dict[str, Any]) -> np.ndarray:
"""From signed 24-bits integer to signed 32-bits integer by channels.
The `Cyton data format <https://docs.openbci.com/docs/02Cyton/CytonDataFormat>`_
says that only can send packages of 33 bits, over serial (RFduino) this
limit is absolute, when a Daisy board is attached these same amount of
packages will be sent, in this case, the data must be distributed and
interpolated in order to complete the sample rate.
Parameters
----------
eeg
Numpy array in signed 24-bits integer (`8, LENGTH`)
ids
List of IDs for eeg data.
context
Information from the acquisition side useful for deserializing and
that will be packaged back in the stream.
Returns
-------
eeg_data
EEG data in microvolts, signed 32-bits integer, (`CHANNELS, LENGTH`),
if there is a Daisy board `CHANNELS` is 16, otherwise is 8.
"""
# eeg_data = np.array([[rawutil.unpack('>u', bytes(ch))[0]
# for ch in row.reshape(-1, 3).tolist()] for row in eeg])
eeg_data = np.array([struct.unpack('>i', (b'\0' if chunk[0] < 128 else b'\xff') + chunk)
for chunk in [bytes(ch.tolist()) for ch in eeg.reshape(-1, 3)]]).reshape(-1, 8)
eeg_data = eeg_data * self.scale_factor_eeg
if context['daisy']:
even = not ids[0] % 2
# If offset, the even index condition must change
if np.array(self.offset[0]).any():
eeg_data = np.concatenate(
[[self.offset[0]], eeg_data], axis=0)
ids = np.concatenate([[self.offset[1]], ids], axis=0)
even = not even
# if the number of samples is odd, create an offset
if eeg_data.shape[0] % 2:
self.offset = eeg_data[-1], ids[-1]
eeg_data = np.delete(eeg_data, -1, axis=0)
ids = np.delete(ids, -1, axis=0)
# Data can start with a even or odd id
if even:
board = eeg_data[::2]
daisy = eeg_data[1::2]
else:
daisy = eeg_data[::2]
board = eeg_data[1::2]
board = np.array([np.interp(np.arange(0, p.shape[0], 0.5), np.arange(
p.shape[0]), p) for p in board.T]).T
daisy = np.array([np.interp(np.arange(0, p.shape[0], 0.5), np.arange(p.shape[0]), p) for p in daisy.T]).T
from __future__ import print_function, division, absolute_import
import functools
import sys
import warnings
# unittest only added in 3.4 self.subTest()
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug.testutils import (array_equal_lists, keypoints_equal, reseed,
runtest_pickleable_uint8_img)
import imgaug.augmenters.arithmetic as arithmetic_lib
import imgaug.augmenters.contrast as contrast_lib
class TestAdd(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.Add(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.Add(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
# no add, shouldnt change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [float, int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
aug = iaa.Add(value=value_type(value), per_channel=per_channel)
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
def test_add_floats(self):
# specific tests with floats
aug = iaa.Add(value=0.75)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
aug = iaa.Add(value=0.45)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
aug = iaa.Add(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
def test_keypoints_dont_change(self):
        # keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Add(value=1)
aug_det = iaa.Add(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_value(self):
# varying values
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
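        # The stochastic augmenter should produce different outputs on most consecutive
        # calls, while its deterministic copy must return identical results every time.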
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
def test_per_channel(self):
# test channelwise
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((1, 1, 100), dtype=np.uint8))
uq = np.unique(observed)
assert observed.shape == (1, 1, 100)
assert 0 in uq
assert 1 in uq
assert len(uq) == 2
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((1, 1, 20), dtype=np.uint8))
assert observed.shape == (1, 1, 20)
uq = np.unique(observed)
per_channel = (len(uq) == 2)
if per_channel:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Add(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Add(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.Add(value=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps(self):
# test heatmaps (not affected by augmenter)
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
aug = iaa.Add(value=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
image = np.zeros((3, 3), dtype=bool)
aug = iaa.Add(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 20, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
image = np.full((1, 1, 3), 20, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
def test_pickleable(self):
aug = iaa.Add((0, 50), per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=10)
class TestAddElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.AddElementwise(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.AddElementwise(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
        # no add, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_add_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
aug = iaa.AddElementwise(value=value_type(value), per_channel=per_channel)
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 + 1
assert np.max(observed) <= 100 + 10
aug = iaa.AddElementwise(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 + 1
assert np.max(observed) <= 100 + 10
aug = iaa.AddElementwise(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 - 3
assert np.max(observed) <= 100 + 3
aug = iaa.AddElementwise(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 - 3
assert np.max(observed) <= 100 + 3
def test_keypoints_dont_change(self):
        # keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.AddElementwise(value=1)
aug_det = iaa.AddElementwise(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_value(self):
# varying values
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
def test_samples_change_by_spatial_location(self):
# values should change between pixels
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=(-50, 50))
nb_same = 0
nb_different = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_flat = observed_aug.flatten()
last = None
for j in sm.xrange(observed_aug_flat.size):
if last is not None:
v = observed_aug_flat[j]
if v - 0.0001 <= last <= v + 0.0001:
nb_same += 1
else:
nb_different += 1
last = observed_aug_flat[j]
assert nb_different > 0.9 * (nb_different + nb_same)
def test_per_channel(self):
# test channelwise
aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((20, 20, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.AddElementwise(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.AddElementwise(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.AddElementwise(value=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.AddElementwise(value=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.AddElementwise(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
for _ in sm.xrange(10):
image = np.full((5, 5, 3), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
image = np.full((5, 5, 3), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
def test_pickleable(self):
aug = iaa.AddElementwise((0, 50), per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=2)
class TestAdditiveGaussianNoise(unittest.TestCase):
def setUp(self):
reseed()
def test_loc_zero_scale_zero(self):
        # no noise, shouldn't change anything
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
images = np.array([base_img])
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
def test_loc_zero_scale_nonzero(self):
# zero-centered noise
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert not np.array_equal(observed, images)
observed = aug_det.augment_images(images)
assert not np.array_equal(observed, images)
observed = aug.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
observed = aug_det.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
def test_std_dev_of_added_noise_matches_scale(self):
# std correct?
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
nb_iterations = 1000
values = []
for i in sm.xrange(nb_iterations):
images_aug = aug.augment_images(images)
values.append(images_aug[0, 0, 0, 0])
values = np.array(values)
assert np.min(values) == 0
assert 0.1 < np.std(values) / 255.0 < 0.4
def test_nonzero_loc(self):
# non-zero loc
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0.25 * 255, scale=0.01 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
nb_iterations = 1000
values = []
for i in sm.xrange(nb_iterations):
images_aug = aug.augment_images(images)
values.append(images_aug[0, 0, 0, 0] - 128)
values = np.array(values)
assert 54 < np.average(values) < 74 # loc=0.25 should be around 255*0.25=64 average
def test_tuple_as_loc(self):
# varying locs
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=(0, 0.5 * 255), scale=0.0001 * 255)
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_stochastic_parameter_as_loc(self):
# varying locs by stochastic param
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=iap.Choice([-20, 20]), scale=0.0001 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
seen = [0, 0]
for i in sm.xrange(200):
observed = aug.augment_images(images)
mean = np.mean(observed)
diff_m20 = abs(mean - (128-20))
diff_p20 = abs(mean - (128+20))
if diff_m20 <= 1:
seen[0] += 1
elif diff_p20 <= 1:
seen[1] += 1
else:
assert False
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_tuple_as_scale(self):
# varying stds
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=(0.01 * 255, 0.2 * 255))
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_stochastic_parameter_as_scale(self):
# varying stds by stochastic param
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=iap.Choice([1, 20]))
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 128
seen = [0, 0, 0]
for i in sm.xrange(200):
observed = aug.augment_images(images)
std = np.std(observed.astype(np.int32) - 128)
diff_1 = abs(std - 1)
diff_20 = abs(std - 20)
if diff_1 <= 2:
seen[0] += 1
elif diff_20 <= 5:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 5
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.AdditiveGaussianNoise(loc="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.AdditiveGaussianNoise(scale="test")
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0.5, scale=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.AdditiveGaussianNoise(scale=(0.1, 10), per_channel=True,
random_state=1)
runtest_pickleable_uint8_img(aug, iterations=2)
class TestDropout(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_zero(self):
        # no dropout, shouldn't change anything
base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Dropout(p=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
# 100% dropout, should drop everything
aug = iaa.Dropout(p=1.0)
observed = aug.augment_images(images)
expected = np.zeros((1, 512, 512, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [np.zeros((512, 512, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
def test_p_is_50_percent(self):
# 50% dropout
base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Dropout(p=0.5)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert not np.array_equal(observed, images)
percent_nonzero = len(observed.flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug_det.augment_images(images)
assert not np.array_equal(observed, images)
percent_nonzero = len(observed.flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
percent_nonzero = len(observed[0].flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug_det.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
percent_nonzero = len(observed[0].flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
def test_tuple_as_p(self):
# varying p
aug = iaa.Dropout(p=(0.0, 1.0))
aug_det = aug.to_deterministic()
images = np.ones((1, 8, 8, 1), dtype=np.uint8) * 255
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_list_as_p(self):
aug = iaa.Dropout(p=[0.0, 0.5, 1.0])
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 255
nb_seen = [0, 0, 0, 0]
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
n_dropped = np.sum(observed_aug == 0)
p_observed = n_dropped / observed_aug.size
if 0 <= p_observed <= 0.01:
nb_seen[0] += 1
elif 0.5 - 0.05 <= p_observed <= 0.5 + 0.05:
nb_seen[1] += 1
elif 1.0-0.01 <= p_observed <= 1.0:
nb_seen[2] += 1
else:
nb_seen[3] += 1
assert np.allclose(nb_seen[0:3], nb_iterations*0.33, rtol=0, atol=75)
assert nb_seen[3] < 30
def test_stochastic_parameter_as_p(self):
# varying p by stochastic parameter
aug = iaa.Dropout(p=iap.Binomial(1-iap.Choice([0.0, 0.5])))
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 255
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_images(images)
p = np.mean(observed == 0)
if 0.4 < p < 0.6:
seen[0] += 1
elif p < 0.1:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exception for wrong parameter datatype
got_exception = False
try:
_aug = iaa.Dropout(p="test")
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.Dropout(p=1.0)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.Dropout(p=0.5, per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarseDropout(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_zero(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0, size_px=4, size_percent=None, per_channel=False, min_size=4)
observed = aug.augment_image(base_img)
expected = base_img
assert np.array_equal(observed, expected)
def test_p_is_one(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=1.0, size_px=4, size_percent=None, per_channel=False, min_size=4)
observed = aug.augment_image(base_img)
expected = np.zeros_like(base_img)
assert np.array_equal(observed, expected)
def test_p_is_50_percent(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0.5, size_px=1, size_percent=None, per_channel=False, min_size=1)
averages = []
for _ in sm.xrange(50):
observed = aug.augment_image(base_img)
averages.append(np.average(observed))
assert all([v in [0, 100] for v in averages])
assert 50 - 20 < np.average(averages) < 50 + 20
def test_size_percent(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0.5, size_px=None, size_percent=0.001, per_channel=False, min_size=1)
averages = []
for _ in sm.xrange(50):
observed = aug.augment_image(base_img)
averages.append(np.average(observed))
assert all([v in [0, 100] for v in averages])
assert 50 - 20 < np.average(averages) < 50 + 20
def test_per_channel(self):
aug = iaa.CoarseDropout(p=0.5, size_px=1, size_percent=None, per_channel=True, min_size=1)
base_img = np.ones((4, 4, 3), dtype=np.uint8) * 100
found = False
for _ in sm.xrange(100):
observed = aug.augment_image(base_img)
avgs = np.average(observed, axis=(0, 1))
if len(set(avgs)) >= 2:
found = True
break
assert found
def test_stochastic_parameter_as_p(self):
# varying p by stochastic parameter
aug = iaa.CoarseDropout(p=iap.Binomial(1-iap.Choice([0.0, 0.5])), size_px=50)
images = np.ones((1, 100, 100, 1), dtype=np.uint8) * 255
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_images(images)
p = np.mean(observed == 0)
if 0.4 < p < 0.6:
seen[0] += 1
elif p < 0.1:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exception for bad parameters
got_exception = False
try:
_ = iaa.CoarseDropout(p="test")
except Exception:
got_exception = True
assert got_exception
def test___init___size_px_and_size_percent_both_none(self):
got_exception = False
try:
_ = iaa.CoarseDropout(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarseDropout(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarseDropout(p=0.5, size_px=10, per_channel=True,
random_state=1)
runtest_pickleable_uint8_img(aug, iterations=10, shape=(40, 40, 3))
class TestDropout2d(unittest.TestCase):
def setUp(self):
reseed()
def test___init___defaults(self):
aug = iaa.Dropout2d(p=0)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
assert aug.nb_keep_channels == 1
def test___init___p_is_float(self):
aug = iaa.Dropout2d(p=0.7)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 0.3)
assert aug.nb_keep_channels == 1
def test___init___nb_keep_channels_is_int(self):
aug = iaa.Dropout2d(p=0, nb_keep_channels=2)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
assert aug.nb_keep_channels == 2
def test_no_images_in_batch(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
heatmaps = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=heatmaps)
assert np.allclose(heatmaps_aug.arr_0to1, heatmaps.arr_0to1)
def test_p_is_1(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_heatmaps(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, 0.0)
def test_p_is_1_segmentation_maps(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, 0.0)
def test_p_is_1_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert cbaoi_aug.items == []
def test_p_is_1_heatmaps__keep_one_channel(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_1_segmentation_maps__keep_one_channel(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_1_cbaois__keep_one_channel(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_0(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.array_equal(image_aug, image)
def test_p_is_0_heatmaps(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_0_segmentation_maps(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_0_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_075(self):
image = np.full((1, 1, 3000), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=0.75, nb_keep_channels=0)
image_aug = aug(image=image)
nb_kept = np.sum(image_aug == 255)
nb_dropped = image.shape[2] - nb_kept
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.isclose(nb_dropped, image.shape[2]*0.75, atol=75)
def test_force_nb_keep_channels(self):
image = np.full((1, 1, 3), 255, dtype=np.uint8)
images = np.array([image] * 1000)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
images_aug = aug(images=images)
ids_kept = [np.nonzero(image[0, 0, :]) for image in images_aug]
ids_kept_uq = np.unique(ids_kept)
nb_kept = np.sum(images_aug == 255)
nb_dropped = (len(images) * images.shape[3]) - nb_kept
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
# on average, keep 1 of 3 channels
# due to p=1.0 we expect to get exactly 2/3 dropped
assert np.isclose(nb_dropped,
(len(images)*images.shape[3])*(2/3), atol=1)
# every channel dropped at least once, i.e. which one is kept is random
assert sorted(ids_kept_uq.tolist()) == [0, 1, 2]
def test_some_images_below_nb_keep_channels(self):
image_2c = np.full((1, 1, 2), 255, dtype=np.uint8)
image_3c = np.full((1, 1, 3), 255, dtype=np.uint8)
images = [image_2c if i % 2 == 0 else image_3c
for i in sm.xrange(100)]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=2)
images_aug = aug(images=images)
for i, image_aug in enumerate(images_aug):
assert np.sum(image_aug == 255) == 2
if i % 2 == 0:
assert np.sum(image_aug == 0) == 0
else:
assert np.sum(image_aug == 0) == 1
def test_all_images_below_nb_keep_channels(self):
image = np.full((1, 1, 2), 255, dtype=np.uint8)
images = np.array([image] * 100)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
images_aug = aug(images=images)
nb_kept = np.sum(images_aug == 255)
nb_dropped = (len(images) * images.shape[3]) - nb_kept
assert nb_dropped == 0
def test_get_parameters(self):
aug = iaa.Dropout2d(p=0.7, nb_keep_channels=2)
params = aug.get_parameters()
assert isinstance(params[0], iap.Binomial)
assert np.isclose(params[0].p.value, 0.3)
assert params[1] == 2
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.full(shape, 255, dtype=np.uint8)
aug = iaa.Dropout2d(1.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_other_dtypes_bool(self):
image = np.full((1, 1, 10), 1, dtype=bool)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == "bool"
assert np.sum(image_aug == 1) == 3
assert np.sum(image_aug == 0) == 7
def test_other_dtypes_uint_int(self):
dts = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, int(center_value), max_value]
for value in values:
with self.subTest(dtype=dt, value=value):
image = np.full((1, 1, 10), value, dtype=dt)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == dt
if value == 0:
assert np.sum(image_aug == value) == 10
else:
assert np.sum(image_aug == value) == 3
assert np.sum(image_aug == 0) == 7
def test_other_dtypes_float(self):
dts = ["float16", "float32", "float64", "float128"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, -10.0, center_value, 10.0, max_value]
atol = 1e-3*max_value if dt == "float16" else 1e-9 * max_value
_isclose = functools.partial(np.isclose, atol=atol, rtol=0)
for value in values:
with self.subTest(dtype=dt, value=value):
image = np.full((1, 1, 10), value, dtype=dt)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == dt
if _isclose(value, 0.0):
assert np.sum(_isclose(image_aug, value)) == 10
else:
assert (
np.sum(_isclose(image_aug, np.float128(value)))
== 3)
assert np.sum(image_aug == 0) == 7
def test_pickleable(self):
aug = iaa.Dropout2d(p=0.5, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3, shape=(1, 1, 50))
class TestTotalDropout(unittest.TestCase):
def setUp(self):
reseed()
def test___init___p(self):
aug = iaa.TotalDropout(p=0)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
def test_p_is_1(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=1.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_multiple_images_list(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = [image, image, image]
aug = iaa.TotalDropout(p=1.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_multiple_images_array(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = np.array([image, image, image], dtype=np.uint8)
aug = iaa.TotalDropout(p=1.0)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
assert np.sum(images_aug) == 0
def test_p_is_1_heatmaps(self):
aug = iaa.TotalDropout(p=1.0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, 0.0)
def test_p_is_1_segmentation_maps(self):
aug = iaa.TotalDropout(p=1.0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, 0.0)
def test_p_is_1_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.TotalDropout(p=1.0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert cbaoi_aug.items == []
def test_p_is_0(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=0.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert | np.array_equal(image_aug, image) | numpy.array_equal |
import numpy as np
from math import ceil
from scipy.stats import norm
from TaPR import compute_precision_recall
from data_loader import _count_anomaly_segments
n_thresholds = 1000
def _simulate_thresholds(rec_errors, n, verbose):
    # sweep n candidate thresholds from the minimum to the maximum anomaly score observed on the test data
thresholds, step_size = [], abs(np.max(rec_errors) - np.min(rec_errors)) / n
th = np.min(rec_errors)
if verbose:
        print(f'Threshold Range: ({np.min(rec_errors)}, {np.max(rec_errors)}) with Step Size: {step_size}')
for i in range(n):
thresholds.append(float(th))
th = th + step_size
return thresholds
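# _simulate_thresholds example (sketch): for reconstruction errors spanning [0.0, 1.0] and
# n=4, the candidates are [0.0, 0.25, 0.5, 0.75]; the maximum score itself is excluded.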
def _flatten_anomaly_scores(values, stride, flatten=False):
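    # With flatten=True the windows overlap: keep only the first `stride` steps of every
    # window except the last, which is taken in full, to rebuild one continuous sequence.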
flat_seq = []
if flatten:
for i, x in enumerate(values):
if i == len(values) - 1:
flat_seq = flat_seq + list(np.ravel(x).astype(float))
else:
flat_seq = flat_seq + list(np.ravel(x[:stride]).astype(float))
else:
flat_seq = list(np.ravel(values).astype(float))
return flat_seq
def compute_anomaly_scores(x, rec_x, scoring='square', x_val=None, rec_val=None):
# average anomaly scores from different sensors/channels/metrics/variables (in case of multivariate time series)
if scoring == 'absolute':
return np.mean(np.abs(x - rec_x), axis=-1)
elif scoring == 'square':
return np.mean(np.square(x - rec_x), axis=-1)
elif scoring == 'normal':
if x_val is not None and rec_val is not None:
val_rec_err = x_val - rec_val
test_rec_err = x - rec_x
mu, std = norm.fit(val_rec_err)
return (test_rec_err - mu).T * std ** -1 * (test_rec_err - mu)
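    # Note: an unrecognized `scoring` value, or 'normal' without validation data, falls
    # through and implicitly returns None; callers are expected to pass a valid option.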
def compute_metrics(anomaly_scores, labels, label_segments=None, n=n_thresholds, delta=0.01, alpha=0.5, theta=0.5, stride=1, verbose=False):
if label_segments is None:
label_segments = []
thresholds = _simulate_thresholds(anomaly_scores, n, verbose)
correct_count, correct_ratio = [], []
precision, recall, f1 = [], [], []
flat_seq = _flatten_anomaly_scores(anomaly_scores, stride, flatten=len(anomaly_scores.shape) == 2)
    print(f'Evaluating {len(thresholds)} candidate thresholds')
for th in thresholds:
pred_anomalies = np.zeros(len(flat_seq)).astype(int) # default with no anomaly
pred_anomalies[np.where(np.array(flat_seq) > th)[0]] = 1 # assign 1 if scores > threshold
_, pred_segments = _count_anomaly_segments(pred_anomalies)
if len(labels) != len(pred_anomalies):
            print(f'evaluating with mismatched lengths: Labels: {len(labels)} vs. Preds: {len(pred_anomalies)}')
labels = labels[-len(pred_anomalies):] # ref. OmniAnomaly
            print(f'after trimming: Labels: {len(labels)} vs. Preds: {len(pred_anomalies)}')
anomaly_lengths = []
for seg in label_segments:
anomaly_lengths.append(len(seg))
TaD = 0 if len(anomaly_lengths) == 0 else np.ceil(np.mean(anomaly_lengths) * delta).astype(int)
TaP, TaR = compute_precision_recall(pred_anomalies, labels, theta=theta, delta=TaD, alpha=alpha, verbose=verbose)
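        # compute_accuracy is assumed to be defined elsewhere in this project (it is not
        # imported above) and to return a (correct_count, correct_ratio) pair per threshold.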
count, ratio = compute_accuracy(pred_segments, label_segments, delta)
precision.append(float(TaP))
recall.append(float(TaR))
f1.append(float(2 * (TaP * TaR) / (TaP + TaR + 1e-7)))
correct_count.append(int(count))
correct_ratio.append(float(ratio))
return {
'precision': | np.mean(precision) | numpy.mean |
"""Module with abstract classes."""
__all__ = ['Inlet', 'UnitOperation',
'RtdModel', 'UserInterface',
'PDF', 'ChromatographyLoadBreakthrough',
'ParameterSetList']
__version__ = '0.7.1'
__author__ = '<NAME>'
import typing as _typing
import numpy as _np
from abc import ABC as _ABC, abstractmethod as _abstractmethod
from collections import OrderedDict as _OrderedDict
from bio_rtd import logger as _logger
from bio_rtd import adj_par as _adj_par
from bio_rtd import utils as _utils
class DefaultLoggerLogic(_ABC):
# noinspection PyProtectedMember
"""Default binding of the `RtdLogger` to a class.
The class holds a reference to a :class:`bio_rtd.logger.RtdLogger`
instance. When the class receives the instance, it plants a data
tree into it. If the class is asked to provide the instance before
it received one, then an instance of
:class:`bio_rtd.logger.DefaultLogger` is created and passed on.
Parameters
----------
logger_parent_id
Custom unique id that belongs to the instance of the class.
The data tree of this instance is stored in
:class:`bio_rtd.logger.RtdLogger` under the `logger_parent_id`.
Examples
--------
>>> logger_parent_id = "parent_unit_operation"
>>> l = DefaultLoggerLogic(logger_parent_id)
>>> isinstance(l.log, _logger.DefaultLogger)
True
>>> # Log error: DefaultLogger raises RuntimeError.
>>> l.log.e("Error Description")
Traceback (most recent call last):
RuntimeError: Error Description
>>> # Log waring: DefaultLogger prints it.
>>> l.log.w("Warning Description")
Warning Description
>>> # Log info: DefaultLogger ignores it.
>>> l.log.i("Info")
>>> l.log.log_data = True
>>> l.log.log_level = _logger.RtdLogger.DEBUG
>>> l.log.i_data(l._log_tree, "a", 3) # store value in logger
>>> l.log.d_data(l._log_tree, "b", 7) # store at DEBUG level
>>> l.log.get_data_tree(logger_parent_id)["b"]
7
>>> l.log = _logger.StrictLogger()
>>> # Log waring: StrictLogger raises RuntimeError.
>>> l.log.w("Warning Info")
Traceback (most recent call last):
RuntimeError: Warning Info
See Also
--------
:class:`bio_rtd.logger.DefaultLogger`
"""
def __init__(self, logger_parent_id: str):
self._instance_id = logger_parent_id
self._log_entity_id = logger_parent_id
self._logger: _typing.Union[_logger.RtdLogger, None] = None
self._log_tree = dict() # place to store logged data
@property
def log(self) -> _logger.RtdLogger:
"""Reference of the `RtdLogger` instance.
Setter also plants instance data tree into passed logger.
If logger is requested, but not yet set, then a
:class:`bio_rtd.logger.DefaultLogger` is instantiated.
"""
if self._logger is None:
self.log = _logger.DefaultLogger() # init default logger
return self._logger
@log.setter
def log(self, logger: _logger.RtdLogger):
self._logger = logger
self._logger.set_data_tree(self._log_entity_id, self._log_tree)
def set_logger_from_parent(self, parent_id: str,
logger: _logger.RtdLogger):
"""Inherit logger from parent.
Parameters
----------
parent_id
Unique identifier of parent instance.
logger
Logger from parent instance.
"""
self._logger = logger
self._log_entity_id = f"{parent_id}/{self._instance_id}"
self._logger.set_data_tree(self._log_entity_id,
self._log_tree)
class Inlet(DefaultLoggerLogic, _ABC):
"""Generates starting flow rate and concentration profiles.
Parameters
----------
t
Simulation time vector.
Starts with 0 and has a constant time step.
species_list
List with names of simulating process fluid species.
inlet_id
Unique identifier of an instance. It is stored in :attr:`uo_id`.
gui_title
Readable title of an instance.
"""
def __init__(self, t: _np.ndarray, species_list: _typing.Sequence[str],
inlet_id: str, gui_title: str):
super().__init__(inlet_id) # logger
# Assert proper time vector.
assert t[0] == 0, "t should start with 0"
assert len(t.shape) == 1, "t should be a 1D np.ndarray"
self._t = _np.linspace(0, t[-1], t.size)
self._dt = t[-1] / (t.size - 1)
assert _np.all(_np.abs(self._t - t) < 0.001 * self._dt / t.size), \
"t should have a fixed step size"
# Species
self.species_list: _typing.Sequence[str] = species_list
"""List with names of simulating process fluid species."""
self._n_species = len(self.species_list)
# Strings
self.uo_id: str = inlet_id
"""Unique identifier of the instance."""
self.gui_title: str = gui_title
"""Human readable title (for plots)."""
# Placeholders
self.adj_par_list: _typing.Sequence[_adj_par.AdjustableParameter] = ()
"""List of adjustable parameters exposed to the GUI."""
# Outputs
self._f_out = _np.zeros_like(t)
self._c_out = _np.zeros([self._n_species, t.size])
def get_t(self) -> _np.ndarray:
"""Get simulation time vector."""
return self._t
def get_n_species(self) -> int:
"""Get number of process fluid species."""
return self._n_species
@_abstractmethod
def refresh(self): # pragma: no cover
"""Updates output profiles.
Internally it updates `self._f_out` and `self._c_out` based on
instance attribute values.
"""
pass
def get_result(self) -> _typing.Tuple[_np.ndarray, _np.ndarray]:
"""Get flow rate and concentration profiles.
Returns
-------
f_out
Flow rate profile.
c_out
Concentration profile.
"""
return self._f_out, self._c_out
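# Hedged sketch (not part of bio_rtd): a minimal concrete `Inlet` with constant
# flow rate and constant concentrations, illustrating what `refresh` is
# expected to populate. The class name and parameters are illustrative only.
class _ConstantInletSketch(Inlet):
    def __init__(self, t, species_list, f_const, c_const, inlet_id, gui_title=""):
        super().__init__(t, species_list, inlet_id, gui_title)
        self._f_const = float(f_const)
        self._c_const = _np.asarray(c_const, dtype=float)
        assert self._c_const.size == self._n_species
        self.refresh()
    def refresh(self):
        # Constant flow rate over the entire simulation time.
        self._f_out = _np.full_like(self._t, self._f_const)
        # Constant concentration profile for each species.
        self._c_out = _np.outer(self._c_const, _np.ones_like(self._t))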
class UnitOperation(DefaultLoggerLogic, _ABC):
"""Processes flow rate and concentration profiles.
Parameters
----------
t
Simulation time vector.
Starts with 0 and has a constant time step.
uo_id
Unique identifier.
gui_title
Readable title for GUI.
"""
def __init__(self, t: _np.ndarray, uo_id: str, gui_title: str = ""):
super().__init__(uo_id) # logger
# simulation time vector
assert t[0] == 0, "Time vector must start with 0"
self._t = t
self._dt = t[-1] / (t.size - 1) # time step
# id and title
self.uo_id: str = uo_id
"""Unique identifier of the instance"""
self.gui_title: str = gui_title
"""Readable title for GUI"""
# adjustable parameter list
self.adj_par_list = []
"""list of :class:`bio_rtd.adj_par.AdjustableParameter`: List
of adjustable parameters exposed to the GUI."""
# hide unit operation from plots
self.gui_hidden: bool = False
"""Hide the of the unit operation (default False)."""
# start-up phase (optional initial delay)
self.discard_inlet_until_t: float = -1
"""Discard inlet until given time."""
self.discard_inlet_until_min_c: _np.ndarray = _np.array([])
"""Discard inlet until given concentration is reached."""
self.discard_inlet_until_min_c_rel: _np.ndarray = _np.array([])
"""Discard inlet until given concentration relative to is reached.
Specified concentration is relative to the max concentration.
"""
self.discard_inlet_n_cycles: int = -1
"""Discard first n cycles of the periodic inlet flow rate profile."""
        # shut-down phase (optional early stop)
self.discard_outlet_until_t: float = -1
"""Discard outlet until given time."""
self.discard_outlet_until_min_c: _np.ndarray = _np.array([])
"""Discard outlet until given concentration is reached."""
self.discard_outlet_until_min_c_rel: _np.ndarray = _np.array([])
"""Discard outlet until given concentration relative to is reached.
Specified concentration is relative to the max concentration.
"""
self.discard_outlet_n_cycles: int = -1
"""Discard first n cycles of the periodic outlet flow rate profile."""
# placeholders, populated during simulation
self._c: _np.ndarray = _np.array([]) # concentration profiles
        self._f: _np.ndarray = _np.array([])  # flow rate profile
self._n_species: int = 0 # number of species
def _assert_valid_species_list(self, species: _typing.Sequence[int]):
"""Species indexes start with 0.
List must be ordered in ascending order (to prevent bugs).
List must have unique values (again, to prevent bugs).
"""
if len(species) == 0:
self.log.w("Species list is empty")
else:
assert max(species) < self._n_species, \
"Index of species should be less than number of species"
assert min(species) >= 0, \
"Index of species should not be negative"
assert len(set(species)) == len(species), \
"Vector with species should not have duplicate values"
            assert sorted(species) == list(species), \
                "Values in vector with species must be ascending"
def _is_flow_box_shaped(self) -> bool:
"""Constant profile with optional leading or trailing zeros."""
assert _np.all(self._f >= 0), "Flow rate is negative!!!"
if _np.all(self._f == 0):
self.log.w("Flow rate is 0!")
return False
max_flow_start, max_flow_end = \
_utils.vectors.true_start_and_end(self._f == self._f.max())
if _np.any(self._f[:max_flow_start] > 0):
return False
elif _np.any(self._f[max_flow_end:] > 0):
return False
elif _np.any(self._f[max_flow_start:max_flow_end] != self._f.max()):
return False
else:
return True
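    # Hedged illustration (not part of the original module): a profile such as
    # [0, 0, 5, 5, 5, 0] is box shaped, while [0, 5, 3, 5, 0] is not, because a
    # value between the first and last occurrence of the maximum differs from
    # the maximum.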
def _i_flow_on(self) -> _typing.Sequence[int]:
"""Detect when the flow rate switches from off to on.
In case of periodic flow rate, the function returns all
switching events.
Returns
-------
i_interval_start
Indexes of time points at which the flow rate turns on.
Each index corresponds to a leading non-zero value.
"""
if _np.all(self._f == 0):
self.log.w("Flow rate is 0!")
return []
assert _np.all(self._f[self._f != 0] == self._f.max()), \
"flow rate must have a constant 'on' value"
return list(_np.argwhere(_np.diff(self._f, prepend=0) > 0).flatten())
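    # Hedged illustration (not part of the original module): for a periodic
    # on/off profile f = [0, 2, 2, 0, 2, 2, 0], _np.diff(f, prepend=0) > 0 at
    # indexes 1 and 4, so this method returns [1, 4].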
def _assert_periodic_flow(self) -> _typing.Tuple[_typing.Sequence[int],
float,
float]:
"""Assert and provides info about periodic flow rate.
Only last period is allowed to be shorter than others.
Returns
-------
i_flow_start_list
Indexes of time-points at which the flow rate gets turned on
i_flow_on_average
Number of time-points of flow 'on' interval.
t_cycle_duration
Duration of average cycle ('on' + 'off' interval)
"""
# Get all flow on positions.
i_flow_start_list = self._i_flow_on()
assert len(i_flow_start_list) > 0, "Flow is 0"
assert len(i_flow_start_list) > 1, \
"Periodic flow must have at least 2 cycles"
# Get average cycle duration.
t_cycle_duration = _np.mean(_np.diff(i_flow_start_list)) * self._dt
        assert _np.all(_np.abs(
            _np.diff(i_flow_start_list) * self._dt - t_cycle_duration
        ) <= self._dt), "Periodic flow must have a constant cycle duration"