import numpy as np
import torch
import matplotlib.colors
import matplotlib.pylab
from fiery.utils.instance import predict_instance_segmentation_and_trajectories
DEFAULT_COLORMAP = matplotlib.pylab.cm.jet
def flow_to_image(flow: np.ndarray, autoscale: bool = False) -> np.ndarray:
"""
Applies a colour map to flow, which should be a 2-channel array of shape 2xHxW. Returns a HxWx3 numpy image
Code adapted from: https://github.com/liruoteng/FlowNet/blob/master/models/flownet/scripts/flowlib.py
"""
u = flow[0, :, :]
v = flow[1, :, :]
# Convert to polar coordinates
rad = np.sqrt(u ** 2 + v ** 2)
maxrad = np.max(rad)
# Normalise flow maps
if autoscale:
u /= maxrad + np.finfo(float).eps
v /= maxrad + np.finfo(float).eps
# visualise flow with cmap
return np.uint8(compute_color(u, v) * 255)
def _normalise(image: np.ndarray) -> np.ndarray:
lower = np.min(image)
delta = np.max(image) - lower
if delta == 0:
delta = 1
image = (image.astype(np.float32) - lower) / delta
return image
def apply_colour_map(
image: np.ndarray, cmap: matplotlib.colors.LinearSegmentedColormap = DEFAULT_COLORMAP, autoscale: bool = False
) -> np.ndarray:
"""
Applies a colour map to the given 1, 2 or 3 channel numpy image. If 2 or 3 channel, must be CxHxW.
Returns a HxWx3 numpy image
"""
if image.ndim == 2 or (image.ndim == 3 and image.shape[0] == 1):
if image.ndim == 3:
image = image[0]
# grayscale scalar image
if autoscale:
image = _normalise(image)
return cmap(image)[:, :, :3]
if image.shape[0] == 2:
# 2 dimensional UV
return flow_to_image(image, autoscale=autoscale)
if image.shape[0] == 3:
# normalise rgb channels
if autoscale:
image = _normalise(image)
return np.transpose(image, axes=[1, 2, 0])
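# --- Illustrative usage sketch (not part of the original module) ---
# apply_colour_map() accepts a 1-channel HxW array (mapped through cmap), a 2xHxW
# flow field (rendered via flow_to_image, which relies on compute_color from the
# referenced flowlib source), or a 3xHxW RGB array (transposed to HxWx3).
# A minimal grayscale example with synthetic data; the helper name below is hypothetical:
def _example_apply_colour_map():
    heatmap = np.random.rand(64, 64).astype(np.float32)  # HxW scalar image
    rgb = apply_colour_map(heatmap, autoscale=True)       # HxWx3 floats in [0, 1]
    return rgb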
import numpy as np
from scipy.optimize import curve_fit
import sys
from collections import OrderedDict
"""
This header script stores a number of autocorrelation classes and handlers.
Its intent is to containerise and hide the varied levels of accuracy needed to fit
a large number of C(t) curves from external scripts.
"""
class autoCorrelations:
"""
Container class for handling a set of autocorrelation models, e.g. from a single file or protein.
Adapted to also contain target data
"""
def __init__(self):
self.nModels = 0
self.model = OrderedDict()
self.nTargets = 0
self.DeltaT = OrderedDict()
self.Decay = OrderedDict()
self.dDecay = OrderedDict()
def get_names(self):
return np.array( [k for k in self.model.keys()] )
def get_params_as_list(self):
keys = self.model.keys()
S2 = [ self.model[k].S2 for k in keys ]
C = [ self.model[k].C for k in keys ]
tau = [ self.model[k].tau for k in keys ]
S2Fast = [self.model[k].calc_S2Fast() for k in keys ]
return S2, C, tau, S2Fast
def set_zeta(self, zeta):
for m in self.model.values():
m.set_zeta(zeta)
def get_zeta(self):
for m in self.model.values():
return m.get_zeta()
def add_model(self, key, name=None, listC=[], listTau=[], S2=None, bS2Fast=False, bSort=True):
"""
Return the last added model for further manipulation.
"""
if name is None:
name=key
self.model[key] = autoCorrelationModel(name, listC, listTau, S2, bS2Fast, bSort)
self.nModels = len(self.model)
return self.model[key]
def get_nth_model(self, n):
keys=self.get_names()
return self.model[keys[n]]
def remove_model(self, key=None, index=None):
if not key is None:
self.model.pop(key)
elif not index is None:
keys = [k for k in self.model.keys()]
self.model.pop(keys[index])
else:
print("= = = ERROR in autoCorrelations.remove_model(); it needs at least one optional argument!)", file=sys.stderr)
return
self.nModels=len(self.model)
def rename_models(self, listNames):
if len(listNames) != len(self.model):
print("= = = ERROR in autoCorrelations.rename_model(); length of lists are not equal!", file=sys.stderr)
return
for k, n in zip(self.model.keys(),listNames):
self.model[k].name=n
def report(self):
print("Number of C(t) models loaded:", self.nModels )
print("Number of targets loaded:", self.nTargets )
def report_all_models(self):
for m in self.model.values():
m.report()
def add_target(self, key, DeltaT, Decay, dDecay):
self.DeltaT[key] = DeltaT
self.Decay[key] = Decay
self.dDecay[key] = dDecay
self.nTargets = len(self.DeltaT)
def import_target_array(self, keys, DeltaT, Decay, dDecay=None):
if dDecay is None:
for i, k in enumerate(keys):
self.add_target(k, DeltaT[i], Decay[i], None)
else:
for i, k in enumerate(keys):
self.add_target(k, DeltaT[i], Decay[i], dDecay[i])
def rescale_time(self, f):
if self.nModels > 0:
for k in self.model.keys():
self.model[k].tau *= f
if self.nTargets > 0:
for k in self.model.keys():
self.DeltaT[k] *= f
def export(self, fileName, style='xmgrace'):
fp = open(fileName, 'w')
s = 0
for k,m in self.model.items():
# = = = report() writes the fitted parameters as encoded xmgrace header comments.
m.report(style='xmgrace', fp=fp)
dt=self.DeltaT[k] ; Ct=self.Decay[k]
ymodel=m.eval(dt)
#Print the fitted Ct model into file
print( "@s%d legend \"Res %d\"" % (s, m.name), file=fp )
for j in range(len(ymodel)):
print("%8g %8g" % (dt[j], ymodel[j]), file=fp )
print( '&', file=fp )
for j in range(len(ymodel)):
print("%8g %8g" % (dt[j], Ct[j]), file=fp )
print( '&', file=fp )
s+=2
# WIP
return
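# Illustrative usage sketch (kept as comments, since autoCorrelationModel is only
# defined further below in this file). The array names dt_array/Ct_array are hypothetical:
#   acs = autoCorrelations()
#   acs.add_model(key=1, listC=[0.3, 0.2], listTau=[10.0, 500.0], S2=0.5)
#   acs.add_target(key=1, DeltaT=dt_array, Decay=Ct_array, dDecay=None)
#   acs.report()
#   acs.export('fits.xvg', style='xmgrace')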
class autoCorrelationModel:
"""
A class that handles modelling of an autocorrelation function via a set of exponentials.
Contains basic fitting and reporting functionalities.
The transient components are normally sorted on creation from fast to slow.
"""
# = = = Class dictionary.
dictGreek=np.array(['a','b','g','d','e','z','h'])
def __init__(self, name='Fit', listC=[], listTau=[], S2=None, bS2Fast=False, bSort=True):
self.name = name
self.nParams = 0
self.tau = np.array(listTau, dtype=float)
self.C = np.array(listC, dtype=float)
self.bS2Fast = bS2Fast
self.S2 = S2
self.nComps = len(self.C)
self.nParams = len(self.C)+len(self.tau)
self.bHasFit = False
self.zeta = 1.0
if bS2Fast:
self.nParams += 1
if self.S2 is None:
print("= = = ERROR: S2 must be given in fitParam initialisation if bS2Fast is set to True!")
sys.exit(1)
if self.S2 is None:
self.S2 = 1.0 - np.sum(self.C)
self.check_consistency()
if self.nComps>1 and bSort:
self.sort_components()
def check_consistency(self):
if self.nComps<1:
return
if len(self.C) != len(self.tau):
print("= = = ERROR: transient components in fitParam initialisation do not have matching number of parameters!")
sys.exit(1)
if not self.bS2Fast:
# All components must add to 1.0
sumS = self.S2+np.sum(self.C)
if not np.all( (np.isclose(sumS, 1.0, rtol=1e-6)) ):
print("= = = ERROR: Contribution of components in fitParam initialisation do not sum sufficeintly close to 1.00!")
sys.exit(1)
def copy(self):
new = autoCorrelationModel()
new.copy_from(self)
return new
def copy_from(self, src):
self.name = src.name
self.nParams = src.nParams
self.tau = np.copy(src.tau)
self.C = np.copy(src.C)
self.bS2Fast = src.bS2Fast
self.S2 = src.S2
self.nComps = src.nComps
self.bHasFit = src.bHasFit
if src.bHasFit:
self.set_uncertainties_from_list( src.get_uncertainties_as_list() )
self.chiSq = src.chiSq
def add_transient_component(self, C, tau):
self.tau = np.append(self.tau, tau)
self.C = np.append(self.C, C)
self.nComps += 1
self.nParams += 2
def calc_S2Fast(self):
if self.bS2Fast:
return 1.0 - self.S2 - np.sum(self.C)
else:
return 0.0
def sort_components(self):
inds = np.argsort(self.tau)
self.tau = self.tau[inds]
self.C = self.C[inds]
if self.bHasFit:
self.dtau = self.dtau[inds]
self.dC = self.dC[inds]
def set_zeta(self, zeta):
"""
Scaling factor for QM zero-point vibrations, which uniformly damps all component
constants C and S2, leaving their sum below 1.0 where S2Fast is not involved.
This is meant for downstream spin relaxation computations that ignore the S2Fast component.
It currently does not affect the test for unity or the computation of S2Fast.
The fast component should ideally account for zeta so that the total sum remains 1.0.
"""
self.zeta=zeta
def get_zeta(self):
return self.zeta
def report(self, style='stdout', fp=sys.stdout ):
if style == 'stdout':
print( "Name: %s" % self.name, file=fp )
if self.bHasFit:
print( ' chi-Square: %g ' % self.chiSq, file=fp )
if self.bS2Fast:
print( " S2_fast: %g" % self.calc_S2Fast(), file=fp)
for i in range(self.nComps):
print( " component %s, const.: %g +- %g" % (autoCorrelationModel.dictGreek[i], self.C[i], self.dC[i]), file=fp )
print( " component %s, tau: %g +- %g" % (autoCorrelationModel.dictGreek[i], self.tau[i], self.dtau[i]), file=fp )
print( " S2_0: %g +- %g" % (self.S2, self.dS2), file=fp )
else:
if self.bS2Fast:
print( " S2_fast: %g" % self.calc_S2Fast(), file=fp)
for i in range(self.nComps):
print( " component %s, const.: %g " % (autoCorrelationModel.dictGreek[i], self.C[i]), file=fp )
print( " component %s, tau: %g " % (autoCorrelationModel.dictGreek[i], self.tau[i]), file=fp )
print( " S2_0: %g" % self.S2, file=fp )
elif style == 'xmgrace':
# Print header into the Ct model file
print( '# Residue: %s ' % self.name, file=fp )
if self.bHasFit:
print( '# Chi-Square: %g ' % self.chiSq, file=fp )
if self.bS2Fast:
print( '# Param S2_fast: %g +- 0.0' % self.calc_S2Fast(), file=fp )
print( '# Param S2_0: %g +- %g' % (self.S2, self.dS2), file=fp )
else:
print( '# Param S2_0: %g +- 0.0' % self.S2, file=fp )
for i in range(self.nComps):
print( '# Param C_%s: %g +- %g' % (autoCorrelationModel.dictGreek[i], self.C[i], self.dC[i]), file=fp )
print( '# Param tau_%s: %g +- %g' % (autoCorrelationModel.dictGreek[i], self.tau[i], self.dtau[i]), file=fp )
else:
if self.bS2Fast:
print( '# Param S2_fast: %g' % self.calc_S2Fast(), file=fp )
print( '# Param S2_0: %g' % self.S2, file=fp )
for i in range(self.nComps):
print( '# Param C_%s: %g' % (autoCorrelationModel.dictGreek[i], self.C[i]), file=fp )
print( '# Param tau_%s: %g' % (autoCorrelationModel.dictGreek[i], self.tau[i]), file=fp )
else:
print("= = = ERROR: fitParam.report() does not recognise the style argument! "
"Choices are: stdout, xmgrace", file=sys.stderr)
def eval(self, DeltaT):
"""
Vectorised computation of C(t). DeltaT is expected to be a 1-D array that is broadcast along a new axis 0 against the exponential components.
"""
return self.zeta*( self.S2+np.sum(self.C[:,np.newaxis]*np.exp(-1.0*DeltaT[np.newaxis,:]/self.tau[:,np.newaxis]),axis=0) )
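# Worked example (illustrative, hypothetical numbers): for S2 = 0.8, C = [0.15],
# tau = [50.0] and zeta = 1.0, eval() computes
#   C(t) = zeta * ( S2 + sum_i C_i * exp(-t / tau_i) )
# so C(0) = 0.95 and C(t) decays towards S2 = 0.8 for t >> tau:
#   m = autoCorrelationModel('example', listC=[0.15], listTau=[50.0], S2=0.8, bS2Fast=True)
#   m.eval(np.array([0.0, 50.0, 500.0]))   # approx. [0.95, 0.855, 0.80]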
def calc_chiSq(self, DeltaT, Decay, dDecay=None):
if dDecay is None:
return np.mean(np.square(self.eval(DeltaT)-Decay))
else:
return np.mean(np.square(self.eval(DeltaT)-Decay)/dDecay)
def optimised_curve_fitting(self, DeltaT, Decay, dDecay=None, listDoG=[2,3,5,7,9], chiSqThreshold=0.5, fp=sys.stdout):
"""
Conduct multiple curve fits over the set of degrees of freedom given by listDoG.
"""
print("= = = Conducting optimised fit for %s with %s degrees of freedoms..." % (self.name, str(listDoG)), file=fp)
bFirst=True ; prev=self.copy()
for nParams in listDoG:
self.set_nParams( nParams )
chiSq, bQuality = self.conduct_curve_fitting(DeltaT, Decay, dDecay, bReInitialise=True)
print(" ...fit with %i params yield chiSq of %g" % (nParams, chiSq), file=fp)
if bFirst:
if np.all(bQuality):
prev.copy_from(self)
bFirst=False
continue
if not np.all(bQuality):
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import networkx as networkx
import numpy as numpy
import scipy as scipy
import scipy.integrate
class SEIRSModel():
"""
A class to simulate the Deterministic SEIRS Model
===================================================
Params: beta Rate of transmission (exposure)
sigma Rate of infection (upon exposure)
gamma Rate of recovery (upon infection)
xi Rate of re-susceptibility (upon recovery)
mu_I Rate of infection-related death
mu_0 Rate of baseline death
nu Rate of baseline birth
beta_D Rate of transmission (exposure) for individuals with detected infections
sigma_D Rate of infection (upon exposure) for individuals with detected infections
gamma_D Rate of recovery (upon infection) for individuals with detected infections
mu_D Rate of infection-related death for individuals with detected infections
theta_E Rate of baseline testing for exposed individuals
theta_I Rate of baseline testing for infectious individuals
psi_E Probability of positive test results for exposed individuals
psi_I Probability of positive test results for infectious individuals
q Probability of quarantined individuals interacting with others
initE Init number of exposed individuals
initI Init number of infectious individuals
initD_E Init number of detected exposed individuals
initD_I Init number of detected infectious individuals
initR Init number of recovered individuals
initF Init number of infection-related fatalities
(all remaining nodes initialized susceptible)
"""
def __init__(self, initN, beta, sigma, gamma, xi=0, mu_I=0, mu_0=0, nu=0, p=0,
beta_D=None, sigma_D=None, gamma_D=None, mu_D=None,
theta_E=0, theta_I=0, psi_E=0, psi_I=0, q=0,
initE=0, initI=10, initD_E=0, initD_I=0, initR=0, initF=0):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model Parameters:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.beta = beta
self.sigma = sigma
self.gamma = gamma
self.xi = xi
self.mu_I = mu_I
self.mu_0 = mu_0
self.nu = nu
self.p = p
# Testing-related parameters:
self.beta_D = beta_D if beta_D is not None else self.beta
self.sigma_D = sigma_D if sigma_D is not None else self.sigma
self.gamma_D = gamma_D if gamma_D is not None else self.gamma
self.mu_D = mu_D if mu_D is not None else self.mu_I
self.theta_E = theta_E if theta_E is not None else 0
self.theta_I = theta_I if theta_I is not None else 0
self.psi_E = psi_E if psi_E is not None else 0
self.psi_I = psi_I if psi_I is not None else 0
self.q = q if q is not None else 0
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Timekeeping:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.t = 0
self.tmax = 0 # will be set when run() is called
self.tseries = numpy.array([0])
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Counts of individuals with each state:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.N = numpy.array([int(initN)])
self.numE = numpy.array([int(initE)])
self.numI = numpy.array([int(initI)])
self.numD_E = numpy.array([int(initD_E)])
self.numD_I = numpy.array([int(initD_I)])
self.numR = numpy.array([int(initR)])
self.numF = numpy.array([int(initF)])
self.numS = numpy.array([self.N[-1] - self.numE[-1] - self.numI[-1] - self.numD_E[-1] - self.numD_I[-1] - self.numR[-1] - self.numF[-1]])
assert(self.numS[0] >= 0), "The specified initial population size N must be greater than or equal to the initial compartment counts."
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@staticmethod
def system_dfes(t, variables, beta, sigma, gamma, xi, mu_I, mu_0, nu,
beta_D, sigma_D, gamma_D, mu_D, theta_E, theta_I, psi_E, psi_I, q):
S, E, I, D_E, D_I, R, F = variables # variables is a list with compartment counts as elements
N = S + E + I + D_E + D_I + R
dS = - (beta*S*I)/N - q*(beta_D*S*D_I)/N + xi*R + nu*N - mu_0*S
dE = (beta*S*I)/N + q*(beta_D*S*D_I)/N - sigma*E - theta_E*psi_E*E - mu_0*E
dI = sigma*E - gamma*I - mu_I*I - theta_I*psi_I*I - mu_0*I
dDE = theta_E*psi_E*E - sigma_D*D_E - mu_0*D_E
dDI = theta_I*psi_I*I + sigma_D*D_E - gamma_D*D_I - mu_D*D_I - mu_0*D_I
dR = gamma*I + gamma_D*D_I - xi*R - mu_0*R
dF = mu_I*I + mu_D*D_I
return [dS, dE, dI, dDE, dDI, dR, dF]
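# Illustrative sketch (comments only; parameter values are hypothetical): the
# right-hand side above can be integrated directly, which is what run_epoch() does:
#   sol = scipy.integrate.solve_ivp(
#             lambda t, X: SEIRSModel.system_dfes(t, X, 0.3, 1/5.2, 1/12.4, 0, 0, 0, 0,
#                                                 0.3, 1/5.2, 1/12.4, 0, 0, 0, 0, 0, 0),
#             t_span=(0, 100), y0=[990, 0, 10, 0, 0, 0, 0], t_eval=numpy.arange(0, 100, 0.1))
#   S, E, I, D_E, D_I, R, F = sol['y']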
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def run_epoch(self, runtime, dt=0.1):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create a list of times at which the ODE solver should output system values.
# Append this list of times as the model's timeseries
t_eval = numpy.arange(start=self.t, stop=self.t+runtime, step=dt)
# Define the range of time values for the integration:
t_span = (self.t, self.t+runtime)
# Define the initial conditions as the system's current state:
# (which will be the t=0 condition if this is the first run of this model,
# else where the last sim left off)
init_cond = [self.numS[-1], self.numE[-1], self.numI[-1], self.numD_E[-1], self.numD_I[-1], self.numR[-1], self.numF[-1]]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Solve the system of differential eqns:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
solution = scipy.integrate.solve_ivp(lambda t, X: SEIRSModel.system_dfes(t, X, self.beta, self.sigma, self.gamma, self.xi, self.mu_I, self.mu_0, self.nu,
self.beta_D, self.sigma_D, self.gamma_D, self.mu_D, self.theta_E, self.theta_I, self.psi_E, self.psi_I, self.q
),
t_span=[self.t, self.tmax], y0=init_cond, t_eval=t_eval
)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Store the solution output as the model's time series and data series:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.tseries = numpy.append(self.tseries, solution['t'])
self.numS = numpy.append(self.numS, solution['y'][0])
self.numE = numpy.append(self.numE, solution['y'][1])
self.numI = numpy.append(self.numI, solution['y'][2])
self.numD_E = numpy.append(self.numD_E, solution['y'][3])
self.numD_I = numpy.append(self.numD_I, solution['y'][4])
self.numR = numpy.append(self.numR, solution['y'][5])
self.numF = numpy.append(self.numF, solution['y'][6])
self.t = self.tseries[-1]
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def run(self, T, dt=0.1, checkpoints=None, verbose=False):
if(T>0):
self.tmax += T
else:
return False
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Pre-process checkpoint values:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(checkpoints):
numCheckpoints = len(checkpoints['t'])
paramNames = ['beta', 'sigma', 'gamma', 'xi', 'mu_I', 'mu_0', 'nu',
'beta_D', 'sigma_D', 'gamma_D', 'mu_D',
'theta_E', 'theta_I', 'psi_E', 'psi_I', 'q']
for param in paramNames:
# For params that don't have given checkpoint values (or bad value given),
# set their checkpoint values to the value they have now for all checkpoints.
if(param not in list(checkpoints.keys())
or not isinstance(checkpoints[param], (list, numpy.ndarray))
or len(checkpoints[param])!=numCheckpoints):
checkpoints[param] = [getattr(self, param)]*numCheckpoints
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Run the simulation loop:
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if(not checkpoints):
self.run_epoch(runtime=self.tmax, dt=dt)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print("t = %.2f" % self.t)
if(verbose):
print("\t S = " + str(self.numS[-1]))
print("\t E = " + str(self.numE[-1]))
print("\t I = " + str(self.numI[-1]))
print("\t D_E = " + str(self.numD_E[-1]))
print("\t D_I = " + str(self.numD_I[-1]))
print("\t R = " + str(self.numR[-1]))
print("\t F = " + str(self.numF[-1]))
else: # checkpoints provided
for checkpointIdx, checkpointTime in enumerate(checkpoints['t']):
# Run the sim until the next checkpoint time:
self.run_epoch(runtime=checkpointTime-self.t, dt=dt)
# Having reached the checkpoint, update applicable parameters:
print("[Checkpoint: Updating parameters]")
for param in paramNames:
setattr(self, param, checkpoints[param][checkpointIdx])
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print("t = %.2f" % self.t)
if(verbose):
print("\t S = " + str(self.numS[-1]))
print("\t E = " + str(self.numE[-1]))
print("\t I = " + str(self.numI[-1]))
print("\t D_E = " + str(self.numD_E[-1]))
print("\t D_I = " + str(self.numD_I[-1]))
print("\t R = " + str(self.numR[-1]))
print("\t F = " + str(self.numF[-1]))
if(self.t < self.tmax):
self.run_epoch(runtime=self.tmax-self.t, dt=dt)
return True
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
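# Illustrative usage sketch (comments only; parameter values are hypothetical):
#   model = SEIRSModel(initN=10000, beta=0.25, sigma=1/5.2, gamma=1/12.4, initI=100)
#   model.run(T=300)
#   model.figure_infections()
# Time-varying parameters can be passed via checkpoints, e.g. halving beta at t=30:
#   model.run(T=300, checkpoints={'t': [30], 'beta': [0.125]})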
def total_num_infections(self, t_idx=None):
if(t_idx is None):
return (self.numE[:] + self.numI[:] + self.numD_E[:] + self.numD_I[:])
else:
return (self.numE[t_idx] + self.numI[t_idx] + self.numD_E[t_idx] + self.numD_I[t_idx])
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def plot(self, ax=None, plot_S='line', plot_E='line', plot_I='line',plot_R='line', plot_F='line',
plot_D_E='line', plot_D_I='line', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True):
import matplotlib.pyplot as pyplot
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create an Axes object if None provided:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(not ax):
fig, ax = pyplot.subplots()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Prepare data series to be plotted:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Fseries = self.numF/self.N if plot_percentages else self.numF
Eseries = self.numE/self.N if plot_percentages else self.numE
Dseries = (self.numD_E+self.numD_I)/self.N if plot_percentages else (self.numD_E+self.numD_I)
D_Eseries = self.numD_E/self.N if plot_percentages else self.numD_E
D_Iseries = self.numD_I/self.N if plot_percentages else self.numD_I
Iseries = self.numI/self.N if plot_percentages else self.numI
Rseries = self.numR/self.N if plot_percentages else self.numR
Sseries = self.numS/self.N if plot_percentages else self.numS
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the reference data:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(dashed_reference_results):
dashedReference_tseries = dashed_reference_results.tseries[::int(self.N/100)]
dashedReference_IDEstack = (dashed_reference_results.numI + dashed_reference_results.numD_I + dashed_reference_results.numD_E + dashed_reference_results.numE)[::int(self.N/100)] / (self.N if plot_percentages else 1)
ax.plot(dashedReference_tseries, dashedReference_IDEstack, color='#E0E0E0', linestyle='--', label='$I+D+E$ ('+dashed_reference_label+')', zorder=0)
if(shaded_reference_results):
shadedReference_tseries = shaded_reference_results.tseries
shadedReference_IDEstack = (shaded_reference_results.numI + shaded_reference_results.numD_I + shaded_reference_results.numD_E + shaded_reference_results.numE) / (self.N if plot_percentages else 1)
ax.fill_between(shaded_reference_results.tseries, shadedReference_IDEstack, 0, color='#EFEFEF', label='$I+D+E$ ('+shaded_reference_label+')', zorder=0)
ax.plot(shaded_reference_results.tseries, shadedReference_IDEstack, color='#E0E0E0', zorder=1)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the stacked variables:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
topstack = numpy.zeros_like(self.tseries)
if(any(Fseries) and plot_F=='stacked'):
ax.fill_between(numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, topstack+Fseries), topstack, color=color_F, alpha=0.5, label='$F$', zorder=2)
ax.plot( numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, topstack+Fseries), color=color_F, zorder=3)
topstack = topstack+Fseries
if(any(Eseries) and plot_E=='stacked'):
ax.fill_between(numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, topstack+Eseries), topstack, color=color_E, alpha=0.5, label='$E$', zorder=2)
ax.plot( numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, topstack+Eseries), color=color_E, zorder=3)
topstack = topstack+Eseries
if(combine_D and plot_D_E=='stacked' and plot_D_I=='stacked'):
ax.fill_between(numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, topstack+Dseries), topstack, color=color_D_E, alpha=0.5, label='$D_{all}$', zorder=2)
ax.plot( numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, topstack+Dseries), color=color_D_E, zorder=3)
topstack = topstack+Dseries
else:
if(any(D_Eseries) and plot_D_E=='stacked'):
ax.fill_between(numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, topstack+D_Eseries), topstack, color=color_D_E, alpha=0.5, label='$D_E$', zorder=2)
ax.plot( numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, topstack+D_Eseries), color=color_D_E, zorder=3)
topstack = topstack+D_Eseries
if(any(D_Iseries) and plot_D_I=='stacked'):
ax.fill_between(numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, topstack+D_Iseries), topstack, color=color_D_I, alpha=0.5, label='$D_I$', zorder=2)
ax.plot( numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, topstack+D_Iseries), color=color_D_I, zorder=3)
topstack = topstack+D_Iseries
if(any(Iseries) and plot_I=='stacked'):
ax.fill_between(numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, topstack+Iseries), topstack, color=color_I, alpha=0.5, label='$I$', zorder=2)
ax.plot( numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, topstack+Iseries), color=color_I, zorder=3)
topstack = topstack+Iseries
if(any(Rseries) and plot_R=='stacked'):
ax.fill_between(numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, topstack+Rseries), topstack, color=color_R, alpha=0.5, label='$R$', zorder=2)
ax.plot( numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, topstack+Rseries), color=color_R, zorder=3)
topstack = topstack+Rseries
if(any(Sseries) and plot_S=='stacked'):
ax.fill_between(numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, topstack+Sseries), topstack, color=color_S, alpha=0.5, label='$S$', zorder=2)
ax.plot( numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, topstack+Sseries), color=color_S, zorder=3)
topstack = topstack+Sseries
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the shaded variables:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(any(Fseries) and plot_F=='shaded'):
ax.fill_between(numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, Fseries), 0, color=color_F, alpha=0.5, label='$F$', zorder=4)
ax.plot( numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, Fseries), color=color_F, zorder=5)
if(any(Eseries) and plot_E=='shaded'):
ax.fill_between(numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, Eseries), 0, color=color_E, alpha=0.5, label='$E$', zorder=4)
ax.plot( numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, Eseries), color=color_E, zorder=5)
if(combine_D and (any(Dseries) and plot_D_E=='shaded' and plot_D_I=='shaded')):
ax.fill_between(numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, Dseries), 0, color=color_D_E, alpha=0.5, label='$D_{all}$', zorder=4)
ax.plot( numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, Dseries), color=color_D_E, zorder=5)
else:
if(any(D_Eseries) and plot_D_E=='shaded'):
ax.fill_between(numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, D_Eseries), 0, color=color_D_E, alpha=0.5, label='$D_E$', zorder=4)
ax.plot( numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, D_Eseries), color=color_D_E, zorder=5)
if(any(D_Iseries) and plot_D_I=='shaded'):
ax.fill_between(numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, D_Iseries), 0, color=color_D_I, alpha=0.5, label='$D_I$', zorder=4)
ax.plot( numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, D_Iseries), color=color_D_I, zorder=5)
if(any(Iseries) and plot_I=='shaded'):
ax.fill_between(numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, Iseries), 0, color=color_I, alpha=0.5, label='$I$', zorder=4)
ax.plot( numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, Iseries), color=color_I, zorder=5)
if(any(Sseries) and plot_S=='shaded'):
ax.fill_between(numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, Sseries), 0, color=color_S, alpha=0.5, label='$S$', zorder=4)
ax.plot( numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, Sseries), color=color_S, zorder=5)
if(any(Rseries) and plot_R=='shaded'):
ax.fill_between(numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, Rseries), 0, color=color_R, alpha=0.5, label='$R$', zorder=4)
ax.plot( numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, Rseries), color=color_R, zorder=5)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the line variables:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(any(Fseries) and plot_F=='line'):
ax.plot(numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, Fseries), color=color_F, label='$F$', zorder=6)
if(any(Eseries) and plot_E=='line'):
ax.plot(numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, Eseries), color=color_E, label='$E$', zorder=6)
if(combine_D and (any(Dseries) and plot_D_E=='line' and plot_D_I=='line')):
ax.plot(numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, Dseries), color=color_D_E, label='$D_{all}$', zorder=6)
else:
if(any(D_Eseries) and plot_D_E=='line'):
ax.plot(numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, D_Eseries), color=color_D_E, label='$D_E$', zorder=6)
if(any(D_Iseries) and plot_D_I=='line'):
ax.plot(numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, D_Iseries), color=color_D_I, label='$D_I$', zorder=6)
if(any(Iseries) and plot_I=='line'):
ax.plot(numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, Iseries), color=color_I, label='$I$', zorder=6)
if(any(Sseries) and plot_S=='line'):
ax.plot(numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, Sseries), color=color_S, label='$S$', zorder=6)
if(any(Rseries) and plot_R=='line'):
ax.plot(numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, Rseries), color=color_R, label='$R$', zorder=6)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the vertical line annotations:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(len(vlines)>0 and len(vline_colors)==0):
vline_colors = ['gray']*len(vlines)
if(len(vlines)>0 and len(vline_labels)==0):
vline_labels = [None]*len(vlines)
if(len(vlines)>0 and len(vline_styles)==0):
vline_styles = [':']*len(vlines)
for vline_x, vline_color, vline_style, vline_label in zip(vlines, vline_colors, vline_styles, vline_labels):
if(vline_x is not None):
ax.axvline(x=vline_x, color=vline_color, linestyle=vline_style, alpha=1, label=vline_label)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the plot labels:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ax.set_xlabel('days')
ax.set_ylabel('percent of population' if plot_percentages else 'number of individuals')
ax.set_xlim(0, (max(self.tseries) if not xlim else xlim))
ax.set_ylim(0, ylim)
if(plot_percentages):
ax.set_yticklabels(['{:,.0%}'.format(y) for y in ax.get_yticks()])
if(legend):
legend_handles, legend_labels = ax.get_legend_handles_labels()
ax.legend(legend_handles[::-1], legend_labels[::-1], loc='upper right', facecolor='white', edgecolor='none', framealpha=0.9, prop={'size': 8})
if(title):
ax.set_title(title, size=12)
if(side_title):
ax.annotate(side_title, (0, 0.5), xytext=(-45, 0), ha='right', va='center',
size=12, rotation=90, xycoords='axes fraction', textcoords='offset points')
return ax
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def figure_basic(self, plot_S='line', plot_E='line', plot_I='line',plot_R='line', plot_F='line',
plot_D_E='line', plot_D_I='line', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True,
figsize=(12,8), use_seaborn=True, show=True):
import matplotlib.pyplot as pyplot
fig, ax = pyplot.subplots(figsize=figsize)
if(use_seaborn):
import seaborn
seaborn.set_style('ticks')
seaborn.despine()
self.plot(ax=ax, plot_S=plot_S, plot_E=plot_E, plot_I=plot_I,plot_R=plot_R, plot_F=plot_F,
plot_D_E=plot_D_E, plot_D_I=plot_D_I, combine_D=combine_D,
color_S=color_S, color_E=color_E, color_I=color_I, color_R=color_R, color_F=color_F,
color_D_E=color_D_E, color_D_I=color_D_I, color_reference=color_reference,
dashed_reference_results=dashed_reference_results, dashed_reference_label=dashed_reference_label,
shaded_reference_results=shaded_reference_results, shaded_reference_label=shaded_reference_label,
vlines=vlines, vline_colors=vline_colors, vline_styles=vline_styles, vline_labels=vline_labels,
ylim=ylim, xlim=xlim, legend=legend, title=title, side_title=side_title, plot_percentages=plot_percentages)
if(show):
pyplot.show()
return fig, ax
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def figure_infections(self, plot_S=False, plot_E='stacked', plot_I='stacked',plot_R=False, plot_F=False,
plot_D_E='stacked', plot_D_I='stacked', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True,
figsize=(12,8), use_seaborn=True, show=True):
import matplotlib.pyplot as pyplot
fig, ax = pyplot.subplots(figsize=figsize)
if(use_seaborn):
import seaborn
seaborn.set_style('ticks')
seaborn.despine()
self.plot(ax=ax, plot_S=plot_S, plot_E=plot_E, plot_I=plot_I,plot_R=plot_R, plot_F=plot_F,
plot_D_E=plot_D_E, plot_D_I=plot_D_I, combine_D=combine_D,
color_S=color_S, color_E=color_E, color_I=color_I, color_R=color_R, color_F=color_F,
color_D_E=color_D_E, color_D_I=color_D_I, color_reference=color_reference,
dashed_reference_results=dashed_reference_results, dashed_reference_label=dashed_reference_label,
shaded_reference_results=shaded_reference_results, shaded_reference_label=shaded_reference_label,
vlines=vlines, vline_colors=vline_colors, vline_styles=vline_styles, vline_labels=vline_labels,
ylim=ylim, xlim=xlim, legend=legend, title=title, side_title=side_title, plot_percentages=plot_percentages)
if(show):
pyplot.show()
return fig, ax
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
class SEIRSNetworkModel():
"""
A class to simulate the SEIRS Stochastic Network Model
===================================================
Params: G Network adjacency matrix (numpy array) or Networkx graph object.
beta Rate of transmission (exposure) (global)
beta_local Rate(s) of transmission (exposure) for adjacent individuals (optional)
sigma Rate of infection (upon exposure)
gamma Rate of recovery (upon infection)
xi Rate of re-susceptibility (upon recovery)
mu_I Rate of infection-related death
mu_0 Rate of baseline death
nu Rate of baseline birth
p Probability of interaction outside adjacent nodes
Q Quarantine adjacency matrix (numpy array) or Networkx graph object.
beta_D Rate of transmission (exposure) for individuals with detected infections (global)
beta_D_local Rate(s) of transmission (exposure) for adjacent individuals with detected infections (optional)
sigma_D Rate of infection (upon exposure) for individuals with detected infections
gamma_D Rate of recovery (upon infection) for individuals with detected infections
mu_D Rate of infection-related death for individuals with detected infections
theta_E Rate of baseline testing for exposed individuals
theta_I Rate of baseline testing for infectious individuals
phi_E Rate of contact tracing testing for exposed individuals
phi_I Rate of contact tracing testing for infectious individuals
psi_E Probability of positive test results for exposed individuals
psi_I Probability of positive test results for infectious individuals
q Probability of quarantined individuals interaction outside adjacent nodes
initE Init number of exposed individuals
initI Init number of infectious individuals
initD_E Init number of detected exposed individuals
initD_I Init number of detected infectious individuals
initR Init number of recovered individuals
initF Init number of infection-related fatalities
(all remaining nodes initialized susceptible)
"""
def __init__(self, G, beta, sigma, gamma, xi=0, mu_I=0, mu_0=0, nu=0, beta_local=None, p=0,
Q=None, beta_D=None, sigma_D=None, gamma_D=None, mu_D=None, beta_D_local=None,
theta_E=0, theta_I=0, phi_E=0, phi_I=0, psi_E=1, psi_I=1, q=0,
initE=0, initI=10, initD_E=0, initD_I=0, initR=0, initF=0,
node_groups=None, store_Xseries=False):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Setup Adjacency matrix:
self.update_G(G)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Setup Quarantine Adjacency matrix:
if(Q is None):
Q = G # If no Q graph is provided, use G in its place
self.update_Q(Q)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model Parameters:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.parameters = { 'beta':beta, 'sigma':sigma, 'gamma':gamma, 'xi':xi, 'mu_I':mu_I, 'mu_0':mu_0, 'nu':nu,
'beta_D':beta_D, 'sigma_D':sigma_D, 'gamma_D':gamma_D, 'mu_D':mu_D,
'beta_local':beta_local, 'beta_D_local':beta_D_local, 'p':p,'q':q,
'theta_E':theta_E, 'theta_I':theta_I, 'phi_E':phi_E, 'phi_I':phi_I, 'psi_E':psi_E, 'psi_I':psi_I }
self.update_parameters()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Each node can undergo up to 4 transitions (sans vitality/re-susceptibility returns to S state),
# so there are ~numNodes*4 events/timesteps expected; initialize numNodes*5 timestep slots to start
# (will be expanded during run if needed)
self.tseries = numpy.zeros(5*self.numNodes)
self.numE = numpy.zeros(5*self.numNodes)
self.numI = numpy.zeros(5*self.numNodes)
self.numD_E = numpy.zeros(5*self.numNodes)
self.numD_I = numpy.zeros(5*self.numNodes)
self.numR = numpy.zeros(5*self.numNodes)
self.numF = numpy.zeros(5*self.numNodes)
self.numS = numpy.zeros(5*self.numNodes)
self.N = numpy.zeros(5*self.numNodes)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Timekeeping:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.t = 0
self.tmax = 0 # will be set when run() is called
self.tidx = 0
self.tseries[0] = 0
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Counts of individuals with each state:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.numE[0] = int(initE)
self.numI[0] = int(initI)
self.numD_E[0] = int(initD_E)
self.numD_I[0] = int(initD_I)
self.numR[0] = int(initR)
self.numF[0] = int(initF)
self.numS[0] = self.numNodes - self.numE[0] - self.numI[0] - self.numD_E[0] - self.numD_I[0] - self.numR[0] - self.numF[0]
self.N[0] = self.numS[0] + self.numE[0] + self.numI[0] + self.numD_E[0] + self.numD_I[0] + self.numR[0]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Node states:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.S = 1
self.E = 2
self.I = 3
self.D_E = 4
self.D_I = 5
self.R = 6
self.F = 7
self.X = numpy.array([self.S]*int(self.numS[0]) + [self.E]*int(self.numE[0]) + [self.I]*int(self.numI[0]) + [self.D_E]*int(self.numD_E[0]) + [self.D_I]*int(self.numD_I[0]) + [self.R]*int(self.numR[0]) + [self.F]*int(self.numF[0])).reshape((self.numNodes,1))
numpy.random.shuffle(self.X)
self.store_Xseries = store_Xseries
if(store_Xseries):
self.Xseries = numpy.zeros(shape=(5*self.numNodes, self.numNodes), dtype='uint8')
self.Xseries[0,:] = self.X.T
self.transitions = {
'StoE': {'currentState':self.S, 'newState':self.E},
'EtoI': {'currentState':self.E, 'newState':self.I},
'ItoR': {'currentState':self.I, 'newState':self.R},
'ItoF': {'currentState':self.I, 'newState':self.F},
'RtoS': {'currentState':self.R, 'newState':self.S},
'EtoDE': {'currentState':self.E, 'newState':self.D_E},
'ItoDI': {'currentState':self.I, 'newState':self.D_I},
'DEtoDI': {'currentState':self.D_E, 'newState':self.D_I},
'DItoR': {'currentState':self.D_I, 'newState':self.R},
'DItoF': {'currentState':self.D_I, 'newState':self.F},
'_toS': {'currentState':True, 'newState':self.S},
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize node subgroup data series:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.nodeGroupData = None
if(node_groups):
self.nodeGroupData = {}
for groupName, nodeList in node_groups.items():
self.nodeGroupData[groupName] = {'nodes': numpy.array(nodeList),
'mask': numpy.isin(range(self.numNodes), nodeList).reshape((self.numNodes,1))}
self.nodeGroupData[groupName]['numS'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numE'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numI'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numD_E'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numD_I'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numR'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numF'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['N'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numS'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.S)
self.nodeGroupData[groupName]['numE'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.E)
self.nodeGroupData[groupName]['numI'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.I)
self.nodeGroupData[groupName]['numD_E'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_E)
self.nodeGroupData[groupName]['numD_I'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_I)
self.nodeGroupData[groupName]['numR'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.R)
self.nodeGroupData[groupName]['numF'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.F)
self.nodeGroupData[groupName]['N'][0] = self.nodeGroupData[groupName]['numS'][0] + self.nodeGroupData[groupName]['numE'][0] + self.nodeGroupData[groupName]['numI'][0] + self.nodeGroupData[groupName]['numD_E'][0] + self.nodeGroupData[groupName]['numD_I'][0] + self.nodeGroupData[groupName]['numR'][0]
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_parameters(self):
import time
updatestart = time.time()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model parameters:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.beta = numpy.array(self.parameters['beta']).reshape((self.numNodes, 1)) if isinstance(self.parameters['beta'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['beta'], shape=(self.numNodes,1))
self.sigma = numpy.array(self.parameters['sigma']).reshape((self.numNodes, 1)) if isinstance(self.parameters['sigma'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['sigma'], shape=(self.numNodes,1))
self.gamma = numpy.array(self.parameters['gamma']).reshape((self.numNodes, 1)) if isinstance(self.parameters['gamma'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['gamma'], shape=(self.numNodes,1))
self.xi = numpy.array(self.parameters['xi']).reshape((self.numNodes, 1)) if isinstance(self.parameters['xi'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['xi'], shape=(self.numNodes,1))
self.mu_I = numpy.array(self.parameters['mu_I']).reshape((self.numNodes, 1)) if isinstance(self.parameters['mu_I'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['mu_I'], shape=(self.numNodes,1))
self.mu_0 = numpy.array(self.parameters['mu_0']).reshape((self.numNodes, 1)) if isinstance(self.parameters['mu_0'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['mu_0'], shape=(self.numNodes,1))
self.nu = numpy.array(self.parameters['nu']).reshape((self.numNodes, 1)) if isinstance(self.parameters['nu'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['nu'], shape=(self.numNodes,1))
self.p = numpy.array(self.parameters['p']).reshape((self.numNodes, 1)) if isinstance(self.parameters['p'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['p'], shape=(self.numNodes,1))
# Testing-related parameters:
self.beta_D = (numpy.array(self.parameters['beta_D']).reshape((self.numNodes, 1)) if isinstance(self.parameters['beta_D'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['beta_D'], shape=(self.numNodes,1))) if self.parameters['beta_D'] is not None else self.beta
self.sigma_D = (numpy.array(self.parameters['sigma_D']).reshape((self.numNodes, 1)) if isinstance(self.parameters['sigma_D'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['sigma_D'], shape=(self.numNodes,1))) if self.parameters['sigma_D'] is not None else self.sigma
self.gamma_D = (numpy.array(self.parameters['gamma_D']).reshape((self.numNodes, 1)) if isinstance(self.parameters['gamma_D'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['gamma_D'], shape=(self.numNodes,1))) if self.parameters['gamma_D'] is not None else self.gamma
self.mu_D = (numpy.array(self.parameters['mu_D']).reshape((self.numNodes, 1)) if isinstance(self.parameters['mu_D'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['mu_D'], shape=(self.numNodes,1))) if self.parameters['mu_D'] is not None else self.mu_I
self.theta_E = numpy.array(self.parameters['theta_E']).reshape((self.numNodes, 1)) if isinstance(self.parameters['theta_E'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['theta_E'], shape=(self.numNodes,1))
self.theta_I = numpy.array(self.parameters['theta_I']).reshape((self.numNodes, 1)) if isinstance(self.parameters['theta_I'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['theta_I'], shape=(self.numNodes,1))
self.phi_E = numpy.array(self.parameters['phi_E']).reshape((self.numNodes, 1)) if isinstance(self.parameters['phi_E'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['phi_E'], shape=(self.numNodes,1))
self.phi_I = numpy.array(self.parameters['phi_I']).reshape((self.numNodes, 1)) if isinstance(self.parameters['phi_I'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['phi_I'], shape=(self.numNodes,1))
self.psi_E = numpy.array(self.parameters['psi_E']).reshape((self.numNodes, 1)) if isinstance(self.parameters['psi_E'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['psi_E'], shape=(self.numNodes,1))
self.psi_I = numpy.array(self.parameters['psi_I']).reshape((self.numNodes, 1)) if isinstance(self.parameters['psi_I'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['psi_I'], shape=(self.numNodes,1))
self.q = numpy.array(self.parameters['q']).reshape((self.numNodes, 1)) if isinstance(self.parameters['q'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['q'], shape=(self.numNodes,1))
#Local transmission parameters:
if(self.parameters['beta_local'] is not None):
if(isinstance(self.parameters['beta_local'], (list, numpy.ndarray))):
if(isinstance(self.parameters['beta_local'], list)):
self.beta_local = numpy.array(self.parameters['beta_local'])
else: # is numpy.ndarray
self.beta_local = self.parameters['beta_local']
if(self.beta_local.ndim == 1):
self.beta_local = self.beta_local.reshape((self.numNodes, 1))
elif(self.beta_local.ndim == 2):
self.beta_local = self.beta_local.reshape((self.numNodes, self.numNodes))
else:
self.beta_local = numpy.full_like(self.beta, fill_value=self.parameters['beta_local'])
else:
self.beta_local = self.beta
#----------------------------------------
if(self.parameters['beta_D_local'] is not None):
if(isinstance(self.parameters['beta_D_local'], (list, numpy.ndarray))):
if(isinstance(self.parameters['beta_D_local'], list)):
self.beta_D_local = numpy.array(self.parameters['beta_D_local'])
else: # is numpy.ndarray
self.beta_D_local = self.parameters['beta_D_local']
if(self.beta_D_local.ndim == 1):
self.beta_D_local = self.beta_D_local.reshape((self.numNodes, 1))
elif(self.beta_D_local.ndim == 2):
self.beta_D_local = self.beta_D_local.reshape((self.numNodes, self.numNodes))
else:
self.beta_D_local = numpy.full_like(self.beta_D, fill_value=self.parameters['beta_D_local'])
else:
self.beta_D_local = self.beta_D
# Pre-multiply beta values by the adjacency matrix ("transmission weight connections")
if(self.beta_local.ndim == 1):
self.A_beta = scipy.sparse.csr_matrix.multiply(self.A, numpy.tile(self.beta_local, (1,self.numNodes))).tocsr()
elif(self.beta_local.ndim == 2):
self.A_beta = scipy.sparse.csr_matrix.multiply(self.A, self.beta_local).tocsr()
# Pre-multiply beta_D values by the quarantine adjacency matrix ("transmission weight connections")
if(self.beta_D_local.ndim == 1):
self.A_Q_beta_D = scipy.sparse.csr_matrix.multiply(self.A_Q, numpy.tile(self.beta_D_local, (1,self.numNodes))).tocsr()
elif(self.beta_D_local.ndim == 2):
self.A_Q_beta_D = scipy.sparse.csr_matrix.multiply(self.A_Q, self.beta_D_local).tocsr()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Update scenario flags:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.update_scenario_flags()
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def node_degrees(self, Amat):
return Amat.sum(axis=0).reshape(self.numNodes,1) # sums of adj matrix cols
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_G(self, new_G):
self.G = new_G
# Adjacency matrix:
if type(new_G)==numpy.ndarray:
self.A = scipy.sparse.csr_matrix(new_G)
elif type(new_G)==networkx.classes.graph.Graph:
self.A = networkx.adj_matrix(new_G) # adj_matrix gives scipy.sparse csr_matrix
else:
raise BaseException("Input an adjacency matrix or networkx object only.")
self.numNodes = int(self.A.shape[1])
self.degree = numpy.asarray(self.node_degrees(self.A)).astype(float)
return
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_Q(self, new_Q):
self.Q = new_Q
# Quarantine Adjacency matrix:
if type(new_Q)==numpy.ndarray:
self.A_Q = scipy.sparse.csr_matrix(new_Q)
elif type(new_Q)==networkx.classes.graph.Graph:
self.A_Q = networkx.adj_matrix(new_Q) # adj_matrix gives scipy.sparse csr_matrix
else:
raise BaseException("Input an adjacency matrix or networkx object only.")
self.numNodes_Q = int(self.A_Q.shape[1])
self.degree_Q = numpy.asarray(self.node_degrees(self.A_Q)).astype(float)
assert(self.numNodes == self.numNodes_Q), "The normal and quarantine adjacency graphs must be of the same size."
return
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
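# Illustrative usage sketch (comments only; parameter values are hypothetical;
# run() is part of the full class and not shown in this excerpt):
#   G = networkx.barabasi_albert_graph(n=1000, m=9)
#   net_model = SEIRSNetworkModel(G=G, beta=0.155, sigma=1/5.2, gamma=1/12.4,
#                                 theta_E=0.02, theta_I=0.02, phi_E=0.2, phi_I=0.2,
#                                 psi_E=1.0, psi_I=1.0, initI=10)
#   net_model.run(T=300)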
def update_scenario_flags(self):
self.testing_scenario = ( (numpy.any(self.psi_I) and (numpy.any(self.theta_I) or numpy.any(self.phi_I)))
or (numpy.any(self.psi_E) and (numpy.any(self.theta_E) or numpy.any(self.phi_E))) )
self.tracing_scenario = ( (numpy.any(self.psi_E) and numpy.any(self.phi_E))
or (numpy.any(self.psi_I) and numpy.any(self.phi_I)) )
self.vitality_scenario = (numpy.any(self.mu_0) and numpy.any(self.nu))
self.resusceptibility_scenario = (numpy.any(self.xi))
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def total_num_infections(self, t_idx=None):
if(t_idx is None):
return (self.numE[:] + self.numI[:] + self.numD_E[:] + self.numD_I[:])
else:
return (self.numE[t_idx] + self.numI[t_idx] + self.numD_E[t_idx] + self.numD_I[t_idx])
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def calc_propensities(self):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Pre-calculate matrix multiplication terms that may be used in multiple propensity calculations,
# and check to see if their computation is necessary before doing the multiplication
transmissionTerms_I = numpy.zeros(shape=(self.numNodes,1))
if(numpy.any(self.numI[self.tidx])
and numpy.any(self.beta!=0)):
transmissionTerms_I = numpy.asarray( scipy.sparse.csr_matrix.dot(self.A_beta, self.X==self.I) )
transmissionTerms_DI = numpy.zeros(shape=(self.numNodes,1))
if(self.testing_scenario
and numpy.any(self.numD_I[self.tidx])
and numpy.any(self.beta_D)):
transmissionTerms_DI = numpy.asarray( scipy.sparse.csr_matrix.dot(self.A_Q_beta_D, self.X==self.D_I) )
numContacts_D = numpy.zeros(shape=(self.numNodes,1))
if(self.tracing_scenario
and (numpy.any(self.numD_E[self.tidx]) or numpy.any(self.numD_I[self.tidx]))):
numContacts_D = numpy.asarray( scipy.sparse.csr_matrix.dot( self.A, ((self.X==self.D_E)|(self.X==self.D_I)) ) )
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
propensities_StoE = ( self.p*((self.beta*self.numI[self.tidx] + self.q*self.beta_D*self.numD_I[self.tidx])/self.N[self.tidx])
+ (1-self.p)*numpy.divide((transmissionTerms_I + transmissionTerms_DI), self.degree, out=numpy.zeros_like(self.degree), where=self.degree!=0)
)*(self.X==self.S)
propensities_EtoI = self.sigma*(self.X==self.E)
propensities_ItoR = self.gamma*(self.X==self.I)
propensities_ItoF = self.mu_I*(self.X==self.I)
# propensities_EtoDE = ( self.theta_E + numpy.divide((self.phi_E*numContacts_D), self.degree, out=numpy.zeros_like(self.degree), where=self.degree!=0) )*self.psi_E*(self.X==self.E)
propensities_EtoDE = (self.theta_E + self.phi_E*numContacts_D)*self.psi_E*(self.X==self.E)
# propensities_ItoDI = ( self.theta_I + numpy.divide((self.phi_I*numContacts_D), self.degree, out=numpy.zeros_like(self.degree), where=self.degree!=0) )*self.psi_I*(self.X==self.I)
propensities_ItoDI = (self.theta_I + self.phi_I*numContacts_D)*self.psi_I*(self.X==self.I)
propensities_DEtoDI = self.sigma_D*(self.X==self.D_E)
propensities_DItoR = self.gamma_D*(self.X==self.D_I)
propensities_DItoF = self.mu_D*(self.X==self.D_I)
propensities_RtoS = self.xi*(self.X==self.R)
propensities__toS = self.nu*(self.X!=self.F)
propensities = numpy.hstack([propensities_StoE, propensities_EtoI,
propensities_ItoR, propensities_ItoF,
propensities_EtoDE, propensities_ItoDI, propensities_DEtoDI,
propensities_DItoR, propensities_DItoF,
propensities_RtoS, propensities__toS])
columns = ['StoE', 'EtoI', 'ItoR', 'ItoF', 'EtoDE', 'ItoDI', 'DEtoDI', 'DItoR', 'DItoF', 'RtoS', '_toS']
return propensities, columns
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
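# Illustrative sketch (comments only): run_iteration() below flattens the
# (numNodes x numTransitionTypes) propensity matrix in column-major ('F') order,
# so a flat index k selects node (k % numNodes) undergoing transition
# columns[k // numNodes]. For example, with numNodes = 4 and 3 transition types:
#   prop = numpy.arange(12).reshape((4, 3))
#   flat = prop.ravel(order='F')
#   flat[7] == prop[7 % 4, 7 // 4]   # True: node 3, transition type 1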
def increase_data_series_length(self):
self.tseries= numpy.pad(self.tseries, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numS = numpy.pad(self.numS, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numE = numpy.pad(self.numE, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numI = numpy.pad(self.numI, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numD_E = numpy.pad(self.numD_E, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numD_I = numpy.pad(self.numD_I, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numR = numpy.pad(self.numR, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numF = numpy.pad(self.numF, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.N = numpy.pad(self.N, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
if(self.store_Xseries):
self.Xseries = numpy.pad(self.Xseries, [(0, 5*self.numNodes), (0,0)], mode='constant', constant_values=0)
if(self.nodeGroupData):
for groupName in self.nodeGroupData:
self.nodeGroupData[groupName]['numS'] = numpy.pad(self.nodeGroupData[groupName]['numS'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numE'] = numpy.pad(self.nodeGroupData[groupName]['numE'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numI'] = numpy.pad(self.nodeGroupData[groupName]['numI'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numD_E'] = numpy.pad(self.nodeGroupData[groupName]['numD_E'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numD_I'] = numpy.pad(self.nodeGroupData[groupName]['numD_I'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numR'] = numpy.pad(self.nodeGroupData[groupName]['numR'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numF'] = numpy.pad(self.nodeGroupData[groupName]['numF'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['N'] = numpy.pad(self.nodeGroupData[groupName]['N'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
return None
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def finalize_data_series(self):
self.tseries= numpy.array(self.tseries, dtype=float)[:self.tidx+1]
self.numS = numpy.array(self.numS, dtype=float)[:self.tidx+1]
self.numE = numpy.array(self.numE, dtype=float)[:self.tidx+1]
self.numI = numpy.array(self.numI, dtype=float)[:self.tidx+1]
self.numD_E = numpy.array(self.numD_E, dtype=float)[:self.tidx+1]
self.numD_I = numpy.array(self.numD_I, dtype=float)[:self.tidx+1]
self.numR = numpy.array(self.numR, dtype=float)[:self.tidx+1]
self.numF = numpy.array(self.numF, dtype=float)[:self.tidx+1]
self.N = numpy.array(self.N, dtype=float)[:self.tidx+1]
if(self.store_Xseries):
self.Xseries = self.Xseries[:self.tidx+1, :]
if(self.nodeGroupData):
for groupName in self.nodeGroupData:
self.nodeGroupData[groupName]['numS'] = numpy.array(self.nodeGroupData[groupName]['numS'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numE'] = numpy.array(self.nodeGroupData[groupName]['numE'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numI'] = numpy.array(self.nodeGroupData[groupName]['numI'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numD_E'] = numpy.array(self.nodeGroupData[groupName]['numD_E'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numD_I'] = numpy.array(self.nodeGroupData[groupName]['numD_I'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numR'] = numpy.array(self.nodeGroupData[groupName]['numR'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numF'] = numpy.array(self.nodeGroupData[groupName]['numF'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['N'] = numpy.array(self.nodeGroupData[groupName]['N'], dtype=float)[:self.tidx+1]
return None
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def run_iteration(self):
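        # One iteration of the Gillespie direct method (stochastic simulation algorithm):
        # draw two uniform random numbers, compute the propensity of every possible node
        # transition, sample the exponentially distributed waiting time until the next event,
        # select which transition fires, then update the node states and the data series.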
if(self.tidx >= len(self.tseries)-1):
            # Room has run out in the timeseries storage arrays; extend these arrays:
self.increase_data_series_length()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 1. Generate 2 random numbers uniformly distributed in (0,1)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
r1 = numpy.random.rand()
r2 = numpy.random.rand()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 2. Calculate propensities
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
propensities, transitionTypes = self.calc_propensities()
# Terminate when probability of all events is 0:
if(propensities.sum() <= 0.0):
self.finalize_data_series()
return False
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 3. Calculate alpha
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
propensities_flat = propensities.ravel(order='F')
cumsum = propensities_flat.cumsum()
alpha = propensities_flat.sum()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 4. Compute the time until the next event takes place
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
tau = (1/alpha)*numpy.log(float(1/r1))
self.t += tau
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 5. Compute which event takes place
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
transitionIdx = numpy.searchsorted(cumsum,r2*alpha)
transitionNode = transitionIdx % self.numNodes
transitionType = transitionTypes[ int(transitionIdx/self.numNodes) ]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 6. Update node states and data series
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
assert(self.X[transitionNode] == self.transitions[transitionType]['currentState'] and self.X[transitionNode]!=self.F), "Assertion error: Node "+str(transitionNode)+" has unexpected current state "+str(self.X[transitionNode])+" given the intended transition of "+str(transitionType)+"."
self.X[transitionNode] = self.transitions[transitionType]['newState']
self.tidx += 1
self.tseries[self.tidx] = self.t
self.numS[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.S), a_min=0, a_max=self.numNodes)
self.numE[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.E), a_min=0, a_max=self.numNodes)
self.numI[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.I), a_min=0, a_max=self.numNodes)
self.numD_E[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.D_E), a_min=0, a_max=self.numNodes)
self.numD_I[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.D_I), a_min=0, a_max=self.numNodes)
self.numR[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.R), a_min=0, a_max=self.numNodes)
self.numF[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.F), a_min=0, a_max=self.numNodes)
self.N[self.tidx] = numpy.clip((self.numS[self.tidx] + self.numE[self.tidx] + self.numI[self.tidx] + self.numD_E[self.tidx] + self.numD_I[self.tidx] + self.numR[self.tidx]), a_min=0, a_max=self.numNodes)
if(self.store_Xseries):
self.Xseries[self.tidx,:] = self.X.T
if(self.nodeGroupData):
for groupName in self.nodeGroupData:
self.nodeGroupData[groupName]['numS'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.S)
self.nodeGroupData[groupName]['numE'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.E)
self.nodeGroupData[groupName]['numI'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.I)
self.nodeGroupData[groupName]['numD_E'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_E)
self.nodeGroupData[groupName]['numD_I'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_I)
self.nodeGroupData[groupName]['numR'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.R)
self.nodeGroupData[groupName]['numF'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.F)
                self.nodeGroupData[groupName]['N'][self.tidx] = numpy.clip((self.nodeGroupData[groupName]['numS'][self.tidx] + self.nodeGroupData[groupName]['numE'][self.tidx] + self.nodeGroupData[groupName]['numI'][self.tidx] + self.nodeGroupData[groupName]['numD_E'][self.tidx] + self.nodeGroupData[groupName]['numD_I'][self.tidx] + self.nodeGroupData[groupName]['numR'][self.tidx]), a_min=0, a_max=self.numNodes)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Terminate if tmax reached or num infectious and num exposed is 0:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(self.t >= self.tmax or (self.numI[self.tidx]<1 and self.numE[self.tidx]<1 and self.numD_E[self.tidx]<1 and self.numD_I[self.tidx]<1)):
self.finalize_data_series()
return False
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
return True
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def run(self, T, checkpoints=None, print_interval=10, verbose='t'):
if(T>0):
self.tmax += T
else:
return False
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Pre-process checkpoint values:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(checkpoints):
numCheckpoints = len(checkpoints['t'])
for chkpt_param, chkpt_values in checkpoints.items():
assert(isinstance(chkpt_values, (list, numpy.ndarray)) and len(chkpt_values)==numCheckpoints), "Expecting a list of values with length equal to number of checkpoint times ("+str(numCheckpoints)+") for each checkpoint parameter."
checkpointIdx = numpy.searchsorted(checkpoints['t'], self.t) # Finds 1st index in list greater than given val
if(checkpointIdx >= numCheckpoints):
# We are out of checkpoints, stop checking them:
checkpoints = None
else:
checkpointTime = checkpoints['t'][checkpointIdx]
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Run the simulation loop:
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
print_reset = True
running = True
while running:
running = self.run_iteration()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Handle checkpoints if applicable:
if(checkpoints):
if(self.t >= checkpointTime):
if(verbose is not False):
print("[Checkpoint: Updating parameters]")
# A checkpoint has been reached, update param values:
if('G' in list(checkpoints.keys())):
self.update_G(checkpoints['G'][checkpointIdx])
if('Q' in list(checkpoints.keys())):
self.update_Q(checkpoints['Q'][checkpointIdx])
for param in list(self.parameters.keys()):
if(param in list(checkpoints.keys())):
self.parameters.update({param: checkpoints[param][checkpointIdx]})
# Update parameter data structures and scenario flags:
self.update_parameters()
# Update the next checkpoint time:
checkpointIdx = numpy.searchsorted(checkpoints['t'], self.t) # Finds 1st index in list greater than given val
if(checkpointIdx >= numCheckpoints):
# We are out of checkpoints, stop checking them:
checkpoints = None
else:
checkpointTime = checkpoints['t'][checkpointIdx]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(print_interval):
if(print_reset and (int(self.t) % print_interval == 0)):
if(verbose=="t"):
print("t = %.2f" % self.t)
if(verbose==True):
print("t = %.2f" % self.t)
print("\t S = " + str(self.numS[self.tidx]))
print("\t E = " + str(self.numE[self.tidx]))
print("\t I = " + str(self.numI[self.tidx]))
print("\t D_E = " + str(self.numD_E[self.tidx]))
print("\t D_I = " + str(self.numD_I[self.tidx]))
print("\t R = " + str(self.numR[self.tidx]))
print("\t F = " + str(self.numF[self.tidx]))
print_reset = False
                elif(not print_reset and (int(self.t) % print_interval != 0)):
print_reset = True
return True
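    # Illustrative sketch (not from the original source) of the checkpoint format consumed by run():
    # 't' lists the checkpoint times, and every other key must name a model parameter (or 'G'/'Q')
    # mapped to a list with one value per checkpoint time, e.g.:
    #
    #   checkpoints = {'t': [20, 100], 'p': [0.1, 0.5], 'theta_E': [0.02, 0.02], 'theta_I': [0.02, 0.02]}
    #   model.run(T=300, checkpoints=checkpoints)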
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def plot(self, ax=None, plot_S='line', plot_E='line', plot_I='line',plot_R='line', plot_F='line',
plot_D_E='line', plot_D_I='line', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True):
import matplotlib.pyplot as pyplot
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create an Axes object if None provided:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(not ax):
fig, ax = pyplot.subplots()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Prepare data series to be plotted:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Fseries = self.numF/self.numNodes if plot_percentages else self.numF
Eseries = self.numE/self.numNodes if plot_percentages else self.numE
Dseries = (self.numD_E+self.numD_I)/self.numNodes if plot_percentages else (self.numD_E+self.numD_I)
D_Eseries = self.numD_E/self.numNodes if plot_percentages else self.numD_E
D_Iseries = self.numD_I/self.numNodes if plot_percentages else self.numD_I
Iseries = self.numI/self.numNodes if plot_percentages else self.numI
Rseries = self.numR/self.numNodes if plot_percentages else self.numR
Sseries = self.numS/self.numNodes if plot_percentages else self.numS
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the reference data:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(dashed_reference_results):
            dashedReference_tseries = dashed_reference_results.tseries[::max(int(self.numNodes/100), 1)]
            dashedReference_IDEstack = (dashed_reference_results.numI + dashed_reference_results.numD_I + dashed_reference_results.numD_E + dashed_reference_results.numE)[::max(int(self.numNodes/100), 1)] / (self.numNodes if plot_percentages else 1)
ax.plot(dashedReference_tseries, dashedReference_IDEstack, color='#E0E0E0', linestyle='--', label='$I+D+E$ ('+dashed_reference_label+')', zorder=0)
if(shaded_reference_results):
shadedReference_tseries = shaded_reference_results.tseries
shadedReference_IDEstack = (shaded_reference_results.numI + shaded_reference_results.numD_I + shaded_reference_results.numD_E + shaded_reference_results.numE) / (self.numNodes if plot_percentages else 1)
ax.fill_between(shaded_reference_results.tseries, shadedReference_IDEstack, 0, color='#EFEFEF', label='$I+D+E$ ('+shaded_reference_label+')', zorder=0)
ax.plot(shaded_reference_results.tseries, shadedReference_IDEstack, color='#E0E0E0', zorder=1)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the stacked variables:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
topstack = numpy.zeros_like(self.tseries)
if(any(Fseries) and plot_F=='stacked'):
ax.fill_between(numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, topstack+Fseries), topstack, color=color_F, alpha=0.5, label='$F$', zorder=2)
ax.plot( numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, topstack+Fseries), color=color_F, zorder=3)
topstack = topstack+Fseries
if(any(Eseries) and plot_E=='stacked'):
ax.fill_between(numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, topstack+Eseries), topstack, color=color_E, alpha=0.5, label='$E$', zorder=2)
ax.plot( numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, topstack+Eseries), color=color_E, zorder=3)
topstack = topstack+Eseries
if(combine_D and plot_D_E=='stacked' and plot_D_I=='stacked'):
ax.fill_between(numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, topstack+Dseries), topstack, color=color_D_E, alpha=0.5, label='$D_{all}$', zorder=2)
ax.plot( numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, topstack+Dseries), color=color_D_E, zorder=3)
topstack = topstack+Dseries
else:
if(any(D_Eseries) and plot_D_E=='stacked'):
ax.fill_between(numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, topstack+D_Eseries), topstack, color=color_D_E, alpha=0.5, label='$D_E$', zorder=2)
ax.plot( numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, topstack+D_Eseries), color=color_D_E, zorder=3)
topstack = topstack+D_Eseries
if(any(D_Iseries) and plot_D_I=='stacked'):
ax.fill_between(numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, topstack+D_Iseries), topstack, color=color_D_I, alpha=0.5, label='$D_I$', zorder=2)
ax.plot( numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, topstack+D_Iseries), color=color_D_I, zorder=3)
topstack = topstack+D_Iseries
if(any(Iseries) and plot_I=='stacked'):
ax.fill_between(numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, topstack+Iseries), topstack, color=color_I, alpha=0.5, label='$I$', zorder=2)
ax.plot( numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, topstack+Iseries), color=color_I, zorder=3)
topstack = topstack+Iseries
if(any(Rseries) and plot_R=='stacked'):
ax.fill_between(numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, topstack+Rseries), topstack, color=color_R, alpha=0.5, label='$R$', zorder=2)
ax.plot( numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, topstack+Rseries), color=color_R, zorder=3)
topstack = topstack+Rseries
if(any(Sseries) and plot_S=='stacked'):
ax.fill_between(numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, topstack+Sseries), topstack, color=color_S, alpha=0.5, label='$S$', zorder=2)
ax.plot( numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, topstack+Sseries), color=color_S, zorder=3)
topstack = topstack+Sseries
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the shaded variables:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(any(Fseries) and plot_F=='shaded'):
ax.fill_between(numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, Fseries), 0, color=color_F, alpha=0.5, label='$F$', zorder=4)
ax.plot( numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, Fseries), color=color_F, zorder=5)
if(any(Eseries) and plot_E=='shaded'):
ax.fill_between(numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, Eseries), 0, color=color_E, alpha=0.5, label='$E$', zorder=4)
ax.plot( numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, Eseries), color=color_E, zorder=5)
if(combine_D and (any(Dseries) and plot_D_E=='shaded' and plot_D_I=='shaded')):
ax.fill_between(numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, Dseries), 0, color=color_D_E, alpha=0.5, label='$D_{all}$', zorder=4)
ax.plot( numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, Dseries), color=color_D_E, zorder=5)
else:
if(any(D_Eseries) and plot_D_E=='shaded'):
ax.fill_between(numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, D_Eseries), 0, color=color_D_E, alpha=0.5, label='$D_E$', zorder=4)
ax.plot( numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, D_Eseries), color=color_D_E, zorder=5)
if(any(D_Iseries) and plot_D_I=='shaded'):
ax.fill_between(numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, D_Iseries), 0, color=color_D_I, alpha=0.5, label='$D_I$', zorder=4)
ax.plot( numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, D_Iseries), color=color_D_I, zorder=5)
if(any(Iseries) and plot_I=='shaded'):
ax.fill_between(numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, Iseries), 0, color=color_I, alpha=0.5, label='$I$', zorder=4)
ax.plot( numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, Iseries), color=color_I, zorder=5)
if(any(Sseries) and plot_S=='shaded'):
ax.fill_between(numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, Sseries), 0, color=color_S, alpha=0.5, label='$S$', zorder=4)
ax.plot( numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, Sseries), color=color_S, zorder=5)
if(any(Rseries) and plot_R=='shaded'):
ax.fill_between(numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, Rseries), 0, color=color_R, alpha=0.5, label='$R$', zorder=4)
ax.plot( numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, Rseries), color=color_R, zorder=5)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the line variables:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(any(Fseries) and plot_F=='line'):
ax.plot(numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, Fseries), color=color_F, label='$F$', zorder=6)
if(any(Eseries) and plot_E=='line'):
ax.plot(numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, Eseries), color=color_E, label='$E$', zorder=6)
if(combine_D and (any(Dseries) and plot_D_E=='line' and plot_D_I=='line')):
ax.plot(numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, Dseries), color=color_D_E, label='$D_{all}$', zorder=6)
else:
if(any(D_Eseries) and plot_D_E=='line'):
ax.plot(numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, D_Eseries), color=color_D_E, label='$D_E$', zorder=6)
if(any(D_Iseries) and plot_D_I=='line'):
ax.plot(numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, D_Iseries), color=color_D_I, label='$D_I$', zorder=6)
if(any(Iseries) and plot_I=='line'):
ax.plot(numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, Iseries), color=color_I, label='$I$', zorder=6)
if(any(Sseries) and plot_S=='line'):
ax.plot(numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, Sseries), color=color_S, label='$S$', zorder=6)
if(any(Rseries) and plot_R=='line'):
ax.plot(numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, Rseries), color=color_R, label='$R$', zorder=6)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the vertical line annotations:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(len(vlines)>0 and len(vline_colors)==0):
vline_colors = ['gray']*len(vlines)
if(len(vlines)>0 and len(vline_labels)==0):
vline_labels = [None]*len(vlines)
if(len(vlines)>0 and len(vline_styles)==0):
vline_styles = [':']*len(vlines)
for vline_x, vline_color, vline_style, vline_label in zip(vlines, vline_colors, vline_styles, vline_labels):
if(vline_x is not None):
ax.axvline(x=vline_x, color=vline_color, linestyle=vline_style, alpha=1, label=vline_label)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the plot labels:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ax.set_xlabel('days')
ax.set_ylabel('percent of population' if plot_percentages else 'number of individuals')
ax.set_xlim(0, (max(self.tseries) if not xlim else xlim))
ax.set_ylim(0, ylim)
if(plot_percentages):
ax.set_yticklabels(['{:,.0%}'.format(y) for y in ax.get_yticks()])
if(legend):
legend_handles, legend_labels = ax.get_legend_handles_labels()
ax.legend(legend_handles[::-1], legend_labels[::-1], loc='upper right', facecolor='white', edgecolor='none', framealpha=0.9, prop={'size': 8})
if(title):
ax.set_title(title, size=12)
if(side_title):
ax.annotate(side_title, (0, 0.5), xytext=(-45, 0), ha='right', va='center',
size=12, rotation=90, xycoords='axes fraction', textcoords='offset points')
return ax
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def figure_basic(self, plot_S='line', plot_E='line', plot_I='line',plot_R='line', plot_F='line',
plot_D_E='line', plot_D_I='line', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True,
figsize=(12,8), use_seaborn=True, show=True):
import matplotlib.pyplot as pyplot
fig, ax = pyplot.subplots(figsize=figsize)
if(use_seaborn):
import seaborn
seaborn.set_style('ticks')
seaborn.despine()
self.plot(ax=ax, plot_S=plot_S, plot_E=plot_E, plot_I=plot_I,plot_R=plot_R, plot_F=plot_F,
plot_D_E=plot_D_E, plot_D_I=plot_D_I, combine_D=combine_D,
color_S=color_S, color_E=color_E, color_I=color_I, color_R=color_R, color_F=color_F,
color_D_E=color_D_E, color_D_I=color_D_I, color_reference=color_reference,
dashed_reference_results=dashed_reference_results, dashed_reference_label=dashed_reference_label,
shaded_reference_results=shaded_reference_results, shaded_reference_label=shaded_reference_label,
vlines=vlines, vline_colors=vline_colors, vline_styles=vline_styles, vline_labels=vline_labels,
ylim=ylim, xlim=xlim, legend=legend, title=title, side_title=side_title, plot_percentages=plot_percentages)
if(show):
pyplot.show()
return fig, ax
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def figure_infections(self, plot_S=False, plot_E='stacked', plot_I='stacked',plot_R=False, plot_F=False,
plot_D_E='stacked', plot_D_I='stacked', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True,
figsize=(12,8), use_seaborn=True, show=True):
import matplotlib.pyplot as pyplot
fig, ax = pyplot.subplots(figsize=figsize)
if(use_seaborn):
import seaborn
seaborn.set_style('ticks')
seaborn.despine()
self.plot(ax=ax, plot_S=plot_S, plot_E=plot_E, plot_I=plot_I,plot_R=plot_R, plot_F=plot_F,
plot_D_E=plot_D_E, plot_D_I=plot_D_I, combine_D=combine_D,
color_S=color_S, color_E=color_E, color_I=color_I, color_R=color_R, color_F=color_F,
color_D_E=color_D_E, color_D_I=color_D_I, color_reference=color_reference,
dashed_reference_results=dashed_reference_results, dashed_reference_label=dashed_reference_label,
shaded_reference_results=shaded_reference_results, shaded_reference_label=shaded_reference_label,
vlines=vlines, vline_colors=vline_colors, vline_styles=vline_styles, vline_labels=vline_labels,
ylim=ylim, xlim=xlim, legend=legend, title=title, side_title=side_title, plot_percentages=plot_percentages)
if(show):
pyplot.show()
return fig, ax
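    # Illustrative sketch (not from the original source), assuming a model instance created with this
    # class's constructor defined earlier in the file:
    #
    #   model.run(T=300)
    #   fig, ax = model.figure_infections(vlines=[20], vline_labels=['intervention'], show=False)
    #   fig, ax = model.figure_basic(plot_percentages=False)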
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
class SymptomaticSEIRSNetworkModel():
"""
A class to simulate the SEIRS Stochastic Network Model
with Symptom Presentation Compartments
===================================================
Params:
G Network adjacency matrix (numpy array) or Networkx graph object.
beta Rate of transmission (global interactions)
beta_local Rate(s) of transmission between adjacent individuals (optional)
            beta_A Rate of transmission for asymptomatic individuals (global interactions)
            beta_A_local Rate(s) of transmission between adjacent individuals for asymptomatic individuals (optional)
sigma Rate of progression to infectious state (inverse of latent period)
lamda Rate of progression to infectious (a)symptomatic state (inverse of prodromal period)
eta Rate of progression to hospitalized state (inverse of onset-to-admission period)
gamma Rate of recovery for non-hospitalized symptomatic individuals (inverse of symptomatic infectious period)
gamma_A Rate of recovery for asymptomatic individuals (inverse of asymptomatic infectious period)
gamma_H Rate of recovery for hospitalized symptomatic individuals (inverse of hospitalized infectious period)
mu_H Rate of death for hospitalized individuals (inverse of admission-to-death period)
xi Rate of re-susceptibility (upon recovery)
mu_0 Rate of baseline death
nu Rate of baseline birth
a Probability of an infected individual remaining asymptomatic
h Probability of a symptomatic individual being hospitalized
f Probability of death for hospitalized individuals (case fatality rate)
p Probability of individuals interacting with global population
Q Quarantine adjacency matrix (numpy array) or Networkx graph object.
beta_D Rate of transmission for individuals with detected infections (global interactions)
beta_D_local Rate(s) of transmission (exposure) for adjacent individuals with detected infections (optional)
sigma_D Rate of progression to infectious state for individuals with detected infections
lamda_D Rate of progression to infectious (a)symptomatic state for individuals with detected infections
eta_D Rate of progression to hospitalized state for individuals with detected infections
gamma_D_S Rate of recovery for non-hospitalized symptomatic individuals for individuals with detected infections
gamma_D_A Rate of recovery for asymptomatic individuals for individuals with detected infections
theta_E Rate of random testing for exposed individuals
theta_pre Rate of random testing for infectious pre-symptomatic individuals
theta_S Rate of random testing for infectious symptomatic individuals
theta_A Rate of random testing for infectious asymptomatic individuals
phi_E Rate of testing when a close contact has tested positive for exposed individuals
phi_pre Rate of testing when a close contact has tested positive for infectious pre-symptomatic individuals
phi_S Rate of testing when a close contact has tested positive for infectious symptomatic individuals
phi_A Rate of testing when a close contact has tested positive for infectious asymptomatic individuals
d_E Probability of positive test for exposed individuals
d_pre Probability of positive test for infectious pre-symptomatic individuals
d_S Probability of positive test for infectious symptomatic individuals
d_A Probability of positive test for infectious asymptomatic individuals
q Probability of individuals with detected infection interacting with global population
initE Initial number of exposed individuals
initI_pre Initial number of infectious pre-symptomatic individuals
initI_S Initial number of infectious symptomatic individuals
initI_A Initial number of infectious asymptomatic individuals
initH Initial number of hospitalized individuals
initR Initial number of recovered individuals
initF Initial number of infection-related fatalities
initD_E Initial number of detected exposed individuals
initD_pre Initial number of detected infectious pre-symptomatic individuals
initD_S Initial number of detected infectious symptomatic individuals
initD_A Initial number of detected infectious asymptomatic individuals
(all remaining nodes initialized susceptible)
"""
def __init__(self, G, beta, sigma, lamda, gamma,
eta=0, gamma_A=None, gamma_H=None, mu_H=0, xi=0, mu_0=0, nu=0, a=0, h=0, f=0, p=0,
beta_local=None, beta_A=None, beta_A_local=None,
Q=None, lamda_D=None, beta_D=None, beta_D_local=None, sigma_D=None, eta_D=None, gamma_D_S=None, gamma_D_A=None,
theta_E=0, theta_pre=0, theta_S=0, theta_A=0, phi_E=0, phi_pre=0, phi_S=0, phi_A=0,
d_E=1, d_pre=1, d_S=1, d_A=1, q=0,
initE=0, initI_pre=0, initI_S=0, initI_A=0, initH=0, initR=0, initF=0,
initD_E=0, initD_pre=0, initD_S=0, initD_A=0,
node_groups=None, store_Xseries=False):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Setup Adjacency matrix:
self.update_G(G)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Setup Quarantine Adjacency matrix:
if(Q is None):
Q = G # If no Q graph is provided, use G in its place
self.update_Q(Q)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model Parameters:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.parameters = { 'beta':beta, 'sigma':sigma, 'lamda':lamda, 'gamma':gamma,
'eta':eta, 'gamma_A':gamma_A, 'gamma_H':gamma_H, 'mu_H':mu_H,
'xi':xi, 'mu_0':mu_0, 'nu':nu, 'a':a, 'h':h, 'f':f, 'p':p,
'beta_local':beta_local, 'beta_A':beta_A, 'beta_A_local':beta_A_local,
'lamda_D':lamda_D, 'beta_D':beta_D, 'beta_D_local':beta_D_local, 'sigma_D':sigma_D,
'eta_D':eta_D, 'gamma_D_S':gamma_D_S, 'gamma_D_A':gamma_D_A,
'theta_E':theta_E, 'theta_pre':theta_pre, 'theta_S':theta_S, 'theta_A':theta_A,
'phi_E':phi_E, 'phi_pre':phi_pre, 'phi_S':phi_S, 'phi_A':phi_A,
'd_E':d_E, 'd_pre':d_pre, 'd_S':d_S, 'd_A':d_A, 'q':q,
'initE':initE, 'initI_pre':initI_pre, 'initI_S':initI_S, 'initI_A':initI_A,
'initH':initH, 'initR':initR, 'initF':initF,
'initD_E':initD_E, 'initD_pre':initD_pre, 'initD_S':initD_S, 'initD_A':initD_A }
self.update_parameters()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Each node can undergo up to 4-6 transitions (sans vitality/re-susceptibility returns to S state),
        # so on the order of several events per node are expected; initialize 5*numNodes timestep slots
        # to start (will be expanded during run if needed)
self.tseries = numpy.zeros(5*self.numNodes)
self.numS = numpy.zeros(5*self.numNodes)
self.numE = numpy.zeros(5*self.numNodes)
self.numI_pre = numpy.zeros(5*self.numNodes)
self.numI_S = numpy.zeros(5*self.numNodes)
self.numI_A = numpy.zeros(5*self.numNodes)
self.numH = numpy.zeros(5*self.numNodes)
self.numR = numpy.zeros(5*self.numNodes)
self.numF = numpy.zeros(5*self.numNodes)
self.numD_E = numpy.zeros(5*self.numNodes)
self.numD_pre = numpy.zeros(5*self.numNodes)
self.numD_S = numpy.zeros(5*self.numNodes)
self.numD_A = numpy.zeros(5*self.numNodes)
self.N = numpy.zeros(5*self.numNodes)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Timekeeping:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.t = 0
self.tmax = 0 # will be set when run() is called
self.tidx = 0
self.tseries[0] = 0
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Initialize Counts of individuals with each state:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.numE[0] = int(initE)
self.numI_pre[0] = int(initI_pre)
self.numI_S[0] = int(initI_S)
self.numI_A[0] = int(initI_A)
self.numH[0] = int(initH)
self.numR[0] = int(initR)
self.numF[0] = int(initF)
self.numD_E[0] = int(initD_E)
self.numD_pre[0] = int(initD_pre)
self.numD_S[0] = int(initD_S)
self.numD_A[0] = int(initD_A)
self.numS[0] = (self.numNodes - self.numE[0] - self.numI_pre[0] - self.numI_S[0] - self.numI_A[0] - self.numH[0] - self.numR[0]
- self.numD_E[0] - self.numD_pre[0] - self.numD_S[0] - self.numD_A[0] - self.numF[0])
self.N[0] = self.numNodes - self.numF[0]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Node states:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.S = 1
self.E = 2
self.I_pre = 3
self.I_S = 4
self.I_A = 5
self.H = 6
self.R = 7
self.F = 8
self.D_E = 9
self.D_pre = 10
self.D_S = 11
self.D_A = 12
self.X = numpy.array( [self.S]*int(self.numS[0]) + [self.E]*int(self.numE[0])
+ [self.I_pre]*int(self.numI_pre[0]) + [self.I_S]*int(self.numI_S[0]) + [self.I_A]*int(self.numI_A[0])
+ [self.H]*int(self.numH[0]) + [self.R]*int(self.numR[0]) + [self.F]*int(self.numF[0])
+ [self.D_E]*int(self.numD_E[0]) + [self.D_pre]*int(self.numD_pre[0]) + [self.D_S]*int(self.numD_S[0]) + [self.D_A]*int(self.numD_A[0])
).reshape((self.numNodes,1))
numpy.random.shuffle(self.X)
self.store_Xseries = store_Xseries
if(store_Xseries):
self.Xseries = numpy.zeros(shape=(5*self.numNodes, self.numNodes), dtype='uint8')
self.Xseries[0,:] = self.X.T
self.transitions = {
'StoE': {'currentState':self.S, 'newState':self.E},
'EtoIPRE': {'currentState':self.E, 'newState':self.I_pre},
'EtoDE': {'currentState':self.E, 'newState':self.D_E},
'IPREtoIS': {'currentState':self.I_pre, 'newState':self.I_S},
'IPREtoIA': {'currentState':self.I_pre, 'newState':self.I_A},
'IPREtoDPRE': {'currentState':self.I_pre, 'newState':self.D_pre},
'IStoH': {'currentState':self.I_S, 'newState':self.H},
'IStoR': {'currentState':self.I_S, 'newState':self.R},
'IStoDS': {'currentState':self.I_S, 'newState':self.D_S},
'IAtoR': {'currentState':self.I_A, 'newState':self.R},
'IAtoDA': {'currentState':self.I_A, 'newState':self.D_A},
'HtoR': {'currentState':self.H, 'newState':self.R},
'HtoF': {'currentState':self.H, 'newState':self.F},
'RtoS': {'currentState':self.R, 'newState':self.S},
'DEtoDPRE': {'currentState':self.D_E, 'newState':self.D_pre},
'DPREtoDS': {'currentState':self.D_pre, 'newState':self.D_S},
'DPREtoDA': {'currentState':self.D_pre, 'newState':self.D_A},
'DStoH': {'currentState':self.D_S, 'newState':self.H},
'DStoR': {'currentState':self.D_S, 'newState':self.R},
'DAtoR': {'currentState':self.D_A, 'newState':self.R},
'_toS': {'currentState':True, 'newState':self.S},
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize node subgroup data series:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.nodeGroupData = None
if(node_groups):
self.nodeGroupData = {}
for groupName, nodeList in node_groups.items():
self.nodeGroupData[groupName] = {'nodes': numpy.array(nodeList),
'mask': numpy.isin(range(self.numNodes), nodeList).reshape((self.numNodes,1))}
self.nodeGroupData[groupName]['numS'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numE'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numI_pre'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numI_S'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numI_A'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numH'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numR'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numF'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numD_E'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numD_pre'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numD_S'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numD_A'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['N'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numS'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.S)
self.nodeGroupData[groupName]['numE'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.E)
self.nodeGroupData[groupName]['numI_pre'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.I_pre)
self.nodeGroupData[groupName]['numI_S'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.I_S)
self.nodeGroupData[groupName]['numI_A'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.I_A)
self.nodeGroupData[groupName]['numH'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.H)
self.nodeGroupData[groupName]['numR'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.R)
self.nodeGroupData[groupName]['numF'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.F)
self.nodeGroupData[groupName]['numD_E'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_E)
self.nodeGroupData[groupName]['numD_pre'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_pre)
                self.nodeGroupData[groupName]['numD_S'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_S)
                self.nodeGroupData[groupName]['numD_A'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_A)
self.nodeGroupData[groupName]['N'][0] = self.numNodes - self.numF[0]
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_parameters(self):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model parameters:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.beta = numpy.array(self.parameters['beta']).reshape((self.numNodes, 1)) if isinstance(self.parameters['beta'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['beta'], shape=(self.numNodes,1))
self.beta_A = (numpy.array(self.parameters['beta_A']).reshape((self.numNodes, 1)) if isinstance(self.parameters['beta_A'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['beta_A'], shape=(self.numNodes,1))) if self.parameters['beta_A'] is not None else self.beta
self.sigma = numpy.array(self.parameters['sigma']).reshape((self.numNodes, 1)) if isinstance(self.parameters['sigma'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['sigma'], shape=(self.numNodes,1))
self.lamda = numpy.array(self.parameters['lamda']).reshape((self.numNodes, 1)) if isinstance(self.parameters['lamda'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['lamda'], shape=(self.numNodes,1))
self.gamma = numpy.array(self.parameters['gamma']).reshape((self.numNodes, 1)) if isinstance(self.parameters['gamma'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['gamma'], shape=(self.numNodes,1))
self.eta = numpy.array(self.parameters['eta']).reshape((self.numNodes, 1)) if isinstance(self.parameters['eta'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['eta'], shape=(self.numNodes,1))
self.gamma_A = (numpy.array(self.parameters['gamma_A']).reshape((self.numNodes, 1))if isinstance(self.parameters['gamma_A'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['gamma_A'], shape=(self.numNodes,1))) if self.parameters['gamma_A'] is not None else self.gamma
self.gamma_H = (numpy.array(self.parameters['gamma_H']).reshape((self.numNodes, 1))if isinstance(self.parameters['gamma_H'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['gamma_H'], shape=(self.numNodes,1))) if self.parameters['gamma_H'] is not None else self.gamma
self.mu_H = numpy.array(self.parameters['mu_H']).reshape((self.numNodes, 1)) if isinstance(self.parameters['mu_H'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['mu_H'], shape=(self.numNodes,1))
self.xi = numpy.array(self.parameters['xi']).reshape((self.numNodes, 1)) if isinstance(self.parameters['xi'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['xi'], shape=(self.numNodes,1))
self.mu_0 = numpy.array(self.parameters['mu_0']).reshape((self.numNodes, 1)) if isinstance(self.parameters['mu_0'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['mu_0'], shape=(self.numNodes,1))
self.nu = numpy.array(self.parameters['nu']).reshape((self.numNodes, 1)) if isinstance(self.parameters['nu'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['nu'], shape=(self.numNodes,1))
self.a = numpy.array(self.parameters['a']).reshape((self.numNodes, 1)) if isinstance(self.parameters['a'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['a'], shape=(self.numNodes,1))
self.h = numpy.array(self.parameters['h']).reshape((self.numNodes, 1)) if isinstance(self.parameters['h'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['h'], shape=(self.numNodes,1))
self.f = numpy.array(self.parameters['f']).reshape((self.numNodes, 1)) if isinstance(self.parameters['f'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['f'], shape=(self.numNodes,1))
self.p = numpy.array(self.parameters['p']).reshape((self.numNodes, 1)) if isinstance(self.parameters['p'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['p'], shape=(self.numNodes,1))
# Testing-related parameters:
self.beta_D = (numpy.array(self.parameters['beta_D']).reshape((self.numNodes, 1)) if isinstance(self.parameters['beta_D'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['beta_D'], shape=(self.numNodes,1))) if self.parameters['beta_D'] is not None else self.beta
self.sigma_D = (numpy.array(self.parameters['sigma_D']).reshape((self.numNodes, 1)) if isinstance(self.parameters['sigma_D'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['sigma_D'], shape=(self.numNodes,1))) if self.parameters['sigma_D'] is not None else self.sigma
self.lamda_D = (numpy.array(self.parameters['lamda_D']).reshape((self.numNodes, 1)) if isinstance(self.parameters['lamda_D'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['lamda_D'], shape=(self.numNodes,1))) if self.parameters['lamda_D'] is not None else self.lamda
self.gamma_D_S = (numpy.array(self.parameters['gamma_D_S']).reshape((self.numNodes, 1))if isinstance(self.parameters['gamma_D_S'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['gamma_D_S'], shape=(self.numNodes,1))) if self.parameters['gamma_D_S'] is not None else self.gamma
self.gamma_D_A = (numpy.array(self.parameters['gamma_D_A']).reshape((self.numNodes, 1))if isinstance(self.parameters['gamma_D_A'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['gamma_D_A'], shape=(self.numNodes,1))) if self.parameters['gamma_D_A'] is not None else self.gamma
self.eta_D = (numpy.array(self.parameters['eta_D']).reshape((self.numNodes, 1)) if isinstance(self.parameters['eta_D'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['eta_D'], shape=(self.numNodes,1))) if self.parameters['eta_D'] is not None else self.eta
self.theta_E = numpy.array(self.parameters['theta_E']).reshape((self.numNodes, 1)) if isinstance(self.parameters['theta_E'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['theta_E'], shape=(self.numNodes,1))
self.theta_pre = numpy.array(self.parameters['theta_pre']).reshape((self.numNodes, 1)) if isinstance(self.parameters['theta_pre'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['theta_pre'], shape=(self.numNodes,1))
self.theta_S = numpy.array(self.parameters['theta_S']).reshape((self.numNodes, 1)) if isinstance(self.parameters['theta_S'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['theta_S'], shape=(self.numNodes,1))
self.theta_A = numpy.array(self.parameters['theta_A']).reshape((self.numNodes, 1)) if isinstance(self.parameters['theta_A'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['theta_A'], shape=(self.numNodes,1))
self.phi_E = numpy.array(self.parameters['phi_E']).reshape((self.numNodes, 1)) if isinstance(self.parameters['phi_E'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['phi_E'], shape=(self.numNodes,1))
self.phi_pre = numpy.array(self.parameters['phi_pre']).reshape((self.numNodes, 1)) if isinstance(self.parameters['phi_pre'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['phi_pre'], shape=(self.numNodes,1))
self.phi_S = numpy.array(self.parameters['phi_S']).reshape((self.numNodes, 1)) if isinstance(self.parameters['phi_S'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['phi_S'], shape=(self.numNodes,1))
self.phi_A = numpy.array(self.parameters['phi_A']).reshape((self.numNodes, 1)) if isinstance(self.parameters['phi_A'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['phi_A'], shape=(self.numNodes,1))
self.d_E = numpy.array(self.parameters['d_E']).reshape((self.numNodes, 1)) if isinstance(self.parameters['d_E'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['d_E'], shape=(self.numNodes,1))
self.d_pre = numpy.array(self.parameters['d_pre']).reshape((self.numNodes, 1)) if isinstance(self.parameters['d_pre'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['d_pre'], shape=(self.numNodes,1))
self.d_S = numpy.array(self.parameters['d_S']).reshape((self.numNodes, 1)) if isinstance(self.parameters['d_S'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['d_S'], shape=(self.numNodes,1))
self.d_A = numpy.array(self.parameters['d_A']).reshape((self.numNodes, 1)) if isinstance(self.parameters['d_A'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['d_A'], shape=(self.numNodes,1))
self.q = numpy.array(self.parameters['q']).reshape((self.numNodes, 1)) if isinstance(self.parameters['q'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['q'], shape=(self.numNodes,1))
#Local transmission parameters:
if(self.parameters['beta_local'] is not None):
if(isinstance(self.parameters['beta_local'], (list, numpy.ndarray))):
if(isinstance(self.parameters['beta_local'], list)):
self.beta_local = numpy.array(self.parameters['beta_local'])
else: # is numpy.ndarray
self.beta_local = self.parameters['beta_local']
                if(self.beta_local.ndim == 1):
                    self.beta_local = self.beta_local.reshape((self.numNodes, 1))
                elif(self.beta_local.ndim == 2):
                    self.beta_local = self.beta_local.reshape((self.numNodes, self.numNodes))
else:
self.beta_local = numpy.full_like(self.beta, fill_value=self.parameters['beta_local'])
else:
self.beta_local = self.beta
#----------------------------------------
if(self.parameters['beta_A_local'] is not None):
if(isinstance(self.parameters['beta_A_local'], (list, numpy.ndarray))):
if(isinstance(self.parameters['beta_A_local'], list)):
self.beta_A_local = numpy.array(self.parameters['beta_A_local'])
else: # is numpy.ndarray
self.beta_A_local = self.parameters['beta_A_local']
                if(self.beta_A_local.ndim == 1):
                    self.beta_A_local = self.beta_A_local.reshape((self.numNodes, 1))
                elif(self.beta_A_local.ndim == 2):
                    self.beta_A_local = self.beta_A_local.reshape((self.numNodes, self.numNodes))
else:
self.beta_A_local = numpy.full_like(self.beta_A, fill_value=self.parameters['beta_A_local'])
else:
self.beta_A_local = self.beta_A
#----------------------------------------
if(self.parameters['beta_D_local'] is not None):
if(isinstance(self.parameters['beta_D_local'], (list, numpy.ndarray))):
if(isinstance(self.parameters['beta_D_local'], list)):
self.beta_D_local = numpy.array(self.parameters['beta_D_local'])
else: # is numpy.ndarray
self.beta_D_local = self.parameters['beta_D_local']
                if(self.beta_D_local.ndim == 1):
                    self.beta_D_local = self.beta_D_local.reshape((self.numNodes, 1))
                elif(self.beta_D_local.ndim == 2):
                    self.beta_D_local = self.beta_D_local.reshape((self.numNodes, self.numNodes))
else:
self.beta_D_local = numpy.full_like(self.beta_D, fill_value=self.parameters['beta_D_local'])
else:
self.beta_D_local = self.beta_D
# Pre-multiply beta values by the adjacency matrix ("transmission weight connections")
if(self.beta_local.ndim == 1):
self.A_beta = scipy.sparse.csr_matrix.multiply(self.A, numpy.tile(self.beta_local, (1,self.numNodes))).tocsr()
elif(self.beta_local.ndim == 2):
self.A_beta = scipy.sparse.csr_matrix.multiply(self.A, self.beta_local).tocsr()
# Pre-multiply beta_A values by the adjacency matrix ("transmission weight connections")
if(self.beta_A_local.ndim == 1):
self.A_beta_A = scipy.sparse.csr_matrix.multiply(self.A, numpy.tile(self.beta_A_local, (1,self.numNodes))).tocsr()
elif(self.beta_A_local.ndim == 2):
self.A_beta_A = scipy.sparse.csr_matrix.multiply(self.A, self.beta_A_local).tocsr()
# Pre-multiply beta_D values by the quarantine adjacency matrix ("transmission weight connections")
if(self.beta_D_local.ndim == 1):
self.A_Q_beta_D = scipy.sparse.csr_matrix.multiply(self.A_Q, numpy.tile(self.beta_D_local, (1,self.numNodes))).tocsr()
elif(self.beta_D_local.ndim == 2):
self.A_Q_beta_D = scipy.sparse.csr_matrix.multiply(self.A_Q, self.beta_D_local).tocsr()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Update scenario flags:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.update_scenario_flags()
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def node_degrees(self, Amat):
return Amat.sum(axis=0).reshape(self.numNodes,1) # sums of adj matrix cols
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_G(self, new_G):
self.G = new_G
# Adjacency matrix:
if type(new_G)==numpy.ndarray:
self.A = scipy.sparse.csr_matrix(new_G)
elif type(new_G)==networkx.classes.graph.Graph:
self.A = networkx.adj_matrix(new_G) # adj_matrix gives scipy.sparse csr_matrix
else:
raise BaseException("Input an adjacency matrix or networkx object only.")
self.numNodes = int(self.A.shape[1])
self.degree = numpy.asarray(self.node_degrees(self.A)).astype(float)
return
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_Q(self, new_Q):
self.Q = new_Q
# Quarantine Adjacency matrix:
if type(new_Q)==numpy.ndarray:
self.A_Q = scipy.sparse.csr_matrix(new_Q)
elif type(new_Q)==networkx.classes.graph.Graph:
self.A_Q = networkx.adj_matrix(new_Q) # adj_matrix gives scipy.sparse csr_matrix
else:
raise BaseException("Input an adjacency matrix or networkx object only.")
self.numNodes_Q = int(self.A_Q.shape[1])
self.degree_Q = numpy.asarray(self.node_degrees(self.A_Q)).astype(float)
assert(self.numNodes == self.numNodes_Q), "The normal and quarantine adjacency graphs must be of the same size."
return
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_scenario_flags(self):
self.testing_scenario = ( (numpy.any(self.d_E) and (numpy.any(self.theta_E) or numpy.any(self.phi_E)))
or (numpy.any(self.d_pre) and (numpy.any(self.theta_pre) or numpy.any(self.phi_pre)))
or (numpy.any(self.d_S) and (numpy.any(self.theta_S) or numpy.any(self.phi_S)))
or (numpy.any(self.d_A) and (numpy.any(self.theta_A) or numpy.any(self.phi_A))) )
        self.tracing_scenario = ( (numpy.any(self.d_E) and numpy.any(self.phi_E))
                                  or (numpy.any(self.d_pre) and numpy.any(self.phi_pre))
                                  or (numpy.any(self.d_S) and numpy.any(self.phi_S))
                                  or (numpy.any(self.d_A) and numpy.any(self.phi_A)) )
import os
from os import walk
import random
from random import shuffle

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

import keras
import tensorflow as tf
from keras.models import Model, Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, concatenate, Input, Conv1D, MaxPooling1D, Flatten
from keras import optimizers

from sklearn import metrics, preprocessing
from sklearn.metrics import accuracy_score, confusion_matrix, f1_score, precision_score, recall_score, precision_recall_curve
from sklearn.model_selection import train_test_split, KFold

newlist = []
# The GPU id to use
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="3"
#Files of CSV we get from preprocessing
def gettingfilenames(path):
f= []
for (dirpath,dirnames,filenames) in walk(path):
f.extend(filenames)
return f
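# e.g. gettingfilenames("img") lists the CSV files produced by preprocessing for the 'img' class;
# note that walk() recurses, so filenames found in subdirectories are included as well.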
def meanvalue(array):
horizantal = [sum(c) for c in zip(*array)]
horizantal1 = [float(pixel / len(array)) for pixel in horizantal]
horizantal1 = np.array(horizantal1)
return horizantal1
def build_model():
input_img = Input(shape=(100, 100, 1))
model1 = Conv2D(50, kernel_size=(3, 3), activation='tanh', dilation_rate=(2, 2), padding='valid')(input_img)
model1 = MaxPooling2D(pool_size=(2, 2))(model1)
model1 = Dropout(0.1)(model1)
model1 = Conv2D(50, kernel_size=(3, 3), activation='tanh', dilation_rate=(2, 2), padding='valid')(model1)
model1 = MaxPooling2D(pool_size=(2, 2))(model1)
model1 = Dropout(0.1)(model1)
model1 = Conv2D(50, kernel_size=(3, 3), activation='tanh', dilation_rate=(2, 2), padding='valid')(model1)
model1 = MaxPooling2D(pool_size=(2, 2))(model1)
model1 = Dropout(0.1)(model1)
outmodelf = Flatten()(model1)
model = Dense(units=50, activation='tanh', input_dim=50, kernel_initializer='uniform')(outmodelf)
model = Dense(units=5, activation='softmax', kernel_initializer='uniform')(model)
model = Model(input_img, model)
return model
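# The builder above stacks three dilated 3x3 convolution blocks (50 filters, tanh activation,
# 2x2 max-pooling and 10% dropout each) on a 100x100x1 input, then flattens into a 50-unit
# dense layer followed by a 5-way softmax (one unit per block class: img/text/table/math/lined).
# Quick sanity check (illustrative): build_model().summary()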
files2 = gettingfilenames("text")
files1 = gettingfilenames("img")
files3 = gettingfilenames("table")
files4 = gettingfilenames("math")
files5 = gettingfilenames("lined")
##Data Balancing
newlist1 = []
newlist2 = []
newlist3 = []
newlist4 = []
newlist5 = []
leng = []
for g in range(0,len(files1)):
df = pd.read_csv("img/%s" % str(files1[g]))
print("img/%s" % str(files1[g]))
x1 = df.values
newlist1.extend(x1)
for g1 in range(0,len(files2)):
df1 = pd.read_csv("text/%s" % str(files2[g1]))
print("text/%s" % str(files2[g1]))
x2 = df1.values
newlist2.extend(x2)
for g2 in range(0,len(files3)):
df2 = pd.read_csv("table/%s" % str(files3[g2]))
print("table/%s" % str(files3[g2]))
x3 = df2.values
newlist3.extend(x3)
for g3 in range(0,len(files4)):
df3 = pd.read_csv("math/%s" % str(files4[g3]))
print("math/%s" % str(files4[g3]))
x4 = df3.values
newlist4.extend(x4)
for g4 in range(0,len(files5)):
df4 = pd.read_csv("lined/%s"%str(files5[g4]))
print("lined/%s" % str(files5[g4]))
x5 = df4.values
newlist5.extend(x5)
leng.append((len(newlist1),len(newlist2),len(newlist3),len(newlist4),len(newlist5)))
leng = sorted(leng[0])
print(leng[0])
newlist1n = random.sample(newlist1,leng[0])
newlist2n = random.sample(newlist2,leng[0])
newlist3n = random.sample(newlist3,leng[0])
newlist4n = random.sample(newlist4,leng[0])
newlist5n = random.sample(newlist5,leng[0])
newlist = newlist1n+newlist2n+newlist3n+newlist4n+newlist5n
newlist = random.sample(newlist, len(newlist))
X = []
Y = []
# Reshaping into 100 * 100 Boxes
for t in range(len(newlist)):
x2 = np.reshape(newlist[t][1:-1], (-1, 100))
y = newlist[t][-1]
X.append(x2)
Y.append(y)
# print("X abnd y done")
acc = []
kf = KFold(n_splits = 5)
train_index_list = []
test_index_list = []
print ("preprocessing done")
results = []
X = np.array(X)
Y = np.array(Y)
lb = preprocessing.LabelBinarizer()
Y = lb.fit_transform(Y)
FPR = []
TPR = []
F1score = []
precision = []
recall = []
index_train_l = []
index_test_l = []
for train_index, test_index in kf.split(X):
index_train_l.append(train_index)
index_test_l.append(test_index)
labelpredict = []
labeltest = []
for q in range(len(index_test_l)):
# for train_index,test_index in kf.split(X):
labelpredict = []
labeltest = []
X = np.array(X)
Y = np.array(Y)
X_train, X_test = X[index_train_l[q]], X[index_test_l[q]]
y_train, y_test = Y[index_train_l[q]], Y[index_test_l[q]]
    model = build_model()  # CNN defined in build_model() above
SGD = optimizers.SGD(lr=0.1)
model.compile(optimizer=SGD, loss='mean_squared_error', metrics=['accuracy'])
    X1_test = np.expand_dims(X_test, axis=3)
# This file is part of DagAmendment, the reference implementation of:
#
# <NAME> and <NAME> (2021).
# DAG Amendment for Inverse Control of Parametric Shapes
# ACM Transactions on Graphics (Proc. SIGGRAPH 2021), 173:1-173:14.
#
# Copyright (c) 2020-2021 -- Télécom Paris (<NAME> <<EMAIL>>)
#
# The MIT license:
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the “Software”), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# The Software is provided “as is”, without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and non-infringement. In no event shall the
# authors or copyright holders be liable for any claim, damages or other
# liability, whether in an action of contract, tort or otherwise, arising
# from, out of or in connection with the software or the use or other dealings
# in the Software.
# no bpy here
from ..props import IntProperty, FloatProperty, BoolProperty
import numpy as np
from numpy.linalg import norm
from .AbstractJFilter import AbstractJFilter
class NextJFilter(AbstractJFilter):
"""
Experiments toward the next jfilter
"""
diffparam_label = "Next JFilter"
diffparam_default = True
extra_radius: IntProperty(
name = "Extra Radius",
description = "Offset added to the radius to define where to sample points of negative influence",
default = 40,
)
contrast_dropout_threshold: FloatProperty(
name = "Contrast Dropout Threshold",
        description = "Ratio to the best contrast (between inner and outer radii) below which a parameter's influence is ignored",
default = 0.75,
min = 0.0,
max = 1.0,
)
variation_dropout_threshold: FloatProperty(
name = "Variation Dropout Threshold",
description = "Ratio to the least coefficient of variation beyond which the parameter is considered too noisy and hence ignored",
default = 5.0,
min = 1.0,
)
def __init__(self):
self.min_least_variation = 1e-2
def reduce_jacobian(self, brush_radius, sample_points):
# The value exposed to the user is the inverse of lambda_v (more intuitive)
lambda_v = 1 / self.variation_dropout_threshold
lambda_c = self.contrast_dropout_threshold
        inside_brush_mask = norm(sample_points.ss_offsets, ord=2, axis=1)
from __future__ import print_function, division
import torch
import os
from os.path import exists, join, basename
from skimage import io
import pandas as pd
import numpy as np
from torch.utils.data import Dataset
from geotnf.transformation import GeometricTnf
from torch.autograd import Variable
from geotnf.transformation import homography_mat_from_4_pts
class SynthDataset(Dataset):
"""
Synthetically transformed pairs dataset for training with strong supervision
Args:
csv_file (string): Path to the csv file with image names and transformations.
training_image_path (string): Directory with all the images.
transform (callable): Transformation for post-processing the training pair (eg. image normalization)
Returns:
Dict: {'image': full dataset image, 'theta': desired transformation}
"""
def __init__(self,
dataset_csv_path,
dataset_csv_file,
dataset_image_path,
output_size=(480,640),
geometric_model='affine',
dataset_size=0,
transform=None,
random_sample=False,
random_t=0.5,
random_s=0.5,
random_alpha=1/6,
random_t_tps=0.4,
four_point_hom=True):
self.out_h, self.out_w = output_size
# read csv file
self.train_data = pd.read_csv(os.path.join(dataset_csv_path,dataset_csv_file))
self.random_sample = random_sample
self.random_t = random_t
self.random_t_tps = random_t_tps
self.random_alpha = random_alpha
self.random_s = random_s
self.four_point_hom = four_point_hom
self.dataset_size = dataset_size
if dataset_size!=0:
dataset_size = min((dataset_size,len(self.train_data)))
self.train_data = self.train_data.iloc[0:dataset_size,:]
self.img_names = self.train_data.iloc[:,0]
if self.random_sample==False:
            self.theta_array = self.train_data.iloc[:, 1:].values.astype('float')  # .values is a property, not a method
# copy arguments
self.dataset_image_path = dataset_image_path
self.transform = transform
self.geometric_model = geometric_model
self.affineTnf = GeometricTnf(out_h=self.out_h, out_w=self.out_w, use_cuda = False)
def __len__(self):
return len(self.train_data)
def __getitem__(self, idx):
if self.random_sample and self.dataset_size==1:
np.random.seed(1) # for debugging purposes
# read image
img_name = os.path.join(self.dataset_image_path, self.img_names[idx])
image = io.imread(img_name)
# read theta
if self.random_sample==False:
theta = self.theta_array[idx, :]
if self.geometric_model=='affine':
# reshape theta to 2x3 matrix [A|t] where
# first row corresponds to X and second to Y
# theta = theta[[0,1,4,2,3,5]].reshape(2,3)
theta = theta[[3,2,5,1,0,4]] #.reshape(2,3)
if self.geometric_model=='tps':
theta = np.expand_dims(np.expand_dims(theta,1),2)
if self.geometric_model=='afftps':
theta[[0,1,2,3,4,5]] = theta[[3,2,5,1,0,4]]
else:
if self.geometric_model=='affine' or self.geometric_model=='afftps':
rot_angle = (np.random.rand(1)-0.5)*2*np.pi/12; # between -np.pi/12 and np.pi/12
sh_angle = (np.random.rand(1)-0.5)*2*np.pi/6; # between -np.pi/6 and np.pi/6
lambda_1 = 1+(2*np.random.rand(1)-1)*0.25; # between 0.75 and 1.25
lambda_2 = 1+(2*np.random.rand(1)-1)*0.25; # between 0.75 and 1.25
tx=(2*np.random.rand(1)-1)*0.25; # between -0.25 and 0.25
ty=(2*np.random.rand(1)-1)*0.25;
                R_sh = np.array([[np.cos(sh_angle[0]), -np.sin(sh_angle[0])],
                                 [np.sin(sh_angle[0]), np.cos(sh_angle[0])]])
import unittest
import numpy as np
from UncertainSCI.families import JacobiPolynomials
class JacobiTestCase(unittest.TestCase):
"""
Performs basic tests for univariate Jacobi polynomials
"""
def setUp(self):
self.longMessage = True
# """
# Evaluation of orthogonal polynomials.
# """
def test_ratio(self):
""" Evaluation of orthogonal polynomial ratios. """
alpha = -1. + 10*np.random.rand(1)[0]
beta = -1. + 10*np.random.rand(1)[0]
J = JacobiPolynomials(alpha=alpha, beta=beta)
N = int(np.ceil(60*np.random.rand(1)))
x = (1 + 5*np.random.rand(1)) * (1 + np.random.rand(50))
y = (1 + 5*np.random.rand(1)) * (-1 - np.random.rand(50))
x = np.concatenate([x, y])
P = J.eval(x, range(N+1))
rdirect = np.zeros([x.size, N+1])
rdirect[:, 0] = P[:, 0]
rdirect[:, 1:] = P[:, 1:]/P[:, :-1]
r = J.r_eval(x, range(N+1))
delta = 1e-6
errs = np.abs(r-rdirect)
i, j = np.where(errs > delta)[:2]
if i.size > 0:
errstr = 'Failed for alpha={0:1.3f}, beta={1:1.3f}, n={2:d}, x={3:1.6f}'.format(alpha, beta, j[0], x[i[0]])
else:
errstr = ''
self.assertAlmostEqual(np.linalg.norm(errs, ord=np.inf), 0, delta=delta, msg=errstr)
def test_gq(self):
"""Gaussian quadrature integration accuracy"""
alpha = -1. + 10*np.random.rand(1)[0]
beta = -1. + 10*np.random.rand(1)[0]
J = JacobiPolynomials(alpha=alpha, beta=beta)
N = int(np.ceil(60*np.random.rand(1)))
x, w = J.gauss_quadrature(N)
w /= w.sum() # Force probability measure
V = J.eval(x, range(2*N))
integrals = np.dot(w, V)
integrals[0] -= V[0, 0] # Exact value
self.assertAlmostEqual(np.linalg.norm(integrals, ord=np.inf), 0.)
class IDistTestCase(unittest.TestCase):
"""
Tests for induced distributions.
"""
def test_idist_legendre(self):
"""Evaluation of Legendre induced distribution function."""
J = JacobiPolynomials(alpha=0., beta=0.)
n = int(np.ceil(25*np.random.rand(1))[0])
M = 25
x = -1. + 2*np.random.rand(M)
# JacobiPolynomials method
F1 = J.idist(x, n)
y, w = J.gauss_quadrature(n+1)
# Exact: integrate density
F2 = np.zeros(F1.shape)
for xind, xval in enumerate(x):
yquad = (y+1)/2.*(xval+1) - 1.
F2[xind] = np.dot(w, J.eval(yquad, n)**2) * (xval+1)/2
self.assertAlmostEqual(np.linalg.norm(F1-F2, ord=np.inf), 0.)
def test_fidist_jacobi(self):
"""Fast induced sampling routine for Jacobi polynomials."""
alpha = np.random.random()*11 - 1.
        beta = np.random.random()*11 - 1.
import networkx as nx
import numpy as np
from networks.abstract_network import Network
from networks.tools import save_graph_figure
from settings import PLANT_POLLINATOR_CSV_PATH
class ECO1(Network):
name = 'ECO1'
def __init__(self):
super().__init__()
def _create_adjacency_matrix(self):
plant_pollinator_list = []
with open(PLANT_POLLINATOR_CSV_PATH, 'r') as csv_file:
for line in csv_file.readlines():
split_line = line.strip().split(',')
plant_pollinator_list.append([int(item) for item in split_line])
m = np.array(plant_pollinator_list)
plants = m.shape[0]
pollinators = m.shape[1]
        b = np.zeros((pollinators, pollinators))
from os import listdir
from os.path import isfile, join
import pickle
import numpy as np
TASK_DICT = {'MRPC': 'mrpc', 'STS-B': 'STSBenchmark', 'SST-2': 'SST2'}
class BaseEncoder():
def __init__(self, model_name, encode_capacity, path_cache):
self.model_name = model_name
self.encode_capacity = encode_capacity
self.path_cache = path_cache
self.model = None
self.tokenizer = None
self.count = 0
def parse_model_name_to_cache_name(self, model_name, task, location):
if '/' in model_name:
temp = model_name.split('/')
task, model, exp_name, seed, ckpt = temp[5:]
task = TASK_DICT[task]
return "{}_{}_{}_{}_{}.pickle".format(task, model, exp_name, seed, ckpt)
else:
return "{}_{}_{}.pickle".format(model_name, task, location)
def load_cache(self, task, location):
cache_name = self.parse_model_name_to_cache_name(self.model_name, task, location)
onlyfiles = [f for f in listdir(self.path_cache) if isfile(join(self.path_cache, f))]
# ====== Look Up existing cache ====== #
if cache_name in onlyfiles:
print("cache Found {}".format(cache_name))
with open(join(self.path_cache, cache_name), 'rb') as f:
cache = pickle.load(f)
print("cache Loaded")
self.flag_cache_save = False
return cache
else:
print("cache not Found {}".format(cache_name))
self.flag_cache_save = True
return {}
def save_cache(self, task, location):
if self.flag_cache_save:
print("Start saving cache")
cache_name = self.parse_model_name_to_cache_name(self.model_name, task, location)
with open(join(self.path_cache, cache_name), 'wb') as f:
pickle.dump(self.cache, f, pickle.HIGHEST_PROTOCOL)
print("Saved cache {}".format(cache_name))
else:
print("Skipping saving cache")
def prepare(self, task, location):
self.cache = self.load_cache(task, location)
if bool(self.cache):
self.model = None
self.tokenizer = None
self.count = 0
else:
self.model, self.tokenizer = self.construct_encoder()
def get_mini_batch_size(self, sentences):
seq_length = max([len(tokens) for tokens in sentences])
mini_batch_size = self.encode_capacity // seq_length + 1
return mini_batch_size
def get_head_embedding(self, output, layer, head, head_size):
if head == -1:
embedding = output[:, layer, :]
else:
embedding = output[:, layer, head * head_size:(head + 1) * head_size]
return embedding
def get_multi_head_embedding(self, output, heads, head_size):
if len(heads) == 1: # If single attention head is probed
layer, head = heads[0]
embedding = self.get_head_embedding(output, layer, head, head_size)
else: # If multiple attention head is selected
list_embedding = []
for layer, head in heads:
embedding = self.get_head_embedding(output, layer, head, head_size)
list_embedding.append(embedding)
            embedding = np.concatenate(list_embedding, axis=1)
from equadratures.parameter import Parameter
from equadratures.poly import Poly
from equadratures.basis import Basis
from equadratures.scalers import scaler_minmax, scaler_meanvar, scaler_custom
import equadratures.plot as plot
import numpy as np
import scipy
import scipy.io
from scipy.linalg import orth, sqrtm
from scipy.spatial import ConvexHull
from scipy.special import comb
from scipy.optimize import linprog
import warnings
class Subspaces(object):
""" This class defines a subspaces object. It can be used for polynomial-based subspace dimension reduction.
Parameters
----------
method : str
The method to be used for subspace-based dimension reduction. Two options:
- ``active-subspace``, which uses ideas in [1] and [2] to compute a dimension-reducing subspace with a global polynomial approximant. Gradients evaluations of the polynomial approximation are used to compute the averaged outer product of the gradient covariance matrix. The polynomial approximation in the original full-space can be provided via ``full_space_poly``. Otherwise, it is fit internally to the data provided via ``sample_points`` and ``sample_outputs``.
- ``variable-projection`` [3], where a Gauss-Newton optimisation problem is solved to compute both the polynomial coefficients and its subspace, with the data provided via ``sample_points`` and ``sample_outputs``.
full_space_poly : Poly, optional
An instance of Poly fitted to the full-space data, to use for the AS computation.
sample_points : numpy.ndarray, optional
Array with shape (number_of_observations, dimensions) that corresponds to a set of sample points over the parameter space.
sample_outputs : numpy.ndarray, optional
Array with shape (number_of_observations, 1) that corresponds to model evaluations at the sample points.
subspace_dimension : int, optional
The dimension of the *active* subspace.
param_args : dict, optional
Arguments passed to parameters of the AS polynomial. (see :class:`~equadratures.parameter.Parameter`)
poly_args : dict , optional
Arguments passed to constructing polynomial used for AS computation. (see :class:`~equadratures.poly.Poly`)
dr_args : dict, optional
Arguments passed to customise the VP optimiser. See documentation for :meth:`~equadratures.subspaces.Subspaces._get_variable_projection` in source.
Examples
--------
Obtaining a 2D subspace via active subspaces on user data
>>> mysubspace = Subspaces(method='active-subspace', sample_points=X, sample_outputs=Y)
>>> eigs = mysubspace.get_eigenvalues()
>>> W = mysubspace.get_subspace()[:, :2]
>>> e = mysubspace.get_eigenvalues()
Obtaining a 2D subspace via active subspaces with a Poly object (remember to call set_model() on Poly first)
>>> mysubspace = Subspaces(method='active-subspace', full_space_poly=my_poly)
>>> eigs = mysubspace.get_eigenvalues()
>>> W = mysubspace.get_subspace()[:, :2]
>>> e = mysubspace.get_eigenvalues()
Obtaining a 2D subspace via variable projection on user data
>>> mysubspace = Subspaces(method='variable-projection', sample_points=X, sample_outputs=Y)
>>> W = mysubspace.get_subspace()[:, :2]
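    Obtaining a polynomial over the subspace, the zonotope vertices, and the linear inequalities defining them (a usage sketch relying only on methods defined below; outputs depend on the data)
    >>> subpoly = mysubspace.get_subspace_polynomial()
    >>> vertices = mysubspace.get_zonotope_vertices()
    >>> A, b = mysubspace.get_linear_inequalities()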
References
----------
1. <NAME>., (2015) Active Subspaces: Emerging Ideas for Dimension Reduction in Parameter Studies. SIAM Spotlights.
2. <NAME>., <NAME>., <NAME>., <NAME>., <NAME>. (2018) Turbomachinery Active Subspace Performance Maps. Journal of Turbomachinery, 140(4), 041003. `Paper <http://turbomachinery.asmedigitalcollection.asme.org/article.aspx?articleid=2668256>`__.
3. <NAME>., <NAME>., (2018) Data-driven Polynomial Ridge Approximation Using Variable Projection. SIAM Journal of Scientific Computing, 40(3), A1566-A1589. `Paper <https://epubs.siam.org/doi/abs/10.1137/17M1117690>`__.
"""
def __init__(self, method, full_space_poly=None, sample_points=None, sample_outputs=None,
subspace_dimension=2, polynomial_degree=2, param_args=None, poly_args=None, dr_args=None):
self.full_space_poly = full_space_poly
self.sample_points = sample_points
self.Y = None # for the zonotope vertices
self.sample_outputs = sample_outputs
self.method = method
self.subspace_dimension = subspace_dimension
self.polynomial_degree = polynomial_degree
my_poly_args = {'method': 'least-squares', 'solver_args': {}}
if poly_args is not None:
my_poly_args.update(poly_args)
self.poly_args = my_poly_args
my_param_args = {'distribution': 'uniform', 'order': self.polynomial_degree, 'lower': -1, 'upper': 1}
if param_args is not None:
my_param_args.update(param_args)
# I suppose we can detect if lower and upper is present to decide between these categories?
bounded_distrs = ['analytical', 'beta', 'chebyshev', 'arcsine', 'truncated-gaussian', 'uniform']
unbounded_distrs = ['gaussian', 'normal', 'gumbel', 'logistic', 'students-t', 'studentst']
semi_bounded_distrs = ['chi', 'chi-squared', 'exponential', 'gamma', 'lognormal', 'log-normal', 'pareto', 'rayleigh', 'weibull']
if dr_args is not None:
if 'standardize' in dr_args:
dr_args['standardise'] = dr_args['standardize']
if self.method.lower() == 'active-subspace' or self.method.lower() == 'active-subspaces':
self.method = 'active-subspace'
if dr_args is not None:
                self.standardise = dr_args.get('standardise', True)  # dr_args is a dict, so use .get() rather than getattr()
else:
self.standardise = True
if self.full_space_poly is None:
# user provided input/output data
N, d = self.sample_points.shape
if self.standardise:
self.data_scaler = scaler_minmax()
self.data_scaler.fit(self.sample_points)
self.std_sample_points = self.data_scaler.transform(self.sample_points)
else:
self.std_sample_points = self.sample_points.copy()
param = Parameter(**my_param_args)
if param_args is not None:
                    if ('lower' in param_args or 'upper' in param_args) and self.standardise:  # check the dict keys directly
warnings.warn('Points standardised but parameter range provided. Overriding default ([-1,1])...',
UserWarning)
myparameters = [param for _ in range(d)]
mybasis = Basis("total-order")
mypoly = Poly(myparameters, mybasis, sampling_args={'sample-points': self.std_sample_points,
'sample-outputs': self.sample_outputs},
**my_poly_args)
mypoly.set_model()
self.full_space_poly = mypoly
else:
# User provided polynomial
# Standardise according to distribution specified. Only care about the scaling (not shift)
# TODO: user provided callable with parameters?
user_params = self.full_space_poly.parameters
d = len(user_params)
self.sample_points = self.full_space_poly.get_points()
if self.standardise:
scale_factors = np.zeros(d)
centers = np.zeros(d)
for dd, p in enumerate(user_params):
if p.name.lower() in bounded_distrs:
scale_factors[dd] = (p.upper - p.lower) / 2.0
centers[dd] = (p.upper + p.lower) / 2.0
elif p.name.lower() in unbounded_distrs:
scale_factors[dd] = np.sqrt(p.variance)
centers[dd] = p.mean
else:
scale_factors[dd] = np.sqrt(p.variance)
centers[dd] = 0.0
self.param_scaler = scaler_custom(centers, scale_factors)
self.std_sample_points = self.param_scaler.transform(self.sample_points)
else:
self.std_sample_points = self.sample_points.copy()
if not hasattr(self.full_space_poly, 'coefficients'):
raise ValueError('Please call set_model() first on poly.')
self.sample_outputs = self.full_space_poly.get_model_evaluations()
# TODO: use dr_args for resampling of gradient points
as_args = {'grad_points': None}
if dr_args is not None:
as_args.update(dr_args)
self._get_active_subspace(**as_args)
elif self.method == 'variable-projection':
self.data_scaler = scaler_minmax()
self.data_scaler.fit(self.sample_points)
self.std_sample_points = self.data_scaler.transform(self.sample_points)
if dr_args is not None:
vp_args = {'gamma':0.1, 'beta':1e-4, 'tol':1e-7, 'maxiter':1000, 'U0':None, 'verbose':False}
vp_args.update(dr_args)
self._get_variable_projection(**vp_args)
else:
self._get_variable_projection()
def get_subspace_polynomial(self):
""" Returns a polynomial defined over the dimension reducing subspace.
Returns
-------
Poly
A Poly object that defines a polynomial over the subspace. The distribution of parameters
is assumed to be uniform and the maximum and minimum bounds for each parameter are defined by the maximum
and minimum values of the project samples.
"""
# TODO: Try correlated poly here
active_subspace = self._subspace[:, 0:self.subspace_dimension]
projected_points = np.dot(self.std_sample_points, active_subspace)
myparameters = []
for i in range(0, self.subspace_dimension):
param = Parameter(distribution='uniform', lower=np.min(projected_points[:, i]),
upper=np.max(projected_points[:, i]), order=self.polynomial_degree)
myparameters.append(param)
mybasis = Basis("total-order")
subspacepoly = Poly(myparameters, mybasis, method='least-squares',
sampling_args={'sample-points': projected_points,
'sample-outputs': self.sample_outputs})
subspacepoly.set_model()
return subspacepoly
def get_eigenvalues(self):
""" Returns the eigenvalues of the dimension reducing subspace. Note: this option is
currently only valid for method ``active-subspace``.
Returns
-------
numpy.ndarray
Array of shape (dimensions,) corresponding to the eigenvalues of the above mentioned covariance matrix.
"""
if self.method == 'active-subspace':
return self._eigenvalues
else:
print('Only the active-subspace method yields eigenvalues.')
def get_subspace(self):
""" Returns the dimension reducing subspace.
Returns
-------
numpy.ndarray
Array of shape (dimensions, dimensions) where the first ``subspace_dimension`` columns
contain the dimension reducing subspace, while the remaining columns contain its orthogonal complement.
"""
return self._subspace
def _get_active_subspace(self, grad_points=None, **kwargs):
""" Private method to compute active subspaces. """
if grad_points is None:
X = self.full_space_poly.get_points()
else:
if hasattr(self, 'data_scaler'):
X = self.data_scaler.transform(grad_points)
else:
# Either no standardisation, or user provided poly + param scaling
X = grad_points.copy()
M, d = X.shape
if d != self.sample_points.shape[1]:
raise ValueError('In _get_active_subspace: dimensions of gradient evaluation points mismatched with input dimension!')
alpha = 2.0
num_grad_lb = alpha * self.subspace_dimension * np.log(d)
if M < num_grad_lb:
warnings.warn('Number of gradient evaluation points is likely to be insufficient. Consider resampling!', UserWarning)
polygrad = self.full_space_poly.get_polyfit_grad(X)
if hasattr(self, 'param_scaler'):
# Evaluate gradient in transformed coordinate space
polygrad = self.param_scaler.div[:, np.newaxis] * polygrad
weights = np.ones((M, 1)) / M
R = polygrad.transpose() * weights
C = np.dot(polygrad, R )
# Compute eigendecomposition!
e, W = np.linalg.eigh(C)
idx = e.argsort()[::-1]
eigs = e[idx]
eigVecs = W[:, idx]
if hasattr(self, 'data_scaler'):
scale_factors = 2.0 / (self.data_scaler.Xmax - self.data_scaler.Xmin)
eigVecs = scale_factors[:, np.newaxis] * eigVecs
eigVecs = np.linalg.qr(eigVecs)[0]
self._subspace = eigVecs
self._eigenvalues = eigs
def _get_variable_projection(self, gamma=0.1, beta=1e-4, tol=1e-7, maxiter=1000, U0=None, verbose=False):
""" Private method to obtain an active subspace in inputs design space via variable projection.
Note: It may help to standardize outputs to zero mean and unit variance
Parameters
----------
gamma : float, optional
Step length reduction factor (0,1).
beta : float, optional
Armijo tolerance for backtracking line search (0,1).
tol : float, optional
Tolerance for convergence, measured in the norm of residual over norm of f.
maxiter : int, optional
Maximum number of optimisation iterations.
U0 : numpy.ndarray, optional
Initial guess for active subspace.
verbose : bool, optional
Set to ``True`` for debug messages.
"""
# NOTE: How do we know these are the best values of gamma and beta?
M, m = self.std_sample_points.shape
if U0 is None:
Z = np.random.randn(m, self.subspace_dimension)
U, _ = np.linalg.qr(Z)
else:
U = orth(U0)
y = np.dot(self.std_sample_points,U)
minmax = np.zeros((2, self.subspace_dimension))
minmax[0, :] = np.amin(y, axis=0)
minmax[1, :] = np.amax(y, axis=0)
# Construct the affine transformation
eta = 2 * np.divide((y - minmax[0,:]), (minmax[1,:]-minmax[0,:])) - 1
# Construct the Vandermonde matrix step 6
V, poly_obj = vandermonde(eta, self.polynomial_degree)
V_plus = np.linalg.pinv(V)
coeff = np.dot(V_plus, self.sample_outputs)
res = self.sample_outputs - np.dot(V,coeff)
# R = np.linalg.norm(res)
# TODO: convergence criterion??
for iteration in range(0,maxiter):
# Construct the Jacobian step 9
J = jacobian_vp(V, V_plus, U, self.sample_outputs, poly_obj, eta, minmax, self.std_sample_points)
# Calculate the gradient of Jacobian (step 10)
G = np.zeros((m, self.subspace_dimension))
# NOTE: Can be vectorised
for i in range(0, M):
G += res[i]*J[i, :, :]
# conduct the SVD for J_vec
vec_J = np.reshape(J, (M, m*self.subspace_dimension))
Y, S, Z = np.linalg.svd(vec_J,full_matrices=False) # step 11
# obtain delta
delta = np.dot(Y[:,:-self.subspace_dimension**2].T, res)
delta = np.dot(np.diag(1/S[:-self.subspace_dimension**2]), delta)
delta = -np.dot(Z[:-self.subspace_dimension**2,:].T, delta).reshape(U.shape)
# carry out Gauss-Newton step
vec_delta=delta.flatten() # step 12
# vectorize G step 13
vec_G = G.flatten()
alpha = np.dot(vec_G.T, vec_delta)
norm_G = np.dot(vec_G.T, vec_G)
# check alpha step 14
if alpha >= 0:
delta = -G
alpha = -norm_G
# SVD on delta step 17
Y, S, Z = np.linalg.svd(delta, full_matrices=False)
UZ = np.dot(U,Z.T)
t = 1
for iter2 in range(0,20):
U_new = np.dot(UZ, np.diag(np.cos(S*t))) + np.dot(Y, np.diag(np.sin(S*t)))#step 19
U_new = orth(U_new)
# Update the values with the new U matrix
y = np.dot(self.std_sample_points, U_new)
minmax[0,:] = np.amin(y, axis=0)
minmax[1,:] = np.amax(y, axis=0)
eta = 2 * np.divide((y - minmax[0,:]), (minmax[1,:]-minmax[0,:])) - 1
V_new, poly_obj = vandermonde(eta, self.polynomial_degree)
V_plus_new = np.linalg.pinv(V_new)
coeff_new = np.dot(V_plus_new, self.sample_outputs)
res_new = self.sample_outputs - np.dot(V_new,coeff_new)
R_new = np.linalg.norm(res_new)
if np.linalg.norm(res_new) <= np.linalg.norm(res)+alpha*beta*t or t < 1e-10: # step 21
break
t = t * gamma
dist_change = subspace_dist(U, U_new)
U = U_new
V = V_new
# coeff = coeff_new
V_plus = V_plus_new
res = res_new
# R = R_new
if dist_change < tol:
if verbose:
print("VP finished with %d iterations" % iteration)
break
if iteration == maxiter - 1 and verbose:
print("VP finished with %d iterations" % iteration)
active_subspace = U
inactive_subspace = _null_space(active_subspace.T)
self._subspace = np.hstack([active_subspace, inactive_subspace])
def get_zonotope_vertices(self, num_samples=10000, max_count=100000):
""" Returns the vertices of the zonotope -- the projection of the high-dimensional space over the computed
subspace.
Parameters
----------
num_samples : int, optional
Number of samples per iteration to check.
max_count : int, optional
Maximum number of iteration.
Returns
-------
numpy.ndarray
Array of shape (number of vertices, ``subspace_dimension``).
Note
----
This routine has been adapted from <NAME>'s zonotope_vertices() function; see reference below.
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., (2016) Python Active-Subspaces Utility Library. Journal of Open Source Software, 1(5), 79. `Paper <http://joss.theoj.org/papers/10.21105/joss.00079>`__.
"""
m = self._subspace.shape[0]
n = self.subspace_dimension
W = self._subspace[:, :n]
if n == 1:
y0 = np.dot(W.T, np.sign(W))[0]
if y0 < -y0:
yl, yu = y0, -y0
xl, xu = np.sign(W), -np.sign(W)
else:
yl, yu = -y0, y0
xl, xu = -np.sign(W), np.sign(W)
Y = np.array([yl, yu]).reshape((2,1))
X = np.vstack((xl.reshape((1,m)), xu.reshape((1,m))))
self.Y = Y
return Y
else:
total_vertices = 0
for i in range(n):
total_vertices += comb(m-1,i)
total_vertices = int(2*total_vertices)
Z = np.random.normal(size=(num_samples, n))
X = get_unique_rows(np.sign(np.dot(Z, W.transpose())))
X = get_unique_rows(np.vstack((X, -X)))
N = X.shape[0]
count = 0
while N < total_vertices:
Z = np.random.normal(size=(num_samples, n))
X0 = get_unique_rows(np.sign(np.dot(Z, W.transpose())))
X0 = get_unique_rows(np.vstack((X0, -X0)))
X = get_unique_rows(np.vstack((X, X0)))
N = X.shape[0]
count += 1
if count > max_count:
break
num_vertices = X.shape[0]
if total_vertices > num_vertices:
print('Warning: {} of {} vertices found.'.format(num_vertices, total_vertices))
Y = np.dot(X, W)
self.Y = Y.reshape((num_vertices, n))
return self.Y
def get_linear_inequalities(self):
""" Returns the linear inequalities defining the zonotope vertices, i.e., Ax<=b.
Returns
-------
tuple
Tuple (A,b), containing the numpy.ndarray's A and b; where A is the matrix for setting the linear inequalities,
and b is the right-hand-side vector for setting the linear inequalities.
"""
if self.Y is None:
self.Y = self.get_zonotope_vertices()
n = self.Y.shape[1]
if n == 1:
A = np.array([[1],[-1]])
b = np.array([[max(self.Y)],[min(self.Y)]])
return A, b
else:
convexHull = ConvexHull(self.Y)
A = convexHull.equations[:,:n]
b = -convexHull.equations[:,n]
return A, b
def get_samples_constraining_active_coordinates(self, inactive_samples, active_coordinates):
""" A hit and run type sampling strategy for generating samples at a given coordinate in the active subspace
by varying its coordinates along the inactive subspace.
Parameters
----------
inactive_samples : int
The number of inactive samples required.
        active_coordinates : numpy.ndarray
The active subspace coordinates.
Returns
-------
numpy.ndarray
Array containing the full-space coordinates.
Note
----
This routine has been adapted from <NAME>'s hit_and_run() function; see reference below.
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., (2016) Python Active-Subspaces Utility Library. Journal of Open Source Software, 1(5), 79. `Paper <http://joss.theoj.org/papers/10.21105/joss.00079>`__.
"""
y = active_coordinates
N = inactive_samples
W1 = self._subspace[:, :self.subspace_dimension]
W2 = self._subspace[:, self.subspace_dimension:]
m, n = W1.shape
s = np.dot(W1, y).reshape((m, 1))
normW2 = np.sqrt(np.sum(np.power(W2, 2), axis=1)).reshape((m, 1))
A = np.hstack((np.vstack((W2, -W2.copy())), np.vstack((normW2, normW2.copy()))))
b = np.vstack((1 - s, 1 + s)).reshape((2 * m, 1))
c = np.zeros((m - n + 1, 1))
c[-1] = -1.0
# print()
zc = linear_program_ineq(c, -A, -b)
z0 = zc[:-1].reshape((m - n, 1))
# define the polytope A >= b
s = np.dot(W1, y).reshape((m, 1))
A = np.vstack((W2, -W2))
b = np.vstack((-1 - s, -1 + s)).reshape((2 * m, 1))
# tolerance
ztol = 1e-6
eps0 = ztol / 4.0
Z = np.zeros((N, m - n))
for i in range(N):
# random direction
bad_dir = True
count, maxcount = 0, 50
while bad_dir:
d = np.random.normal(size=(m - n, 1))
bad_dir = np.any(np.dot(A, z0 + eps0 * d) <= b)
count += 1
if count >= maxcount:
Z[i:, :] = np.tile(z0, (1, N - i)).transpose()
yz = np.vstack([np.repeat(y[:, np.newaxis], N, axis=1), Z.T])
return np.dot(self._subspace, yz).T
# find constraints that impose lower and upper bounds on eps
f, g = b - np.dot(A, z0), np.dot(A, d)
# find an upper bound on the step
            min_ind = np.logical_and(g <= 0, f < -np.sqrt(np.finfo(float).eps))
            eps_max = np.amin(f[min_ind] / g[min_ind])
            # find a lower bound on the step
            max_ind = np.logical_and(g > 0, f < -np.sqrt(np.finfo(float).eps))
eps_min = np.amax(f[max_ind] / g[max_ind])
# randomly sample eps
eps1 = np.random.uniform(eps_min, eps_max)
# take a step along d
z1 = z0 + eps1 * d
Z[i, :] = z1.reshape((m - n,))
# update temp var
z0 = z1.copy()
yz = np.vstack([np.repeat(y[:, np.newaxis], N, axis=1), Z.T])
return np.dot(self._subspace, yz).T
def plot_sufficient_summary(self, ax=None, X_test=None, y_test=None, show=True, poly=True, uncertainty=False, legend=False, scatter_kwargs={}, plot_kwargs={}):
""" Generates a sufficient summary plot for 1D or 2D polynomial ridge approximations.
See :meth:`~equadratures.plot.plot_sufficient_summary` for full description. """
return plot.plot_sufficient_summary(self, ax, X_test, y_test, show, poly, uncertainty, legend, scatter_kwargs, plot_kwargs)
def plot_2D_contour_zonotope(self, mysubspace, minmax=[- 3.5, 3.5], grid_pts=180, show=True, ax=None):
""" Generates a 2D contour plot of the polynomial ridge approximation.
See :meth:`~equadratures.plot.plot_2D_contour_zonotope` for full description. """
return plot.plot_2D_contour_zonotope(self,minmax,grid_pts,show,ax)
def plot_samples_from_second_subspace_over_first(self, mysubspace_2, axs=None, no_of_samples=500, minmax=[- 3.5, 3.5], grid_pts=180, show=True):
"""
Generates a zonotope plot where samples from the second subspace are projected over the first.
See :meth:`~equadratures.plot.plot_samples_from_second_subspace_over_first` for full description.
"""
return plot.plot_samples_from_second_subspace_over_first(self,mysubspace_2, axs, no_of_samples, minmax, grid_pts, show)
def vandermonde(eta, p):
# TODO: Try using a "correlated" basis here?
_, n = eta.shape
listing = []
for i in range(0, n):
listing.append(p)
Object=Basis('total-order',listing)
# Establish n Parameter objects
params = []
P = Parameter(order=p, lower=-1, upper=1, distribution='uniform')
for i in range(0, n):
params.append(P)
# Use the params list to establish the Poly object
poly_obj = Poly(params, Object, method='least-squares')
V = poly_obj.get_poly(eta)
V = V.T
return V, poly_obj
def vector_AS(list_of_polys, R = None, alpha=None, k=None, samples=None, bootstrap=False, bs_trials = 50
, J = None, save_path = None):
# Find AS directions to vector val func
# analogous to computeActiveSubspace
# Since we are dealing with *one* vector val func we should have just one input space
# Take the first of the polys.
poly = list_of_polys[0]
if samples is None:
d = poly.dimensions
if alpha is None:
alpha = 4
if k is None or k > d:
k = d
M = int(alpha * k * np.log(d))
X = np.zeros((M, d))
for j in range(0, d):
X[:, j] = np.reshape(poly.parameters[j].getSamples(M), M)
else:
X = samples
M, d = X.shape
n = len(list_of_polys) # number of outputs
if R is None:
R = np.eye(n)
elif len(R.shape) == 1:
R = np.diag(R)
if J is None:
J = jacobian_vec(list_of_polys,X)
if not(save_path is None):
np.save(save_path,J)
J_new = np.matmul(sqrtm(R), np.transpose(J,[2,0,1]))
JtJ = np.matmul(np.transpose(J_new,[0,2,1]), J_new)
H = np.mean(JtJ,axis=0)
# Compute P_r by solving generalized eigenvalue problem...
# Assume sigma = identity for now
e, W = np.linalg.eigh(H)
eigs = np.flipud(e)
    eigVecs = np.fliplr(W)
"""
Typical usage
=================
To demonstrate the use of the Hankel Transform class, we will give an example
of propagating a radially-symmetric beam using the beam propagation method.
In this case, it will be a simple Gaussian beam propagating away from focus and
diverging.
First we will use a loop over :math:`z` position, and then we will demonstrate
the vectorisation of the :func:`.HankelTransforms.iqdht` (and
:func:`~.HankelTransforms.qdht`) functions.
"""
# %%
# First import the standard libraries
import matplotlib.pyplot as plt
import numpy as np
# %%
# Then the functions from this package
from pyhank import HankelTransform
# noinspection PyUnresolvedReferences
from helper import gauss1d, imagesc
# %%
# Initialise radius grid
nr = 1024 # Number of sample points
r_max = 5e-3 # Maximum radius (5mm)
r = np.linspace(0, r_max, nr)
# %%
# Initialise :math:`z` grid
Nz = 200 # Number of z positions
z_max = 0.1 # Maximum propagation distance
z = np.linspace(0, z_max, Nz) # Propagation axis
# %%
# Set up beam parameters
Dr = 100e-6 # Beam radius (100um)
lambda_ = 488e-9 # wavelength 488nm
k0 = 2 * np.pi / lambda_ # Vacuum k vector
# %%
# Set up a :class:`.HankelTransform` object, telling it the order (``0``) and
# the radial grid.
H = HankelTransform(order=0, radial_grid=r)
# %%
# Set up the electric field profile at :math:`z = 0`, and resample onto the correct radial grid
# (``transformer.r``) as required for the QDHT.
Er = gauss1d(r, 0, Dr) # Initial field
ErH = H.to_transform_r(Er) # Resampled field
# %%
# Perform Hankel Transform
# ------------------------
# Convert from physical field to physical wavevector
EkrH = H.qdht(ErH)
# %%
# Propagate the beam - loop
# -------------------------
# Do the propagation in a loop over :math:`z`
# Pre-allocate an array for field as a function of r and z
Erz = np.zeros((nr, Nz), dtype=complex)
kz = np.sqrt(k0 ** 2 - H.kr ** 2)
for i, z_loop in enumerate(z):
phi_z = kz * z_loop # Propagation phase
EkrHz = EkrH * np.exp(1j * phi_z) # Apply propagation
ErHz = H.iqdht(EkrHz) # iQDHT
Erz[:, i] = H.to_original_r(ErHz) # Interpolate output
Irz = np.abs(Erz) ** 2
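# %%
# Propagate the beam - avoiding the explicit z loop
# -------------------------------------------------
# A sketch of the vectorised approach promised in the introduction (an assumption,
# not part of the original example): the propagation phases for every z position
# are built in a single broadcast, and only the inverse transform is applied per
# plane. Newer pyhank releases may accept 2D arrays directly, which would remove
# the remaining comprehension.
phi_all = kz[:, np.newaxis] * z[np.newaxis, :]            # (nr, Nz) propagation phases
EkrHz_all = EkrH[:, np.newaxis] * np.exp(1j * phi_all)    # propagated k-space fields
Erz_vec = np.stack([H.to_original_r(H.iqdht(EkrHz_all[:, i]))
                    for i in range(Nz)], axis=1)          # back to real space, plane by plane
assert np.allclose(np.abs(Erz_vec) ** 2, Irz)             # matches the loop result above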
# %%
# Plotting
# --------
# Plot the initial field and radial wavevector distribution (given by the
# Hankel transform)
plt.figure()
plt.plot(r * 1e3, np.abs(Er) ** 2, r * 1e3, np.unwrap(np.angle(Er)),
H.r * 1e3, np.abs(ErH) ** 2, H.r * 1e3, np.unwrap(np.angle(ErH)), '+')
plt.title('Initial electric field distribution')
plt.xlabel('Radial co-ordinate (r) /mm')
plt.ylabel('Field intensity /arb.')
plt.legend(['$|E(r)|^2$', '$\\phi(r)$', '$|E(H.r)|^2$', '$\\phi(H.r)$'])
plt.axis([0, 1, 0, 1])
plt.figure()
plt.plot(H.kr, np.abs(EkrH) ** 2)
import numpy as np
def test_dgelsd():
A = np.array([[0,1],[1,1],[2,1],[3,1]],np.double)
B = np.array([-1,0.2,0.9,2.1],np.double)
work = np.zeros(802,np.double);
answer = np.linalg.lapack_lite.dgelsd(
4,2,1,A,4,B,4,np.zeros(2),-1,0,work,802,np.zeros(20,np.int32),0)
print(answer)
print(B)
def test_lstsq_1():
A = np.array([[0,1],[1,1],[2,1],[3,1]],np.double)
B = np.array([-1,0.2,0.9,2.1],np.double)
answer = np.linalg.lstsq(A,B)
print(answer)
def test_lstsq_2():
A = np.array([
[-3,1],[-0.9,1],[-1.8,1],
[3.2,1],[1,1],[3.3,1]
],np.double)
B = np.array([3.9,2.3,2,-1.4,-1,-0.1],np.double)
answer = np.linalg.lstsq(A,B)
print(answer)
def test_svd_4x3():
A = np.array([
[-3,6,-1],
[11,-3,0],
[0,-1,3],
[4,4,4]
]);
answer = np.linalg.svd(A,full_matrices=True,compute_uv=True)
print(answer)
def test_eig_3x3():
A = np.array([
[3,6,2],
[1,7,6],
[9,3,2]
])
[w,v] = np.linalg.eig(A)
print(w)
print(v)
def test_eig_4x4():
A = np.array([
[3,6,2,1],
[1,7,6,1],
[9,3,2,1],
[9,3,7,1]
])
[w,v] = np.linalg.eig(A)
print(w)
print(v)
def test_eig_2x2():
A = np.array([
[3,1],
[0,2]
]);
answer = np.linalg.eig(A)
print(answer)
def test_qr():
A = np.array([
[3,6,2],
[1,7,6],
[9,3,2]
]);
[q,r] = np.linalg.qr(A)
print(q)
print(r)
def test_dgeqrf_dorgqr():
A = np.array([
[3,6,2],
[1,7,6],
[9,3,2]
], np.double);
tau = np.empty((3),np.double)
work = np.zeros(1,np.double);
info = 0
np.linalg.lapack_lite.dgeqrf(3,3,A,3,tau,work,-1,info)
worksize = int(work[0])
work = np.zeros(worksize,np.double);
np.linalg.lapack_lite.dgeqrf(3,3,A,3,tau,work,worksize,info)
work = np.zeros(1,np.double);
np.linalg.lapack_lite.dorgqr(3,3,3,A,3,tau,work,-1,info)
worksize = int(work[0])
    work = np.zeros(worksize, np.double)
#
# This file is part of stcs-mimpc.
#
# Copyright (c) 2020 <NAME>, <NAME>, <NAME>.
# Developed at HS Karlsruhe and IMTEK, University of Freiburg.
# All rights reserved.
#
# The BSD 3-Clause License
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import copy
import datetime as dt
import numpy as np
import casadi as ca
from system import System
from nlpsetup import NLPSetupMPC, NLPSetupMHE
from abc import ABCMeta, abstractmethod
import logging
logger = logging.getLogger(__name__)
class NLPSolverMPCBaseClass(NLPSetupMPC, metaclass = ABCMeta):
_SECONDS_TIMEOUT_BEFORE_NEXT_TIME_GRID_POINT = 5.0
_LOGFILE_LOCATION = "/tmp"
@property
def time_grid(self):
return self._timing.time_grid
@property
def x_data(self):
try:
return np.asarray(self._x_data)
except AttributeError:
msg = "Optimized states not available yet, call solve() first."
logging.error(msg)
raise RuntimeError(msg)
@property
def x_hat(self):
return np.asarray(self._predictor.x_hat)
@property
def u_data(self):
try:
return np.asarray(self._u_data)
except AttributeError:
msg = "Optimized continuous controls not available yet, call solve() first."
logging.error(msg)
raise RuntimeError(msg)
@property
def b_data(self):
try:
return np.asarray(self._b_data)
except AttributeError:
msg = "Optimized binary controls not available yet, call solve() first."
logging.error(msg)
raise RuntimeError(msg)
@property
def c_data(self):
return np.asarray(self._ambient.c_data)
@property
def r_data(self):
try:
return np.asarray(self._r_data)
except AttributeError:
msg = "Optimized residuals not available yet, call solve() first."
logging.error(msg)
raise RuntimeError(msg)
@property
def s_ac_lb_data(self):
try:
return np.asarray(self._s_ac_lb_data)
except AttributeError:
msg = "Optimized slacks for minimum AC operation temperatures not available yet, call solve() first."
logging.error(msg)
raise RuntimeError(msg)
@property
def s_ac_ub_data(self):
try:
return np.asarray(self._s_ac_ub_data)
except AttributeError:
msg = "Optimized slacks for maximum AC operation temperatures not available yet, call solve() first."
logging.error(msg)
raise RuntimeError(msg)
@property
def s_x_data(self):
try:
return np.asarray(self._s_x_data)
except AttributeError:
msg = "Optimized slacks for soft state constraints not available yet, call solve() first."
logging.error(msg)
raise RuntimeError(msg)
@property
def s_ppsc_fpsc_data(self):
try:
return np.asarray(self._s_ppsc_fpsc_data)
except AttributeError:
msg = "Optimized slacks for FPSC safety pump speed not available yet, call solve() first."
logging.error(msg)
raise RuntimeError(msg)
@property
def s_ppsc_vtsc_data(self):
try:
return np.asarray(self._s_ppsc_vtsc_data)
except AttributeError:
msg = "Optimized slacks for VTSC safety pump speed not available yet, call solve() first."
logging.error(msg)
raise RuntimeError(msg)
@property
def solver_name(self):
return self._solver_name
def _setup_timing(self, timing):
self._timing = timing
def _setup_ambient(self, ambient):
self._ambient = ambient
def _set_previous_solver(self, previous_solver):
self._previous_solver = previous_solver
def _set_predictor(self, predictor):
self._predictor = predictor
def _setup_solver_name(self, solver_name):
self._solver_name = solver_name
def _setup_general_solver_options(self):
self._nlpsolver_options = {}
self._nlpsolver_options["ipopt.linear_solver"] = "mumps"
self._nlpsolver_options["ipopt.mumps_mem_percent"] = 10000
self._nlpsolver_options["ipopt.mumps_pivtol"] = 0.001
self._nlpsolver_options["ipopt.print_level"] = 5
self._nlpsolver_options["ipopt.output_file"] = os.path.join( \
self._LOGFILE_LOCATION, self._solver_name + ".log")
self._nlpsolver_options["ipopt.file_print_level"] = 5
self._nlpsolver_options["ipopt.max_cpu_time"] = 720.0
@abstractmethod
def _setup_additional_nlpsolver_options(self):
pass
def __init__(self, timing, ambient, previous_solver, predictor, solver_name):
logger.debug("Initializing NLP solver " + solver_name + " ...")
super().__init__(timing)
self._setup_timing(timing = timing)
self._setup_ambient(ambient = ambient)
self._setup_solver_name(solver_name = solver_name)
self._set_previous_solver(previous_solver = previous_solver)
self._set_predictor(predictor = predictor)
self._setup_general_solver_options()
self._setup_additional_nlpsolver_options()
self._setup_collocation_options()
logger.debug("NLP solver " + solver_name + " initialized.")
def set_solver_max_cpu_time(self, time_point_to_finish):
max_cpu_time = (time_point_to_finish - dt.datetime.now(tz = self._timing.timezone) - \
dt.timedelta(seconds = self._SECONDS_TIMEOUT_BEFORE_NEXT_TIME_GRID_POINT)).total_seconds()
self._nlpsolver_options["ipopt.max_cpu_time"] = max_cpu_time
logger.debug("Maximum CPU time for " + self._solver_name + " set to " + \
str(max_cpu_time) + " s ...")
def _setup_nlpsolver(self):
__dirname__ = os.path.dirname(os.path.abspath(__file__))
path_to_nlp_object = os.path.join(__dirname__, self._PATH_TO_NLP_OBJECT, \
self._NLP_OBJECT_FILENAME)
self._nlpsolver = ca.nlpsol(self._solver_name, "ipopt", path_to_nlp_object,
self._nlpsolver_options)
def _set_states_bounds(self):
'''
The boundary values for the states will later be defined as soft constraints.
'''
self.x_min = self.p_op["T"]["min"] * np.ones( \
(self._timing.N+1, self.nx - self.nx_aux))
self.x_max = self.p_op["T"]["max"] * np.ones( \
(self._timing.N+1, self.nx - self.nx_aux))
self.x_max[:,self.x_index["T_shx_psc"][-1]] = \
self.p_op["T_sc"]["T_feed_max"]
def _set_continuous_control_bounds(self):
self.u_min = np.hstack([
self.p_op["v_ppsc"]["min_mpc"] * np.ones((self._timing.N, 1)),
self.p_op["p_mpsc"]["min_mpc"] * np.ones((self._timing.N, 1)),
self.p_op["v_plc"]["min_mpc"] * np.ones((self._timing.N, 1)),
self.p_op["v_pssc"]["min_mpc"] * np.ones((self._timing.N, 1)),
# The upcoming controls are constrained later in the NLP using inequality constraints
np.zeros((self._timing.N, 1)),
np.zeros((self._timing.N, 1))])
self.u_max = np.hstack([
self.p_op["v_ppsc"]["max"] * np.ones((self._timing.N, 1)),
self.p_op["p_mpsc"]["max"] * np.ones((self._timing.N, 1)),
self.p_op["v_plc"]["max"] * np.ones((self._timing.N, 1)),
self.p_op["v_pssc"]["max"] * np.ones((self._timing.N, 1)),
np.inf * np.ones((self._timing.N, 1)),
np.inf * np.ones((self._timing.N, 1))])
@abstractmethod
def _set_binary_control_bounds(self):
pass
def _set_nlpsolver_bounds_and_initials(self):
# Optimization variables bounds and initials
V_min = []
V_max = []
V_init = []
# Constraints bounds
lbg = []
ubg = []
# Time-varying parameters
P_data = []
# Initial states
if self._timing.grid_position_cursor == 0:
V_min.append(self._predictor.x_hat)
V_max.append(self._predictor.x_hat)
V_init.append(self._predictor.x_hat)
else:
V_min.append(self._previous_solver.x_data[0,:])
V_max.append(self._previous_solver.x_data[0,:])
V_init.append(self._previous_solver.x_data[0,:])
for k in range(self._timing.N):
# Collocation equations
for j in range(1,self.d+1):
lbg.append(np.zeros(self.nx))
ubg.append(np.zeros(self.nx))
if k < self._timing.grid_position_cursor:
lbg.append(-np.inf * np.ones(self.nx))
ubg.append(np.inf * np.ones(self.nx))
else:
lbg.append(np.zeros(self.nx))
ubg.append(np.zeros(self.nx))
# s_ac_lb
lbg.append(-1e-1 * np.ones(3)) # vanishing constraints smoothened
ubg.append(np.inf * np.ones(3))
# s_ac_ub
lbg.append(-np.inf * np.ones(3))
ubg.append(1e-1 * np.ones(3)) # vanishing constraints smoothened
# Setup objective temperature range condition
lbg.append(self.p_op["room"]["T_r_a_min"])
ubg.append(self.p_op["room"]["T_r_a_max"])
# State limits soft constraints
lbg.append(self.x_min[k+1,:])
ubg.append(self.x_max[k+1,:])
# Assure ppsc is running at high speed when collector temperature is high
lbg.append(-np.inf * np.ones(4))
ubg.append(1.0e-1)
ubg.append(0)
ubg.append(1.0e-1)
ubg.append(0)
# Assure HTS bottom layer mass flows are always smaller or equal to
# the corresponding total pump flow
lbg.append(-np.inf * np.ones(2))
ubg.append(np.zeros(2))
# SOS1 constraints
lbg.append(0)
ubg.append(1)
# Append new bounds and initials
for j in range(1,self.d+1):
V_min.append(-np.inf * np.ones(self.nx))
V_max.append(np.inf * np.ones(self.nx))
V_init.append(self._previous_solver.x_data[k,:])
if k < self._timing.grid_position_cursor:
V_min.append(self._previous_solver.b_data[k,:])
V_max.append(self._previous_solver.b_data[k,:])
V_init.append(self._previous_solver.b_data[k,:])
else:
V_min.append(self.b_min[k,:])
V_max.append(self.b_max[k,:])
V_init.append(self._previous_solver.b_data[k,:])
V_min.append(np.zeros(3))
V_max.append(np.inf * np.ones(3))
try:
V_init.append(self._previous_solver.s_ac_lb_data[k,:])
except AttributeError:
V_init.append(np.zeros(3))
V_min.append(np.zeros(3))
V_max.append(np.inf * np.ones(3))
try:
V_init.append(self._previous_solver.s_ac_ub_data[k,:])
except AttributeError:
V_init.append(np.zeros(3))
V_min.append(-np.inf * np.ones(self.nx-self.nx_aux))
V_max.append(np.inf * np.ones(self.nx-self.nx_aux))
try:
V_init.append(self._previous_solver.s_x_data[k,:])
except AttributeError:
V_init.append(np.zeros(self.nx-self.nx_aux))
V_min.append(np.zeros(2))
V_max.append(self.p_op["v_ppsc"]["max"] * np.ones(2))
try:
V_init.append(self._previous_solver.s_ppsc_fpsc_data[k,:])
except AttributeError:
V_init.append(0.0)
try:
V_init.append(self._previous_solver.s_ppsc_vtsc_data[k,:])
except AttributeError:
V_init.append(0.0)
if k < self._timing.grid_position_cursor:
V_min.append(self._previous_solver.u_data[k,:])
V_max.append(self._previous_solver.u_data[k,:])
V_init.append(self._previous_solver.u_data[k,:])
else:
V_min.append(self.u_min[k,:])
V_max.append(self.u_max[k,:])
V_init.append(self._previous_solver.u_data[k,:])
V_min.append(-np.inf)
V_max.append(np.inf)
try:
V_init.append(self._previous_solver.r_data[k,:])
except AttributeError:
V_init.append(0.0)
if (k+1) == self._timing.grid_position_cursor:
V_min.append(self._predictor.x_hat)
V_max.append(self._predictor.x_hat)
V_init.append(self._predictor.x_hat)
elif (k+1) < self._timing.grid_position_cursor:
V_min.append(self._previous_solver.x_data[k+1,:])
V_max.append(self._previous_solver.x_data[k+1,:])
V_init.append(self._previous_solver.x_data[k+1,:])
else:
V_min.append(-np.inf * np.ones(self.nx))
V_max.append(np.inf * np.ones(self.nx))
V_init.append(self._previous_solver.x_data[k+1,:])
# Append time-varying parameters
P_data.append(self._ambient.c_data[k,:])
P_data.append(self._timing.time_steps[k])
P_data.append(np.zeros(self.nw))
# Concatenate objects
self.V_min = ca.veccat(*V_min)
self.V_max = ca.veccat(*V_max)
self.V_init = ca.veccat(*V_init)
self.lbg = np.hstack(lbg)
self.ubg = np.hstack(ubg)
self.P_data = ca.veccat(*P_data)
self._nlpsolver_args = {"p": self.P_data, \
"x0": self.V_init,
"lbx": self.V_min, "ubx": self.V_max, \
"lbg": self.lbg, "ubg": self.ubg}
def _run_nlpsolver(self):
logger.info(self._solver_name + ", iter " + \
str(self._timing.mpc_iteration_count) + ", limit " + \
str(round(self._nlpsolver_options["ipopt.max_cpu_time"],1)) + " s ...")
self.nlp_solution = self._nlpsolver(**self._nlpsolver_args)
if self._nlpsolver.stats()["return_status"] == "Maximum_CpuTime_Exceeded":
logger.warning(self._solver_name + " returned '" + \
str(self._nlpsolver.stats()["return_status"]) + "' after " + \
str(round(self._nlpsolver.stats()["t_wall_total"], 2)) + " s")
else:
logger.info(self._solver_name + " returned '" + \
str(self._nlpsolver.stats()["return_status"]) + "' after " + \
str(round(self._nlpsolver.stats()["t_wall_total"], 2)) + " s")
def _collect_nlp_results(self):
v_opt = np.array(self.nlp_solution["x"])
x_opt = []
u_opt = []
b_opt = []
r_opt = []
s_ac_lb_opt = []
s_ac_ub_opt = []
s_x_opt = []
s_ppsc_fpsc_opt = []
s_ppsc_vtsc_opt = []
offset = 0
for k in range(self._timing.N):
x_opt.append(v_opt[offset:offset+self.nx])
for j in range(self.d+1):
offset += self.nx
b_opt.append(v_opt[offset:offset+self.nb])
offset += self.nb
s_ac_lb_opt.append(v_opt[offset:offset+3])
offset += 3
s_ac_ub_opt.append(v_opt[offset:offset+3])
offset += 3
s_x_opt.append(v_opt[offset:offset+self.nx-self.nx_aux])
offset += self.nx-self.nx_aux
s_ppsc_fpsc_opt.append(v_opt[offset:offset+1])
offset += 1
s_ppsc_vtsc_opt.append(v_opt[offset:offset+1])
offset += 1
u_opt.append(v_opt[offset:offset+self.nu])
offset += self.nu
r_opt.append(v_opt[offset:offset+1])
offset += 1
x_opt.append(v_opt[offset:offset+self.nx])
offset += self.nx
r_opt.append(v_opt[offset:offset+1])
offset += 1
self._x_data = ca.horzcat(*x_opt).T
self._u_data = ca.horzcat(*u_opt).T
self._b_data = ca.horzcat(*b_opt).T
self._r_data = ca.horzcat(*r_opt).T
self._s_ac_lb_data = ca.horzcat(*s_ac_lb_opt).T
self._s_ac_ub_data = ca.horzcat(*s_ac_ub_opt).T
self._s_x_data = ca.horzcat(*s_x_opt).T
self._s_ppsc_fpsc_data = ca.horzcat(*s_ppsc_fpsc_opt).T
self._s_ppsc_vtsc_data = ca.horzcat(*s_ppsc_vtsc_opt).T
def solve(self):
self._setup_nlpsolver()
self._set_states_bounds()
self._set_continuous_control_bounds()
self._set_binary_control_bounds()
self._set_nlpsolver_bounds_and_initials()
self._run_nlpsolver()
self._collect_nlp_results()
def reduce_object_memory_size(self):
self._previous_solver = None
self._predictor = None
def save_results(self):
'''
This function can be used to save the MPC results, possibly including
solver runtimes, log files etc.
'''
pass
class NLPSolverBin(NLPSolverMPCBaseClass):
def _setup_additional_nlpsolver_options(self):
self._nlpsolver_options["ipopt.acceptable_tol"] = 0.2
self._nlpsolver_options["ipopt.acceptable_iter"] = 8
self._nlpsolver_options["ipopt.acceptable_constr_viol_tol"] = 10.0
self._nlpsolver_options["ipopt.acceptable_dual_inf_tol"] = 10.0
self._nlpsolver_options["ipopt.acceptable_compl_inf_tol"] = 10.0
self._nlpsolver_options["ipopt.acceptable_obj_change_tol"] = 1e-1
self._nlpsolver_options["ipopt.mu_strategy"] = "adaptive"
self._nlpsolver_options["ipopt.mu_target"] = 1e-5
def _set_binary_control_bounds(self):
self.b_min = self._previous_solver.b_data
self.b_max = self._previous_solver.b_data
class NLPSolverRel(NLPSolverMPCBaseClass):
def _setup_additional_nlpsolver_options(self):
self._nlpsolver_options["ipopt.acceptable_tol"] = 0.2
self._nlpsolver_options["ipopt.acceptable_iter"] = 8
self._nlpsolver_options["ipopt.acceptable_constr_viol_tol"] = 10.0
self._nlpsolver_options["ipopt.acceptable_dual_inf_tol"] = 10.0
self._nlpsolver_options["ipopt.acceptable_compl_inf_tol"] = 10.0
self._nlpsolver_options["ipopt.acceptable_obj_change_tol"] = 1e-1
self._nlpsolver_options["ipopt.mu_strategy"] = "adaptive"
self._nlpsolver_options["ipopt.mu_target"] = 1e-5
def _set_binary_control_bounds(self):
self.b_min = np.zeros((self._timing.N, self.nb))
self.b_max = np.ones((self._timing.N,self.nb))
self.b_max[:,-1] = 0.0
if self._timing._remaining_min_up_time > 0:
locked_time_grid_points = np.where(self._timing.time_grid < self._timing._remaining_min_up_time)[0]
self.b_min[locked_time_grid_points, :] = np.repeat(self._timing._b_bin_locked, len(locked_time_grid_points), 0)
self.b_max[locked_time_grid_points, :] = np.repeat(self._timing._b_bin_locked, len(locked_time_grid_points), 0)
class NLPSolverMHE(NLPSetupMHE):
@property
def x_data(self):
try:
return np.asarray(self._x_data)
except AttributeError:
msg = "Optimized states not available yet, call solve() first."
logging.error(msg)
raise RuntimeError(msg)
@property
def x_hat(self):
try:
return np.squeeze(self._x_data[-1,:])
except AttributeError:
msg = "Current states estimate not available yet, call solve() first."
logging.error(msg)
raise RuntimeError(msg)
@property
def w_data(self):
try:
return np.asarray(self._w_data)
except AttributeError:
msg = "Optimized process noise not available yet, call solve() first."
logging.error(msg)
raise RuntimeError(msg)
@property
def u_data(self):
return self._measurement.u_data
@property
def b_data(self):
return self._measurement.b_data
@property
def c_data(self):
return self._ambient.c_data
@property
def solver_name(self):
return self._solver_name
def _setup_ambient(self, ambient):
self._ambient = ambient
def _setup_measurement(self, measurement):
self._measurement = measurement
def _setup_solver_name(self, solver_name):
self._solver_name = solver_name
def _setup_solver_options(self):
self._nlpsolver_options = {}
self._nlpsolver_options["ipopt.linear_solver"] = "mumps"
self._nlpsolver_options["ipopt.print_level"] = 5
self._nlpsolver_options["ipopt.max_cpu_time"] = 1.0e6
def _initialize_arrival_cost_covariance(self):
self.P_x_arr = copy.deepcopy(self.R_w)
def _initialize_arrival_cost(self):
# Initialize arrival cost with initial measurement
self.x_arr = np.zeros(self.nx)
self.x_arr[self.x_index["T_hts"][0]] = \
self._measurement.y_data[0, self.y_index["T_hts"][0]]
self.x_arr[self.x_index["T_hts"][1]] = \
self._measurement.y_data[0, self.y_index["T_hts"][1]]
self.x_arr[self.x_index["T_hts"][2]] = \
self._measurement.y_data[0, self.y_index["T_hts"][2]]
self.x_arr[self.x_index["T_hts"][3]] = \
self._measurement.y_data[0, self.y_index["T_hts"][3]]
self.x_arr[self.x_index["T_lts"][0]] = \
self._measurement.y_data[0, self.y_index["T_lts"][0]]
self.x_arr[self.x_index["T_lts"][1]] = \
self._measurement.y_data[0, self.y_index["T_lts"][1]]
self.x_arr[self.x_index["T_fpsc"]] = \
self._measurement.y_data[0, self.y_index["T_fpsc_s"]]
self.x_arr[self.x_index["T_fpsc_s"]] = \
self._measurement.y_data[0, self.y_index["T_fpsc_s"]]
self.x_arr[self.x_index["T_vtsc"]] = \
self._measurement.y_data[0, self.y_index["T_vtsc_s"]]
self.x_arr[self.x_index["T_vtsc_s"]] = \
self._measurement.y_data[0, self.y_index["T_vtsc_s"]]
self.x_arr[self.x_index["T_pscf"]] = \
self._measurement.y_data[0, self.y_index["T_shx_psc"][1]]
self.x_arr[self.x_index["T_pscr"]] = \
self._measurement.y_data[0, self.y_index["T_shx_psc"][0]]
self.x_arr[self.x_index["T_shx_psc"][:2]] = \
self._measurement.y_data[0, self.y_index["T_shx_psc"][0]]
self.x_arr[self.x_index["T_shx_psc"][2:]] = \
self._measurement.y_data[0, self.y_index["T_shx_psc"][1]]
self.x_arr[self.x_index["T_shx_ssc"][:2]] = \
self._measurement.y_data[0, self.y_index["T_shx_ssc"][0]]
self.x_arr[self.x_index["T_shx_ssc"][2:]] = \
self._measurement.y_data[0, self.y_index["T_shx_ssc"][1]]
self.x_arr[self.x_index["T_fcu_w"]] = \
self._measurement.y_data[0, self.y_index["T_fcu_w"]]
self.x_arr[self.x_index["T_fcu_a"]] = \
self._measurement.y_data[0, self.y_index["T_r_a"][0]]
self.x_arr[self.x_index["T_r_c"]] = \
self._measurement.y_data[0, self.y_index["T_r_c"]]
self.x_arr[self.x_index["T_r_a"]] = \
self._measurement.y_data[0, self.y_index["T_r_a"]]
def _initialize_ekf_for_arrival_cost_update(self):
self.hfcn = ca.Function("h", [self.x, self.c], [self.h])
self.H = self.hfcn.jac()
ode = {"x": self.x, "p": ca.veccat(self.c, self.u, self.b, self.w), \
"ode": self.f}
self.phi = ca.integrator("integrator", "cvodes", ode, \
{"t0": 0.0, "tf": self._timing.dt_day})
self.Phi = self.phi.jac()
def __init__(self, timing, ambient, measurement, solver_name):
logger.info("Initializing NLP solver " + solver_name + " ...")
super().__init__(timing)
self._setup_ambient(ambient = ambient)
self._setup_measurement(measurement = measurement)
self._setup_solver_name(solver_name = solver_name)
self._setup_solver_options()
self._setup_collocation_options()
self._setup_model()
self._initialize_arrival_cost_covariance()
self._initialize_arrival_cost()
self._initialize_ekf_for_arrival_cost_update()
logger.info("NLP solver " + solver_name + " initialized.")
def _setup_nlpsolver(self):
__dirname__ = os.path.dirname(os.path.abspath(__file__))
path_to_nlp_object = os.path.join(__dirname__, self._PATH_TO_NLP_OBJECT, \
self._NLP_OBJECT_FILENAME)
self._nlpsolver = ca.nlpsol(self._solver_name, "ipopt", path_to_nlp_object,
self._nlpsolver_options)
def _set_nlpsolver_bounds_and_initials(self):
# Optimization variables bounds and initials
V_min = []
V_max = []
V_init = []
# Constraints bounds
lbg = []
ubg = []
# Time-varying parameters
P_data = []
# Initial states
V_min.append(-np.inf * np.ones(self.nx-2))
V_min.append(np.zeros(2))
V_max.append(np.inf * np.ones(self.nx-2))
V_max.append(2.5 * np.ones(2))
try:
V_init.append(self._x_data[0,:])
except AttributeError:
V_init.append(self.x_arr)
P_data.append(self.x_arr)
        P_data.append(np.linalg.inv(self.P_x_arr))
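# Illustrative, self-contained sketch (not part of the class above): the Phi/H
# Jacobian functions prepared in _initialize_ekf_for_arrival_cost_update are the
# usual ingredients of a textbook EKF covariance recursion for the arrival cost.
# This numpy-only version only shows the generic step; A, C, Q, R, K are placeholder
# names and nothing here is taken from the class's actual update code (not shown here).
def _example_ekf_covariance_update(P, A, C, Q, R):
    """One predict/update step for the state covariance P.

    P : (nx, nx) current covariance
    A : (nx, nx) state-transition Jacobian (the role of Phi above)
    C : (ny, nx) measurement Jacobian (the role of H above)
    Q : (nx, nx) process-noise covariance
    R : (ny, ny) measurement-noise covariance
    """
    import numpy as np
    P_pred = A @ P @ A.T + Q                      # propagate through the model
    S = C @ P_pred @ C.T + R                      # innovation covariance
    K = P_pred @ C.T @ np.linalg.inv(S)           # Kalman gain
    return (np.eye(P.shape[0]) - K @ C) @ P_pred  # updated covariance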
# Test the Correlation module
from UQpy.transformations import Decorrelate
import numpy as np
import pytest
def test_samples():
samples_z = np.array([[0.3, 0.36], [0.2, 1.6]])
rz = np.array([[1.0, 0.8], [0.8, 1.0]])
ntf_obj = Decorrelate(samples_z=samples_z, corr_z=rz)
    np.testing.assert_allclose(ntf_obj.samples_u, [[0.3, 0.19999999999999998], [0.2, 2.4000000000000004]], rtol=1e-09)
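# A minimal numpy-only cross-check of the same transformation (kept separate from
# the library call above): decorrelating correlated standard-normal samples amounts
# to u = L^{-1} z per sample, with L the lower Cholesky factor of corr_z. The
# expected values are the ones asserted in test_samples() above; this extra test is
# an illustrative sketch, not part of UQpy's own test suite.
def test_samples_manual_cholesky():
    samples_z = np.array([[0.3, 0.36], [0.2, 1.6]])
    rz = np.array([[1.0, 0.8], [0.8, 1.0]])
    low = np.linalg.cholesky(rz)
    samples_u = np.linalg.solve(low, samples_z.T).T
    np.testing.assert_allclose(samples_u, [[0.3, 0.19999999999999998], [0.2, 2.4000000000000004]], rtol=1e-09)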
import numpy as np
from skimage import feature
from sklearn import preprocessing
class LBP:
def __init__(self, p, r):
self.p = p
self.r = r
def getVecLength(self):
return 2**self.p
def getFeature(self, imgMat):
feat = feature.local_binary_pattern(
imgMat, self.p, self.r, method='uniform')
        re, _ = np.histogram(feat, bins=range(256), density=True)  # 'normed' was removed from np.histogram; 'density' is the supported equivalent
return re
def getFeatVecs(self, imgList, load=0):
if load == 1:
feats = np.load(r"featVectLbp.npy")
types = np.load(r"typesLbp.npy")
return (feats, types)
feats = None
# i=0
types = np.float32([]).reshape((0, 1))
for mat, type in imgList:
# print("[lbp]:"+str(i))
# i+=1
if mat is None:
continue
feat = self.getFeature(mat)
if feats is None:
feats = feat.reshape((1, -1))
else:
# print(feat.shape)
# print(feats.shape)
feats = np.append(feats, feat.reshape((1, -1)), axis=0)
types = np.append(types, np.array(type).reshape((1, 1)))
np.save(r"featVectLbp.npy", feats)
np.save(r"typesLbp.npy", types)
return (feats, types)
class HOG:
def getVecLength(self):
return 1764
def getFeature(self, imgMat):
feat = feature.hog(imgMat, orientations=9, pixels_per_cell=(
16, 16), cells_per_block=(2, 2), block_norm='L2-Hys')
feat = feat.reshape((1, -1))
feat = preprocessing.normalize(feat)
return feat
def getFeatVecs(self, imgList, load=0):
if load == 1:
feats = np.load(r"featVectHog.npy")
types = np.load(r"typesHog.npy")
return (feats, types)
feats = None
# i=0
types = np.float32([]).reshape((0, 1))
for mat, type in imgList:
# print("[hog]:"+str(i))
# i+=1
# print(mat.shape)
feat = self.getFeature(mat)
if feats is None:
feats = feat.copy()
else:
feats = np.append(feats, feat, axis=0)
            types = np.append(types, np.float32([type]))
        np.save(r"featVectHog.npy", feats)
        np.save(r"typesHog.npy", types)
        return (feats, types)
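# A minimal usage sketch (an illustration, not part of the original module): build
# feature matrices for a few synthetic grayscale images. The (image, label) tuples
# mirror the imgList structure consumed by getFeatVecs above; the 128x128 image size
# is an assumption chosen so the HOG descriptor length matches getVecLength().
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    demo_list = [(rng.integers(0, 256, size=(128, 128)).astype(np.uint8), i % 2)
                 for i in range(4)]
    lbp_feats, lbp_types = LBP(p=8, r=1).getFeatVecs(demo_list)
    hog_feats, hog_types = HOG().getFeatVecs(demo_list)
    print("LBP feature matrix:", lbp_feats.shape, "HOG feature matrix:", hog_feats.shape)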
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 19 13:16:25 2015
@author: hbanks
Brevity required, prudence preferred
"""
import os
import io
import glob
import errno
import copy
import json
import time
import warnings
import numpy as np
from scipy.optimize import curve_fit
import scipy.interpolate as spi
import scipy.optimize as spo
import scipy.integrate as intgt
import scipy.fftpack as fft
import scipy.special as spl
import matplotlib.pyplot as plt
import scipy.ndimage as ndimage
import itertools as itt
import multiprocessing as mp
import sys
sys.path.append('/Users/marketing/Desktop/HSG-turbo/')
import hsganalysis.QWPProcessing as qwp
from hsganalysis.QWPProcessing.extractMatrices import makeT,saveT
np.set_printoptions(linewidth=500)
# One of the main results is the HighSidebandCCD.sb_results array. These are the
# various mappings between index and real value
# Ideally, this code should be converted to pandas to avoid this issue,
# but that's outside the scope of current work.
# [sb number, Freq (eV), Freq error (eV), Gauss area (arb.), Area error, Gauss linewidth (eV), Linewidth error (eV)]
# [ 0 , 1 , 2, , 3 , 4 , 5 , 6 ]
class sbarr(object):
SBNUM = 0
CENFREQ = 1
CENFREQERR = 2
AREA = 3
AREAERR = 4
WIDTH = 5
WIDTHERR = 6
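# Small illustrative helper (not used elsewhere in this module): the sbarr constants
# above are column indices into an sb_results-style row, so entries can be pulled
# out by name instead of by magic number. The example row in the comment is made-up
# data in the documented column order.
def _example_unpack_sb_row(row):
    """Return (sideband order, center energy in eV, integrated area) from one sb_results row."""
    return row[sbarr.SBNUM], row[sbarr.CENFREQ], row[sbarr.AREA]
# e.g. _example_unpack_sb_row(np.array([14, 1.602, 1e-5, 42.0, 0.5, 2.2e-4, 1e-5]))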
####################
# Objects
####################
class CCD(object):
def __init__(self, fname, spectrometer_offset=None):
"""
This will read the appropriate file and make a basic CCD object. Fancier
things will be handled with the sub classes.
Creates:
self.parameters = Dictionary holding all of the information from the
data file, which comes from the JSON encoded header in the data
file
self.description = string that is the text box from data taking GUI
self.raw_data = raw data output by measurement software, wavelength vs.
data, errors. There may be text for some of the entries
corresponding to text used for Origin imports, but they
should appear as np.nan
self.ccd_data = semi-processed 1600 x 3 array of photon energy vs. data with standard error of mean at that pixel
calculated by taking multiple images. Standard error is calculated from
the data collection software
Most subclasses should make a self.proc_data, which will do whatever
processing is required to the ccd_data, such as normalizing, taking ratios,
etc.
:param fname: file name where the data is saved
:type fname: str
:param spectrometer_offset: if the spectrometer won't go where it's told, use this to correct the wavelengths (nm)
:type spectrometer_offset: float
"""
self.fname = fname
# Checking restrictions from Windows path length limits. Check if you can
# open the file:
try:
with open(fname) as f: pass
except FileNotFoundError:
# Couldn't find the file. Could be you passed the wrong one, but I'm
# finding with a large number of subfolders for polarimetry stuff,
# you end up exceeding Windows' filelength limit.
            # Haven't tested on Mac or UNC mounted drives (e.g. \\128.x.x.x\Sherwin\)
fname = r"\\?\\" + os.path.abspath(fname)
# Read in the JSON-formatted parameter string.
# The lines are all prepended by '#' for easy numpy importing
# so loop over all those lines
with open(fname, 'r') as f:
param_str = ''
line = f.readline()
while line[0] == '#':
### changed 09/17/18
# This line assumed there was a single '#'
# param_str += line[1:]
                # while this one handles several (because I found old files
# which had '## <text>...'
param_str += line.replace("#", "")
line = f.readline()
# Parse the JSON string
try:
self.parameters = json.loads(param_str)
except json.JSONDecodeError:
# error from _really_ old data where comments were dumped after a
# single-line json dumps
self.parameters=json.loads(param_str.splitlines()[0])
# Spec[trometer] steps are set to define the same physical data, but taken at
# different spectrometer center wavelengths. This value is used later
# for stitching these scans together
try:
self.parameters["spec_step"] = int(self.parameters["spec_step"])
except (ValueError, KeyError):
            # If there isn't a spec_step entry, default to 0
self.parameters["spec_step"] = 0
        # Slice through 3 to get rid of comments/origin info.
        # Would likely be better to check np.isnan() and slice out those nans.
        # I used flipud so that the x-axis is an increasing function of frequency
self.raw_data = np.flipud(np.genfromtxt(fname, comments='#', delimiter=',')[3:])
        # The camera chip is 1600 pixels wide. This line was redundant with the [3:]
# slice above and served to make sure there weren't extra stray bad lines
# hanging around.
#
        # This should also be updated some day to compensate for any horizontal binning
# on the chip, or masking out points that are bad (cosmic ray making it
# through processing, room lights or monitor lines interfering with signal)
self.ccd_data = np.array(self.raw_data[:1600, :])
# Check to see if the spectrometer offset is set. This isn't specified
# during data collection. This is a value that can be appended
# when processing if it's realized the data is offset.
# This allows the offset to be specified and kept with the data file itself,
# instead of trying to do it in individual processing scripts
#
# It's allowed as a kwarg parameter in this script for trying to determine
# what the correct offset should be
if spectrometer_offset is not None or "offset" in self.parameters:
try:
self.ccd_data[:, 0] += float(self.parameters["offset"])
except:
self.ccd_data[:, 0] += spectrometer_offset
# Convert from nm to eV
# self.ccd_data[:, 0] = 1239.84 / self.ccd_data[:, 0]
self.ccd_data[:, 0] = photon_converter["nm"]["eV"](self.ccd_data[:, 0])
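# Illustrative sketch of the on-disk format the CCD loader above expects: a JSON
# header where every line is prefixed with '#', followed by comma-separated
# wavelength/signal/error rows. The file name and parameter values here are made up;
# this helper only documents the convention and is not called anywhere in the module.
def _example_write_ccd_style_file(path="example_ccd_file.txt"):
    params = {"exposure": 1.0, "nir_lambda": 765.0, "spec_step": 0}
    header = json.dumps(params, sort_keys=True, indent=4)
    header = "\n".join("#" + line for line in header.splitlines())
    data = np.array([[800.0, 10.0, 1.0],
                     [799.9, 12.0, 1.0]])
    np.savetxt(path, data, delimiter=",", header=header, comments="")
    # Reading it back mirrors CCD.__init__: '#' lines feed json.loads, the rest genfromtxt
    raw = np.genfromtxt(path, comments="#", delimiter=",")
    return params, raw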
class Photoluminescence(CCD):
def __init__(self, fname):
"""
This object handles PL-type data. The only distinction from the parent class
is that the CCD data gets normalized to the exposure time to make different
exposures directly comparable.
creates:
self.proc_data = self.ccd_data divided by the exposure time
units: PL counts / second
:param fname: name of the file
:type fname: str
"""
super(Photoluminescence, self).__init__(fname)
# Create a copy of the array , and then normalize the signal and the errors
# by the exposure time
self.proc_data = np.array(self.ccd_data)
self.proc_data[:, 1] = self.proc_data[:, 1] / self.parameters['exposure']
self.proc_data[:, 2] = self.proc_data[:, 2] / self.parameters['exposure']
class Absorbance(CCD):
def __init__(self, fname):
"""
There are several ways Absorbance data can be loaded
You could try to load the abs data output from data collection directly,
which has the wavelength, raw, blank and actual absorbance data itself.
This is best way to do it.
Alternatively, you could want to load the raw transmission/reference
data, ignoring (or maybe not even having) the abs calculated
from the data collection software. If you want to do it this way,
you should pass fname as a list where the first element is the
file name for the reference data, and the second is the absorbance data
At first, it didn't really seem to make sense to let you pass just the
        raw reference or raw abs data, so that case isn't handled here.
Creates:
self.ref_data = np array of the reference,
freq (eV) vs. reference (counts)
self.raw_data = np.array of the raw absorption spectrum,
freq (eV) vs. reference (counts)
self.proc_data = np.array of the absorption spectrum
freq (eV) vs. "absorbance" (dB)
Note, the error bars for this data haven't been defined.
:param fname: either an absorbance filename, or a length 2 list of filenames
:type fname: str
:return: None
"""
if "abs_" in fname:
super(Absorbance, self).__init__(fname)
# Separate into the separate data sets
# The raw counts of the reference data
self.ref_data = np.array(self.ccd_data[:, [0, 1]])
# Raw counts of the sample
self.raw_data = np.array(self.ccd_data[:, [0, 2]])
# The calculated absorbance data (-10*log10(raw/ref))
self.proc_data = np.array(self.ccd_data[:, [0, 3]]) # Already in dB's
else:
# Should be here if you pass the reference/trans filenames
try:
super(Absorbance, self).__init__(fname[0])
self.ref_data = np.array(self.ccd_data)
super(Absorbance, self).__init__(fname[1])
self.raw_data = np.array(self.ccd_data)
except ValueError:
# ValueError gets thrown when importing older data
# which had more headers than data columns. Enforce
# only loading first two columns to avoid numpy trying
# to parse all of the data
# See CCD.__init__ for what's going on.
self.ref_data = np.flipud(np.genfromtxt(fname[0], comments='#',
delimiter=',', usecols=(0, 1)))
self.ref_data = np.array(self.ref_data[:1600, :])
self.ref_data[:, 0] = 1239.84 / self.ref_data[:, 0]
self.raw_data = np.flipud(np.genfromtxt(fname[1], comments='#',
delimiter=',', usecols=(0, 1)))
self.raw_data = np.array(self.raw_data[:1600, :])
self.raw_data[:, 0] = 1239.84 / self.raw_data[:, 0]
except Exception as e:
print("Exception opening absorbance data,", e)
# Calculate the absorbance from the raw camera counts.
self.proc_data = np.empty_like(self.ref_data)
self.proc_data[:, 0] = self.ref_data[:, 0]
self.proc_data[:, 1] = -10*np.log10(self.raw_data[:, 1] / self.ref_data[:,
1])
def abs_per_QW(self, qw_number):
"""
:param qw_number: number of quantum wells in the sample.
:type qw_number: int
:return: None
"""
"""
This method turns the absorption to the absorbance per quantum well. Is
that how this data should be reported?
Also, I'm not sure if columns 1 and 2 are correct.
"""
temp_abs = -np.log(self.proc_data[:, 1] / self.proc_data[:, 2]) / qw_number
        self.proc_data = np.column_stack((self.proc_data, temp_abs))  # column_stack keeps proc_data 2D when appending the per-QW column
def fft_smooth(self, cutoff, inspectPlots=False):
"""
This function removes the Fabry-Perot that affects the absorption data
creates:
self.clean = np.array of the Fourier-filtered absorption data, freq (eV) vs. absorbance (dB!)
self.parameters['fourier cutoff'] = the low pass cutoff frequency, in eV**(-1)
:param cutoff: Fourier frequency of the cut off for the low pass filter
:type cutoff: int or float
:param inspectPlots: Do you want to see the results?
:type inspectPlots: bool
:return: None
"""
# self.fixed = -np.log10(abs(self.raw_data[:, 1]) / abs(self.ref_data[:, 1]))
# self.fixed = np.nan_to_num(self.proc_data[:, 1])
# self.fixed = np.column_stack((self.raw_data[:, 0], self.fixed))
self.parameters['fourier cutoff'] = cutoff
self.clean = low_pass_filter(self.proc_data[:, 0], self.proc_data[:, 1], cutoff, inspectPlots)
def save_processing(self, file_name, folder_str, marker='', index=''):
"""
This bad boy saves the absorption spectrum that has been manipulated.
Saves 100 lines of comments.
:param file_name: The base name of the file to be saved
:type file_name: str
:param folder_str: The name of the folder where the file will be saved
:type folder_str: str
:param marker: A further label that might be the series tag or something
:type marker: str
:param index: If multiple files are being saved with the same name, include an integer to append to the end of the file
:type index: int
:return: None
"""
try:
os.mkdir(folder_str)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
self.save_name = spectra_fname
try:
parameter_str = json.dumps(self.parameters, sort_keys=True, indent=4, separators=(',', ': '))
except:
print("Source: EMCCD_image.save_images\nJSON FAILED")
print("Here is the dictionary that broke JSON:\n", self.parameters)
return
parameter_str = parameter_str.replace('\n', '\n#')
num_lines = parameter_str.count('#') # Make the number of lines constant so importing into Origin is easier
# for num in range(99 - num_lines): parameter_str += '\n#'
parameter_str += '\n#' * (99 - num_lines)
origin_import_spec = '\nNIR frequency,Signal,Standard error\neV,arb. u.,arb. u.'
spec_header = '#' + parameter_str + origin_import_spec
# spec_header = '#' + parameter_str + '\n#' + self.description[:-2] + origin_import_spec
np.savetxt(os.path.join(folder_str, spectra_fname), self.proc_data, delimiter=',',
header=spec_header, comments='', fmt='%0.6e')
spectra_fname = 'clean ' + spectra_fname
np.savetxt(os.path.join(folder_str, spectra_fname), self.clean, delimiter=',',
header=spec_header, comments='', fmt='%0.6e')
print("Save image.\nDirectory: {}".format(os.path.join(folder_str, spectra_fname)))
# class LaserLineCCD(HighSidebandCCD):
# """
# Class for use when doing alinging/testing by sending the laser
# directly into the CCD. Modifies how "sidebands" and guess and fit,
# simply looking at the max signal.
# """
# def guess_sidebands(self, cutoff=8, verbose=False, plot=False):
# pass
class NeonNoiseAnalysis(CCD):
"""
This class is used to make handling neon calibration lines easier. It's not great.
"""
def __init__(self, fname, spectrometer_offset=None):
# print 'opening', fname
super(NeonNoiseAnalysis, self).__init__(fname, spectrometer_offset=spectrometer_offset)
self.addenda = self.parameters['addenda']
self.subtrahenda = self.parameters['subtrahenda']
self.noise_and_signal()
self.process_stuff()
def noise_and_signal(self):
"""
This bad boy calculates the standard deviation of the space between the
neon lines.
The noise regions are, in nm:
high: 784-792
low1: 795-806
low2: 815-823
low3: 831-834
the peaks are located at, in nm:
#1, weak: 793.6
#2, medium: 794.3
#3, medium: 808.2
#4, weak: 825.9
#5, strong: 830.0
"""
print('\n\n')
self.ccd_data = np.flipud(self.ccd_data)
# self.high_noise_region = np.array(self.ccd_data[30:230, :])
self.high_noise_region = np.array(self.ccd_data[80:180, :]) # for dark current measurements
self.low_noise_region1 = np.array(self.ccd_data[380:700, :])
self.low_noise_region2 = np.array(self.ccd_data[950:1200, :])
self.low_noise_region3 = np.array(self.ccd_data[1446:1546, :])
# self.high_noise = np.std(self.high_noise_region[:, 1])
self.high_noise_std = np.std(self.high_noise_region[:, 1])
self.high_noise_sig = np.mean(self.high_noise_region[:, 1])
self.low_noise1 = np.std(self.low_noise_region1[:, 1])
self.low_noise2 = np.std(self.low_noise_region2[:, 1])
self.low_noise_std = np.std(self.low_noise_region2[:, 1])
self.low_noise_sig = np.mean(self.low_noise_region2[:, 1])
self.low_noise3 = np.std(self.low_noise_region3[:, 1])
# self.noise_list = [self.high_noise, self.low_noise1, self.low_noise2, self.low_noise3]
self.peak1 = np.array(self.ccd_data[303:323, :])
self.peak2 = np.array(self.ccd_data[319:339, :])
self.peak3 = np.array(self.ccd_data[736:746, :])
self.peak4 = np.array(self.ccd_data[1268:1288, :])
self.peak5 = np.array(self.ccd_data[1381:1421, :])
temp_max = np.argmax(self.peak1[:, 1])
self.signal1 = np.sum(self.peak1[temp_max - 1:temp_max + 2, 1])
self.error1 = np.sqrt(np.sum(self.peak1[temp_max - 1:temp_max + 2, 2] ** 2))
temp_max = np.argmax(self.peak2[:, 1])
self.signal2 = np.sum(self.peak2[temp_max - 1:temp_max + 2, 1])
self.error2 = np.sqrt(np.sum(self.peak2[temp_max - 1:temp_max + 2, 2] ** 2))
temp_max = np.argmax(self.peak3[:, 1])
self.signal3 = np.sum(self.peak3[temp_max - 1:temp_max + 2, 1])
self.error3 = np.sqrt(np.sum(self.peak3[temp_max - 1:temp_max + 2, 2] ** 2))
temp_max = np.argmax(self.peak4[:, 1])
self.signal4 = np.sum(self.peak4[temp_max - 1:temp_max + 2, 1])
self.error4 = np.sqrt(np.sum(self.peak4[temp_max - 1:temp_max + 2, 2] ** 2))
temp_max = np.argmax(self.peak5[:, 1])
self.signal5 = np.sum(self.peak5[temp_max - 1:temp_max + 2, 1])
self.error5 = np.sqrt(np.sum(self.peak5[temp_max - 1:temp_max + 2, 2] ** 2))
self.signal_list = [self.signal1, self.signal2, self.signal3, self.signal4, self.signal5]
self.error_list = [self.error1, self.error2, self.error3, self.error4, self.error5]
print("Signal list:", self.signal_list)
self.ccd_data = np.flipud(self.ccd_data)
def process_stuff(self):
"""
This one puts high_noise, low_noise1, signal2, and error2 in a nice horizontal array
"""
# self.results = np.array([self.high_noise, self.low_noise1, self.signal5, self.error5])
# average = np.mean([self.low_noise1, self.low_noise2, self.low_noise3])
# self.results = np.array([self.high_noise, self.low_noise1, self.low_noise2, self.low_noise3, self.high_noise/average])
self.results = np.array([self.high_noise_sig, self.high_noise_std, self.low_noise_sig, self.low_noise_std])
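# Illustrative sketch of the peak-significance statistic used above (and again in
# HighSidebandCCD.guess_sidebands below): sum the counts in a small window around a
# peak, subtract the expected background contribution, and compare against the
# scatter of the surrounding baseline. The half-width of 1 pixel is an arbitrary
# choice for this example, not a value taken from either class.
def _example_peak_snr(counts, peak_index, half_width=1):
    window = counts[peak_index - half_width:peak_index + half_width + 1]
    baseline = np.concatenate((counts[:peak_index - half_width],
                               counts[peak_index + half_width + 1:]))
    area = np.sum(window)
    return (area - window.size * baseline.mean()) / baseline.std()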
def collect_noise(neon_list, param_name, folder_name, file_name, name='Signal'):
"""
This function acts like save parameter sweep.
param_name = string that we're gonna save!
"""
# param_array = None
for elem in neon_list:
print("pname: {}".format(elem.parameters[param_name]))
print("results:", elem.results)
temp = np.insert(elem.results, 0, elem.parameters[param_name])
try:
param_array = np.row_stack((param_array, temp))
except UnboundLocalError:
param_array = np.array(temp)
if len(param_array.shape) == 1:
print("I don't think you want this file")
return
# append the relative peak error
print('\n', param_array, '\n')
param_array = np.column_stack((param_array, param_array[:, 4] / param_array[:, 3]))
# append the snr
param_array = np.column_stack((param_array, param_array[:, 3] / param_array[:, 2]))
try:
param_array = param_array[param_array[:, 0].argsort()]
except:
print("param_array shape", param_array.shape)
raise
try:
os.mkdir(folder_name)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
file_name = file_name + '.txt'
origin_import1 = param_name + ",Noise,Noise,Signal,error,rel peak error,peak signal-to-noise"
# origin_import1 = param_name + ",Noise,Noise,Noise,Noise,Ratio"
origin_import2 = ",counts,counts,counts,counts,,"
# origin_import2 = ",counts,counts,counts,,"
origin_import3 = ",High noise region,Low noise region,{},{} error,{} rel error, {}".format(name, name, name, name)
# origin_import3 = ",High noise region,Low noise region 1,Low noise region 2,Low noise region 3,High/low"
header_total = origin_import1 + "\n" + origin_import2 + "\n" + origin_import3
# print "Spec header: ", spec_header
print("the param_array is:", param_array)
np.savetxt(os.path.join(folder_name, file_name), param_array, delimiter=',',
header=header_total, comments='', fmt='%0.6e')
print("Saved the file.\nDirectory: {}".format(os.path.join(folder_name, file_name)))
class HighSidebandCCD(CCD):
def __init__(self, hsg_thing, parameter_dict=None, spectrometer_offset=None):
"""
This will read the appropriate file. The header needs to be fixed to
reflect the changes to the output header from the Andor file. Because
another helper file will do the cleaning and background subtraction,
those are no longer part of this init. This also turns all wavelengths
from nm (NIR ones) or cm-1 (THz ones) into eV.
OR, if an array is thrown in there, it'll handle the array and dict
Input:
For post-processing analysis:
hsg_thing = file name of the hsg spectrum from CCD superclass
spectrometer_offset = number of nanometers the spectrometer is off by,
should be 0.0...but can be 0.2 or 1.0
For Live-software:
hsg_thing = np array of spectrum from camera
parameter_dict = equipment dict generated by software
Internal:
self.hsg_thing = the filename
            self.parameters = dictionary with all the relevant experimental parameters
self.description = the description we added to the file as the data
was being taken
self.proc_data = processed data that has gone is frequency vs counts/pulse
self.dark_stdev = this is not currently handled appropriately
self.addenda = the list of things that have been added to the file, in
form of [constant, *spectra_added]
self.subtrahenda = the list of spectra that have been subtracted from
the file. Constant subtraction is dealt with with
self.addenda
:param hsg_thing: file name for the file to be opened. OR the actually hsg np.ndarray. Fun!
:type hsg_thing: str OR np.ndarray
:param parameter_dict: If being loaded through the data acquisition GUI, throw the dict in here
:type parameter_dict: dict
:param spectrometer_offset: Number of nm the spectrometer is off by
:type spectrometer_offset: float
:return: None, technically
"""
if isinstance(hsg_thing, str):
super(HighSidebandCCD, self).__init__(hsg_thing, spectrometer_offset=spectrometer_offset)
# TODO: fix addenda bullshit
self.addenda = []
self.subtrahenda = []
elif isinstance(hsg_thing, np.ndarray):
self.parameters = parameter_dict.copy() # Probably shouldn't shoehorn this in this way
self.addenda = []
self.subtrahenda = []
self.ccd_data = np.array(hsg_thing)
self.ccd_data[:, 0] = 1239.84 / self.ccd_data[:, 0]
            # This data won't have an error column, so attach a column of ones
self.ccd_data = np.column_stack((self.ccd_data, np.ones_like(self.ccd_data[:,1])))
self.ccd_data = np.flipud(self.ccd_data) # Because turning into eV switches direction
self.fname = "Live Data"
else:
raise Exception("I don't know what this file type is {}, type: {}".format(
hsg_thing, type(hsg_thing)
))
self.proc_data = np.array(self.ccd_data)
# proc_data is now a 1600 long array with [frequency (eV), signal (counts / FEL pulse), S.E. of signal mean]
# self.parameters["nir_freq"] = 1239.84 / float(self.parameters["nir_lambda"])
self.parameters["nir_freq"] = 1239.84 / float(self.parameters.get("nir_lambda", -1))
# self.parameters["thz_freq"] = 0.000123984 * float(self.parameters["fel_lambda"])
self.parameters["thz_freq"] = 0.000123984 * float(self.parameters.get("fel_lambda", -1))
# self.parameters["nir_power"] = float(self.parameters["nir_power"])
self.parameters["nir_power"] = float(self.parameters.get("nir_power", -1))
try: # This is the new way of doing things. Also, now it's power
self.parameters["thz_energy"] = float(self.parameters["pulseEnergies"]["mean"])
self.parameters["thz_energy_std"] = float(self.parameters["pulseEnergies"]["std"])
except: # This is the old way TODO: DEPRECATE THIS
self.parameters["thz_energy"] = float(self.parameters.get("fel_power", -1))
# things used in fitting/guessing
self.sb_list = np.array([])
self.sb_index = np.array([])
self.sb_dict = {}
self.sb_results = np.array([])
self.full_dict = {}
def __add__(self, other):
"""
Add together the image data from self.proc_data, or add a constant to
that np.array. It will then combine the addenda and subtrahenda lists,
as well as add the fel_pulses together. If type(other) is a CCD object,
then it will add the errors as well.
Input:
self = CCD-like object
other = int, float or CCD object
Internal:
ret.proc_data = the self.proc_data + other(.proc_data)
ret.addenda = combination of two input addenda lists
This raises a FutureWarning because these were designed early on and
haven't been used much.
:param other: The thing to be added, it's either a int/float or a HighSidebandCCD object
:type other: int/float or HighSidebandCCD
:return: Sum of self and other
:rtype: HighSidebandCCD
"""
raise FutureWarning
ret = copy.deepcopy(self)
# Add a constant offset to the data
if type(other) in (int, float):
ret.proc_data[:, 1] = self.proc_data[:, 1] + other
ret.addenda[0] = ret.addenda[0] + other
# or add the data of two hsg_spectra together
else:
if np.isclose(ret.parameters['center_lambda'], other.parameters['center_lambda']):
ret.proc_data[:, 1] = self.proc_data[:, 1] + other.proc_data[:, 1]
ret.proc_data[:, 2] = np.sqrt(self.proc_data[:, 1] ** 2 + other.proc_data[:, 1] ** 2)
ret.addenda[0] = ret.addenda[0] + other.addenda[0]
ret.addenda.extend(other.addenda[1:])
ret.subtrahenda.extend(other.subtrahenda)
ret.parameters['fel_pulses'] += other.parameters['fel_pulses']
else:
raise Exception('Source: Spectrum.__add__:\nThese are not from the same grating settings')
return ret
def __sub__(self, other):
"""
This subtracts constants or other data sets between self.proc_data. I
think it even keeps track of what data sets are in the file and how
they got there.
See how __add__ works for more information.
This raises a FutureWarning because these were designed early on and
haven't been used much.
:param other: The thing to be subtracted, it's either a int/float or a HighSidebandCCD object
:type other: int/float or HighSidebandCCD
:return: Sum of self and other
:rtype: HighSidebandCCD
"""
raise FutureWarning
ret = copy.deepcopy(self)
# Subtract a constant offset to the data
if type(other) in (int, float):
ret.proc_data[:, 1] = self.proc_data[:, 1] - other # Need to choose a name
ret.addenda[0] = ret.addenda[0] - other
# Subtract the data of two hsg_spectra from each other
else:
if np.isclose(ret.proc_data[0, 0], other.proc_data[0, 0]):
ret.proc_data[:, 1] = self.proc_data[:, 1] - other.proc_data[:, 1]
ret.proc_data[:, 2] = np.sqrt(self.proc_data[:, 1] ** 2 + other.proc_data[:, 1] ** 2)
ret.subtrahenda.extend(other.addenda[1:])
ret.addenda.extend(other.subtrahenda)
else:
raise Exception('Source: Spectrum.__sub__:\nThese are not from the same grating settings')
return ret
def __repr__(self):
base = """
fname: {},
Series: {series},
spec_step: {spec_step},
fel_lambda: {fel_lambda},
nir_lambda: {nir_lambda}""".format(os.path.basename(self.fname),**self.parameters)
return base
__str__ = __repr__
def calc_approx_sb_order(self, test_nir_freq):
"""
        This method will return a float approximating the order
of the frequency input. We need this because the CCD wavelength
calibration is not even close to perfect. And it shifts by half a nm
sometimes.
:param test_nir_freq: the frequency guess of the nth sideband
:type test_nir_freq: float
:return: The approximate order of the sideband in question
:rtype: float
"""
nir_freq = self.parameters['nir_freq']
thz_freq = self.parameters['thz_freq']
# If thz = 0, prevent error
if not thz_freq: thz_freq = 1
approx_order = (test_nir_freq - nir_freq) / thz_freq
return approx_order
def guess_sidebands(self, cutoff=4.5, verbose=False, plot=False, **kwargs):
"""
Update 05/24/18:
Hunter had two different loops for negative order sidebands,
then positive order sidebands. They're done pretty much identically,
so I've finally merged them into one.
Finds the locations of all the sidebands in the proc_data array to be
able to seed the fitting method. This works by finding the maximum data
value in the array and guessing what sideband it is. It creates an array
that includes this information. It will then step down, initially by one
THz frequency, then by twos after it hasn't found any odd ones. It then
goes up from the max and finds everything above in much the same way.
There is currently no rhyme or reason to a cutoff of 8. I don't know what
it should be changed to, though.
Input:
cutoff = signal-to-noise threshold to count a sideband candidate.
kwargs:
window_size: how big of a window (in pixels) to use for checking for
sidebands. Specified in half-width
default: 15
Internal:
self.sb_list = List of all of the orders the method found
self.sb_index = index of all of the peaks of the sidebands
self.sb_guess = three-part list including the frequency, amplitude and
error guesses for each sideband
"""
# TODO: this isn't commented appropriately. Will it be made more readable first?
if "cutoff" in self.parameters:
cutoff = self.parameters["cutoff"]
else:
self.parameters['cutoff for guess_sidebands'] = cutoff
if verbose:
print("=" * 15)
print()
print("Guessing CCD Sideband parameters")
print(os.path.basename(self.fname))
print("\tCutoff = {}".format(cutoff))
print()
print("=" * 15)
x_axis = np.array(self.proc_data[:, 0])
y_axis = np.array(self.proc_data[:, 1])
try:
error = np.array(self.proc_data[:, 2])
except IndexError:
# Happens on old data where spectra weren't calculated in the live
# software.
error = np.ones_like(x_axis)
min_sb = int(self.calc_approx_sb_order(x_axis[0])) + 1
try:
max_sb = int(self.calc_approx_sb_order(x_axis[-1]))
except ValueError:
print(x_axis)
nir_freq = self.parameters["nir_freq"]
thz_freq = self.parameters["thz_freq"]
if verbose:
print("min_sb: {} | max_sb: {}".format(min_sb, max_sb))
# Find max strength sideband and it's order
global_max = np.argmax(y_axis)
order_init = int(round(self.calc_approx_sb_order(x_axis[global_max])))
# if verbose:
# print "The global max is at index", global_max
if global_max < 15:
check_y = y_axis[:global_max + 15]
check_y = np.concatenate((np.zeros(15 - global_max), check_y))
elif global_max > 1585:
check_y = y_axis[global_max - 15:]
check_y = np.concatenate((check_y, np.zeros(global_max - 1585)))
else:
check_y = y_axis[global_max - 15:global_max + 15]
check_max_index = np.argmax(check_y)
check_max_area = np.sum(check_y[check_max_index - 2:check_max_index + 3])
check_ave = np.mean(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])
check_stdev = np.std(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])
check_ratio = (check_max_area - 3 * check_ave) / check_stdev
if verbose:
print(("{:^16}" * 5).format(
"global_max idx", "check_max_area", "check_ave", "check_stdev",
"check_ratio"))
print(("{:^16.5g}" * 5).format(
global_max, check_max_area, check_ave, check_stdev, check_ratio))
if check_ratio > cutoff:
self.sb_list = [order_init]
self.sb_index = [global_max]
sb_freq_guess = [x_axis[global_max]]
sb_amp_guess = [y_axis[global_max]]
sb_error_est = [
np.sqrt(sum([i ** 2 for i in error[global_max - 2:global_max + 3]])) / (
check_max_area - 5 * check_ave)]
else:
print("There are no sidebands in", self.fname)
raise RuntimeError
if verbose:
print("\t Looking for sidebands with f < {:.6f}".format(sb_freq_guess[0]))
last_sb = sb_freq_guess[0]
index_guess = global_max
# keep track of how many consecutive sidebands we've skipped. Sometimes one's
# noisy or something, so we want to keep looking after skipping one
consecutive_null_sb = 0
consecutive_null_odd = 0
no_more_odds = False
break_condition = False
for order in range(order_init - 1, min_sb - 1, -1):
# Check to make sure we're not looking at an odd when
# we've decided to skip them.
if no_more_odds == True and order % 2 == 1:
last_sb = last_sb - thz_freq
if verbose:
print("I skipped", order)
continue
# Window size to look for next sideband. Needs to be order dependent
# because higher orders get wider, so we need to look at more.
# Values are arbitrary.
window_size = 0.45 + 0.0004 * order # used to be last_sb?
lo_freq_bound = last_sb - thz_freq * (
1 + window_size) # Not sure what to do about these
hi_freq_bound = last_sb - thz_freq * (1 - window_size)
if verbose:
print("\nSideband", order)
print("\t{:.4f} < f_{} < {:.4f}".format(lo_freq_bound, order,
hi_freq_bound))
# Get the indices where the energies lie within the bounds for this SB
sliced_indices = \
np.where((x_axis > lo_freq_bound) & (x_axis < hi_freq_bound))[0]
start_index, end_index = sliced_indices.min(), sliced_indices.max()
# Get a slice of the y_data which is only in the region of interest
check_y = y_axis[sliced_indices]
check_max_index = np.argmax(
check_y) # This assumes that two floats won't be identical
# Calculate the "area" of the sideband by looking at the peak value
# within the range, and the pixel above/below it
check_max_area = np.sum(check_y[check_max_index - 1:check_max_index + 2])
if verbose and plot:
plt.figure("CCD data")
plt.plot([lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot([hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot([lo_freq_bound, hi_freq_bound], [check_y[check_max_index]] *
2, 'b', label="{} Box".format(order))
plt.text((lo_freq_bound + hi_freq_bound) / 2, check_y[check_max_index],
order)
# get the slice that doesn't have the peak in it to compare statistics
check_region = np.append(check_y[:check_max_index - 1],
check_y[check_max_index + 2:])
check_ave = check_region.mean()
check_stdev = check_region.std()
# Calculate an effective SNR, where check_ave is roughly the
# background level
check_ratio = (check_max_area - 3 * check_ave) / check_stdev
if order % 2 == 1: # This raises the barrier for odd sideband detection
check_ratio = check_ratio / 1.5
if verbose:
print("\t" + ("{:^14}" * 4).format(
"check_max_area", "check_ave", "check_stdev", "check_ratio"))
print("\t" + ("{:^14.5g}" * 4).format(
check_max_area, check_ave, check_stdev, check_ratio))
if check_ratio > cutoff:
found_index = check_max_index + start_index
self.sb_index.append(found_index)
last_sb = x_axis[found_index]
if verbose:
print("I just found", last_sb)
sb_freq_guess.append(x_axis[found_index])
sb_amp_guess.append(check_max_area - 3 * check_ave)
error_est = np.sqrt(
sum(
[i ** 2 for i in error[found_index - 1:found_index + 2]]
)) / (check_max_area - 3 * check_ave)
if verbose:
print("My error estimate is:", error_est)
sb_error_est.append(error_est)
self.sb_list.append(order)
consecutive_null_sb = 0
if order % 2 == 1:
consecutive_null_odd = 0
else:
# print "I could not find sideband with order", order
last_sb = last_sb - thz_freq
consecutive_null_sb += 1
if order % 2 == 1:
consecutive_null_odd += 1
if consecutive_null_odd == 1 and no_more_odds == False:
# print "I'm done looking for odd sidebands"
no_more_odds = True
if consecutive_null_sb == 2:
# print "I can't find any more sidebands"
break
# Look for higher sidebands
if verbose: print("\nLooking for higher energy sidebands")
last_sb = sb_freq_guess[0]
index_guess = global_max
consecutive_null_sb = 0
consecutive_null_odd = 0
no_more_odds = False
break_condition = False
for order in range(order_init + 1, max_sb + 1):
if no_more_odds == True and order % 2 == 1:
last_sb = last_sb + thz_freq
continue
window_size = 0.45 + 0.001 * order # used to be 0.28 and 0.0004
lo_freq_bound = last_sb + thz_freq * (
1 - window_size) # Not sure what to do about these
hi_freq_bound = last_sb + thz_freq * (1 + window_size)
start_index = False
end_index = False
if verbose:
print("\nSideband", order)
# print "The low frequency bound is", lo_freq_bound
# print "The high frequency bound is", hi_freq_bound
print("\t{:.4f} < f_{} < {:.4f}".format(lo_freq_bound, order,
hi_freq_bound))
for i in range(index_guess, 1600):
if start_index == False and i == 1599:
# print "I'm all out of space, captain!"
break_condition = True
break
elif start_index == False and x_axis[i] > lo_freq_bound:
# print "start_index is", i
start_index = i
elif i == 1599:
end_index = 1599
# print "hit end of data, end_index is 1599"
elif end_index == False and x_axis[i] > hi_freq_bound:
end_index = i
# print "end_index is", i
index_guess = i
break
if break_condition:
break
check_y = y_axis[start_index:end_index]
check_max_index = np.argmax(
check_y) # This assumes that two floats won't be identical
octant = len(check_y) // 8 # To be able to break down check_y into eighths
if octant < 1:
octant = 1
check_max_area = np.sum(
check_y[check_max_index - octant - 1:check_max_index + octant + 1])
if verbose and plot:
plt.figure("CCD data")
plt.plot([lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot([hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot([lo_freq_bound, hi_freq_bound], [check_y[check_max_index]] *
2, 'b', label=order)
plt.text((lo_freq_bound + hi_freq_bound) / 2, check_y[check_max_index],
order)
no_peak = (2 * len(
check_y)) // 6 # The denominator is in flux, used to be 5
# if verbose: print "\tcheck_y length", len(check_y)
check_ave = np.mean(np.take(check_y, np.concatenate(
(np.arange(no_peak), np.arange(-no_peak, 0)))))
check_stdev = np.std(np.take(check_y, np.concatenate(
(np.arange(no_peak), np.arange(-no_peak, 0)))))
check_ratio = (check_max_area - (2 * octant + 1) * check_ave) / check_stdev
if verbose:
print("\tIndices: {}->{} (d={})".format(start_index, end_index,
len(check_y)))
# print "check_y is", check_y
# print "\ncheck_max_area is", check_max_area
# print "check_ave is", check_ave
# print "check_stdev is", check_stdev
# print "check_ratio is", check_ratio
print("\t" + ("{:^14}" * 4).format(
"check_max_area", "check_ave", "check_stdev", "check_ratio"))
print("\t" + ("{:^14.6g}" * 4).format(
check_max_area, check_ave, check_stdev, check_ratio))
if order % 2 == 1: # This raises the barrier for odd sideband detection
check_ratio = check_ratio / 2
if check_ratio > cutoff:
found_index = check_max_index + start_index
self.sb_index.append(found_index)
last_sb = x_axis[found_index]
if verbose:
print("\tI'm counting this SB at index {} (f={:.4f})".format(
found_index, last_sb), end=' ')
# print "\tI found", order, "at index", found_index, "at freq", last_sb
sb_freq_guess.append(x_axis[found_index])
sb_amp_guess.append(check_max_area - (2 * octant + 1) * check_ave)
error_est = np.sqrt(sum([i ** 2 for i in error[
found_index - octant:found_index + octant]])) / (
check_max_area - (2 * octant + 1) * check_ave)
# This error is a relative error.
if verbose:
print(". Err = {:.3g}".format(error_est))
# print "\tMy error estimate is:", error_est
# print "My relative error is:", error_est / sb_amp_guess
sb_error_est.append(error_est)
self.sb_list.append(order)
consecutive_null_sb = 0
if order % 2 == 1:
consecutive_null_odd = 0
else:
# print "I could not find sideband with order", order
last_sb = last_sb + thz_freq
consecutive_null_sb += 1
if order % 2 == 1:
consecutive_null_odd += 1
if verbose:
print("\t\tI did not count this sideband")
if consecutive_null_odd == 1 and no_more_odds == False:
# print "I'm done looking for odd sidebands"
no_more_odds = True
if consecutive_null_sb == 2:
# print "I can't find any more sidebands"
break
if verbose:
print("I found these sidebands:", self.sb_list)
print('-' * 15)
print()
print()
self.sb_guess = np.array([np.asarray(sb_freq_guess), np.asarray(sb_amp_guess),
np.asarray(sb_error_est)]).T
# self.sb_guess = [frequency guess, amplitude guess, relative error of amplitude] for each sideband.
def guess_sidebandsOld(self, cutoff=4.5, verbose=False, plot=False, **kwargs):
"""
05/24/18
Old code from Hunter's days (or nearly, I've already started cleaning some
stuff up). keeping it around in case I break too much stuff
Finds the locations of all the sidebands in the proc_data array to be
able to seed the fitting method. This works by finding the maximum data
value in the array and guessing what sideband it is. It creates an array
that includes this information. It will then step down, initially by one
THz frequency, then by twos after it hasn't found any odd ones. It then
goes up from the max and finds everything above in much the same way.
There is currently no rhyme or reason to a cutoff of 8. I don't know what
it should be changed to, though.
Input:
cutoff = signal-to-noise threshold to count a sideband candidate.
kwargs:
window_size: how big of a window (in pixels) to use for checking for
sidebands. Specified in half-width
default: 15
Internal:
self.sb_list = List of all of the orders the method found
self.sb_index = index of all of the peaks of the sidebands
self.sb_guess = three-part list including the frequency, amplitude and
error guesses for each sideband
"""
# TODO: this isn't commented appropriately. Will it be made more readable first?
if "cutoff" in self.parameters:
cutoff = self.parameters["cutoff"]
else:
self.parameters['cutoff for guess_sidebands'] = cutoff
if verbose:
print("=" * 15)
print()
print("Guessing CCD Sideband parameters")
print(os.path.basename(self.fname))
print("\tCutoff = {}".format(cutoff))
print()
print("=" * 15)
x_axis = np.array(self.proc_data[:, 0])
y_axis = np.array(self.proc_data[:, 1])
error = np.array(self.proc_data[:, 2])
min_sb = int(self.calc_approx_sb_order(x_axis[0])) + 1
try:
max_sb = int(self.calc_approx_sb_order(x_axis[-1]))
except ValueError:
print(x_axis)
nir_freq = self.parameters["nir_freq"]
thz_freq = self.parameters["thz_freq"]
if verbose:
print("min_sb: {} | max_sb: {}".format(min_sb, max_sb))
# Find max strength sideband and it's order
global_max = np.argmax(y_axis)
order_init = int(round(self.calc_approx_sb_order(x_axis[global_max])))
# if verbose:
# print "The global max is at index", global_max
if global_max < 15:
check_y = y_axis[:global_max + 15]
check_y = np.concatenate((np.zeros(15 - global_max), check_y))
elif global_max > 1585:
check_y = y_axis[global_max - 15:]
check_y = np.concatenate((check_y, np.zeros(global_max - 1585)))
else:
check_y = y_axis[global_max - 15:global_max + 15]
check_max_index = np.argmax(check_y)
check_max_area = np.sum(check_y[check_max_index - 2:check_max_index + 3])
check_ave = np.mean(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])
check_stdev = np.std(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])
check_ratio = (check_max_area - 3 * check_ave) / check_stdev
if verbose:
print(("{:^16}" * 5).format(
"global_max idx", "check_max_area", "check_ave", "check_stdev",
"check_ratio"))
print(("{:^16.5g}" * 5).format(
global_max, check_max_area, check_ave, check_stdev, check_ratio))
if check_ratio > cutoff:
self.sb_list = [order_init]
self.sb_index = [global_max]
sb_freq_guess = [x_axis[global_max]]
sb_amp_guess = [y_axis[global_max]]
sb_error_est = [
np.sqrt(sum([i ** 2 for i in error[global_max - 2:global_max + 3]])) / (
check_max_area - 5 * check_ave)]
else:
print("There are no sidebands in", self.fname)
raise RuntimeError
if verbose:
print("\t Looking for sidebands with f < {:.6f}".format(sb_freq_guess[0]))
last_sb = sb_freq_guess[0]
index_guess = global_max
# keep track of how many consecutive sidebands we've skipped. Sometimes one's
# noisy or something, so we want to keep looking after skipping one
consecutive_null_sb = 0
consecutive_null_odd = 0
no_more_odds = False
break_condition = False
for order in range(order_init - 1, min_sb - 1, -1):
# Check to make sure we're not looking at an odd when
# we've decided to skip them.
if no_more_odds == True and order % 2 == 1:
last_sb = last_sb - thz_freq
if verbose:
print("I skipped", order)
continue
# Window size to look for next sideband. Needs to be order dependent
# because higher orders get wider, so we need to look at more.
# Values are arbitrary.
window_size = 0.45 + 0.0004 * order # used to be last_sb?
lo_freq_bound = last_sb - thz_freq * (
1 + window_size) # Not sure what to do about these
hi_freq_bound = last_sb - thz_freq * (1 - window_size)
if verbose:
print("\nSideband", order)
print("\t{:.4f} < f_{} < {:.4f}".format(lo_freq_bound, order,
hi_freq_bound))
# Get the indices where the energies lie within the bounds for this SB
sliced_indices = \
np.where((x_axis > lo_freq_bound) & (x_axis < hi_freq_bound))[0]
start_index, end_index = sliced_indices.min(), sliced_indices.max()
# Get a slice of the y_data which is only in the region of interest
check_y = y_axis[sliced_indices]
check_max_index = np.argmax(
check_y) # This assumes that two floats won't be identical
# Calculate the "area" of the sideband by looking at the peak value
# within the range, and the pixel above/below it
check_max_area = np.sum(check_y[check_max_index - 1:check_max_index + 2])
if verbose and plot:
plt.figure("CCD data")
plt.plot([lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot([hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot([lo_freq_bound, hi_freq_bound], [check_y[check_max_index]] *
2, 'b', label="{} Box".format(order))
plt.text((lo_freq_bound + hi_freq_bound) / 2, check_y[check_max_index],
order)
# get the slice that doesn't have the peak in it to compare statistics
check_region = np.append(check_y[:check_max_index - 1],
check_y[check_max_index + 2:])
check_ave = check_region.mean()
check_stdev = check_region.std()
# Calculate an effective SNR, where check_ave is roughly the
# background level
check_ratio = (check_max_area - 3 * check_ave) / check_stdev
if order % 2 == 1: # This raises the barrier for odd sideband detection
check_ratio = check_ratio / 1.5
if verbose:
print("\t" + ("{:^14}" * 4).format(
"check_max_area", "check_ave", "check_stdev", "check_ratio"))
print("\t" + ("{:^14.5g}" * 4).format(
check_max_area, check_ave, check_stdev, check_ratio))
if check_ratio > cutoff:
found_index = check_max_index + start_index
self.sb_index.append(found_index)
last_sb = x_axis[found_index]
if verbose:
print("I just found", last_sb)
sb_freq_guess.append(x_axis[found_index])
sb_amp_guess.append(check_max_area - 3 * check_ave)
error_est = np.sqrt(
sum(
[i ** 2 for i in error[found_index - 1:found_index + 2]]
)) / (check_max_area - 3 * check_ave)
if verbose:
print("My error estimate is:", error_est)
sb_error_est.append(error_est)
self.sb_list.append(order)
consecutive_null_sb = 0
if order % 2 == 1:
consecutive_null_odd = 0
else:
# print "I could not find sideband with order", order
last_sb = last_sb - thz_freq
consecutive_null_sb += 1
if order % 2 == 1:
consecutive_null_odd += 1
if consecutive_null_odd == 1 and no_more_odds == False:
# print "I'm done looking for odd sidebands"
no_more_odds = True
if consecutive_null_sb == 2:
# print "I can't find any more sidebands"
break
# Look for higher sidebands
if verbose: print("\nLooking for higher energy sidebands")
last_sb = sb_freq_guess[0]
index_guess = global_max
consecutive_null_sb = 0
consecutive_null_odd = 0
no_more_odds = False
break_condition = False
for order in range(order_init + 1, max_sb + 1):
if no_more_odds == True and order % 2 == 1:
last_sb = last_sb + thz_freq
continue
window_size = 0.45 + 0.001 * order # used to be 0.28 and 0.0004
lo_freq_bound = last_sb + thz_freq * (
1 - window_size) # Not sure what to do about these
hi_freq_bound = last_sb + thz_freq * (1 + window_size)
start_index = False
end_index = False
if verbose:
print("\nSideband", order)
# print "The low frequency bound is", lo_freq_bound
# print "The high frequency bound is", hi_freq_bound
print("\t{:.4f} < f_{} < {:.4f}".format(lo_freq_bound, order,
hi_freq_bound))
for i in range(index_guess, 1600):
if start_index == False and i == 1599:
# print "I'm all out of space, captain!"
break_condition = True
break
elif start_index == False and x_axis[i] > lo_freq_bound:
# print "start_index is", i
start_index = i
elif i == 1599:
end_index = 1599
# print "hit end of data, end_index is 1599"
elif end_index == False and x_axis[i] > hi_freq_bound:
end_index = i
# print "end_index is", i
index_guess = i
break
if break_condition:
break
check_y = y_axis[start_index:end_index]
check_max_index = np.argmax(
check_y) # This assumes that two floats won't be identical
octant = len(check_y) // 8 # To be able to break down check_y into eighths
if octant < 1:
octant = 1
check_max_area = np.sum(
check_y[check_max_index - octant - 1:check_max_index + octant + 1])
if verbose and plot:
plt.figure("CCD data")
plt.plot([lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot([hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot([lo_freq_bound, hi_freq_bound], [check_y[check_max_index]] *
2, 'b', label=order)
plt.text((lo_freq_bound + hi_freq_bound) / 2, check_y[check_max_index],
order)
no_peak = (2 * len(
check_y)) // 6 # The denominator is in flux, used to be 5
# if verbose: print "\tcheck_y length", len(check_y)
check_ave = np.mean(np.take(check_y, np.concatenate(
(np.arange(no_peak), np.arange(-no_peak, 0)))))
check_stdev = np.std(np.take(check_y, np.concatenate(
(np.arange(no_peak), np.arange(-no_peak, 0)))))
check_ratio = (check_max_area - (2 * octant + 1) * check_ave) / check_stdev
if verbose:
print("\tIndices: {}->{} (d={})".format(start_index, end_index,
len(check_y)))
# print "check_y is", check_y
# print "\ncheck_max_area is", check_max_area
# print "check_ave is", check_ave
# print "check_stdev is", check_stdev
# print "check_ratio is", check_ratio
print("\t" + ("{:^14}" * 4).format(
"check_max_area", "check_ave", "check_stdev", "check_ratio"))
print("\t" + ("{:^14.6g}" * 4).format(
check_max_area, check_ave, check_stdev, check_ratio))
if order % 2 == 1: # This raises the barrier for odd sideband detection
check_ratio = check_ratio / 2
if check_ratio > cutoff:
found_index = check_max_index + start_index
self.sb_index.append(found_index)
last_sb = x_axis[found_index]
if verbose:
print("\tI'm counting this SB at index {} (f={:.4f})".format(
found_index, last_sb), end=' ')
# print "\tI found", order, "at index", found_index, "at freq", last_sb
sb_freq_guess.append(x_axis[found_index])
sb_amp_guess.append(check_max_area - (2 * octant + 1) * check_ave)
error_est = np.sqrt(sum([i ** 2 for i in error[
found_index - octant:found_index + octant]])) / (
check_max_area - (2 * octant + 1) * check_ave)
# This error is a relative error.
if verbose:
print(". Err = {:.3g}".format(error_est))
# print "\tMy error estimate is:", error_est
# print "My relative error is:", error_est / sb_amp_guess
sb_error_est.append(error_est)
self.sb_list.append(order)
consecutive_null_sb = 0
if order % 2 == 1:
consecutive_null_odd = 0
else:
# print "I could not find sideband with order", order
last_sb = last_sb + thz_freq
consecutive_null_sb += 1
if order % 2 == 1:
consecutive_null_odd += 1
if verbose:
print("\t\tI did not count this sideband")
if consecutive_null_odd == 1 and no_more_odds == False:
# print "I'm done looking for odd sidebands"
no_more_odds = True
if consecutive_null_sb == 2:
# print "I can't find any more sidebands"
break
if verbose:
print("I found these sidebands:", self.sb_list)
print('-' * 15)
print()
print()
self.sb_guess = np.array([np.asarray(sb_freq_guess), np.asarray(sb_amp_guess),
np.asarray(sb_error_est)]).T
# self.sb_guess = [frequency guess, amplitude guess, relative error of amplitude] for each sideband.
def fit_sidebands(self, plot=False, verbose=False):
"""
This takes self.sb_guess and fits to each maxima to get the details of
each sideband. It's really ugly, but it works. The error of the
sideband area is approximated from the data, not the curve fit. All
else is from the curve fit. Which is definitely underestimating the
error, but we don't care too much about those errors (at this point).
self.sb_guess = [frequency guess, amplitude guess, relative error of amplitude] for each sideband.
Temporary stuff:
sb_fits = holder of the fitting results until all spectra have been fit
window = an integer that determines the "radius" of the fit window, proportional to thz_freq.
Attributes created:
self.sb_results = the money maker. Column order:
[sb number, Freq (eV), Freq error (eV), Gauss area (arb.), Area error, Gauss linewidth (eV), Linewidth error (eV)]
[ 0 , 1 , 2, , 3 , 4 , 5 , 6 ]
self.full_dict = a dictionary similar to sb_results, but now the keys
are the sideband orders. Column ordering is otherwise the same.
:param plot: Do you want to see the fits plotted with the data?
:type plot: bool
:param verbose: Do you want to see the details AND the initial guess fits?
:type verbose: bool
:return: None
"""
# print "Trying to fit these"
sb_fits = []
if verbose:
print("=" * 15)
print()
print("Fitting CCD Sidebands")
print(os.path.basename(self.fname))
print()
print("=" * 15)
# pretty sure you want this up here so things don't break
# when no sidebands found
self.full_dict = {}
thz_freq = self.parameters["thz_freq"]
window = 15 + int(15 * thz_freq / 0.0022) # Adjust the fit window based on the sideband spacing
# The 15's are based on empirical knowledge that for
# 540 GHz (2.23 meV), the best window size is 30 and
# that it seems like the window size should grow slowly?
for elem, peakIdx in enumerate(self.sb_index): # Have to do this because guess_sidebands
            # doesn't output data in the most optimized way
if peakIdx < window:
data_temp = self.proc_data[:peakIdx + window, :]
elif (1600 - peakIdx) < window:
data_temp = self.proc_data[peakIdx - window:, :]
else:
data_temp = self.proc_data[peakIdx - window:peakIdx + window, :]
width_guess = 0.0001 + 0.000001 * self.sb_list[elem] # so the width guess gets wider as order goes up
p0 = np.array([self.sb_guess[elem, 0],
self.sb_guess[elem, 1] * width_guess,
width_guess,
0.1])
# print "Let's fit this shit!"
if verbose:
print("Fitting SB {}. Peak index: {}, {}th peak in spectra".format(
self.sb_list[elem], peakIdx, elem
))
# print "\nnumber:", elem, num
# print "data_temp:", data_temp
# print "p0:", p0
print(' '*20 +"p0 = " + np.array_str(p0, precision=4))
# plot_guess = True # This is to disable plotting the guess function
if verbose and plot:
plt.figure('CCD data')
linewidth = 3
x_vals = np.linspace(data_temp[0, 0], data_temp[-1, 0], num=500)
if elem != 0:
try:
plt.plot(x_vals, gauss(x_vals, *p0),
plt.gca().get_lines()[-1].get_color() + '--' # I don't really know. Mostly
# just looked around at what functions
# matplotlib has...
, linewidth=linewidth)
except: # to prevent weird mac issues with the matplotlib things?
plt.plot(x_vals, gauss(x_vals, *p0), '--', linewidth=linewidth)
else:
plt.plot(x_vals, gauss(x_vals, *p0), '--', linewidth=linewidth)
try:
# 11/1/16
# needed to bump maxfev up to 2k because a sideband wasn't being fit
# Fix for sb 106
# 05-23 Loren 10nm\hsg_640_Perp352seq_spectrum.txt
coeff, var_list = curve_fit(
gauss, data_temp[:, 0], data_temp[:, 1], p0=p0, maxfev = 2000)
except Exception as e:
if verbose:
print("\tThe fit failed:")
print("\t\t", e)
print("\tFitting region: {}->{}".format(peakIdx-window, peakIdx+window))
# print "I couldn't fit", elem
# print "It's sideband", num
# print "In file", self.fname
# print "because", e
# print "wanted to fit xindx", peakIdx, "+-", window
self.sb_list[elem] = None
continue # This will ensure the rest of the loop is not run without an actual fit.
coeff[1] = abs(coeff[1]) # The amplitude could be negative if the linewidth is negative
coeff[2] = abs(coeff[2]) # The linewidth shouldn't be negative
if verbose:
print("\tFit successful: ", end=' ')
print("p = " + np.array_str(coeff, precision=4))
# print "coeffs:", coeff
# print "sigma for {}: {}".format(self.sb_list[elem], coeff[2])
if 10e-4 > coeff[2] > 10e-6:
try:
sb_fits.append(np.hstack((self.sb_list[elem], coeff, np.sqrt(np.diag(var_list)))))
except RuntimeWarning:
sb_fits.append(np.hstack((self.sb_list[elem], coeff, np.sqrt(np.abs(np.diag(var_list))))))
# the var_list wasn't approximating the error well enough, even when using sigma and absoluteSigma
# self.sb_guess[elem, 2] is the relative error as calculated by the guess_sidebands method
# coeff[1] is the area from the fit. Therefore, the product should be the absolute error
# of the integrated area of the sideband. The other errors are still underestimated.
#
# 1/12/18 note: So it looks like what hunter did is calculate an error estimate
# for the strength/area by the quadrature sum of errors of the points in the peak
# (from like 813 in guess_sidebands:
# error_est = np.sqrt(sum([i ** 2 for i in error[found_index - 1:found_index + 2]])) / (
# Where the error is what comes from the CCD by averaging 4 spectra. As far as I can tell,
# it doesn't currently pull in the dark counts or anything like that, except maybe
# indirectly since it'll cause the variations in the peaks
sb_fits[-1][6] = self.sb_guess[elem, 2] * coeff[1]
if verbose:
print("\tRel.Err: {:.4e} | Abs.Err: {:.4e}".format(
self.sb_guess[elem, 2], coeff[1] * self.sb_guess[elem, 2]
))
print()
# print "The rel. error guess is", self.sb_guess[elem, 2]
# print "The abs. error guess is", coeff[1] * self.sb_guess[elem, 2]
# The error from self.sb_guess[elem, 2] is a relative error
if plot and verbose:
plt.figure('CCD data')
linewidth = 5
x_vals = np.linspace(data_temp[0, 0], data_temp[-1, 0], num=500)
if elem != 0:
try:
plt.plot(x_vals, gauss(x_vals, *coeff),
plt.gca().get_lines()[-1].get_color() + '--' # dashed fit drawn in the same colour
# as the most recently plotted data trace, so the
# fitted curve overlays the sideband it belongs to
, linewidth=linewidth)
except: # to prevent weird mac issues with the matplotlib things?
plt.plot(x_vals, gauss(x_vals, *coeff), '--', linewidth=linewidth)
else:
plt.plot(x_vals, gauss(x_vals, *coeff), '--', linewidth=linewidth)
sb_fits_temp = np.asarray(sb_fits)
reorder = [0, 1, 5, 2, 6, 3, 7, 4, 8]
# Reorder the list to put the error of the i-th parameter as the i+1th.
try:
sb_fits = sb_fits_temp[:, reorder]
# if verbose: print "The abs. error guess is", sb_fits[:, 0:5]
except:
raise RuntimeError("No sidebands to fit?")
# Going to label the appropriate row with the sideband
self.sb_list = sorted(list([x for x in self.sb_list if x is not None]))
sb_names = np.vstack(self.sb_list)
# Sort by SB order
sorter = np.argsort(sb_fits[:, 0])
self.sb_results = np.array(sb_fits[sorter, :7])
if verbose:
print("\tsb_results:")
print("\t\t" + ("{:^5s}" + ("{:^12s}")*(self.sb_results.shape[1]-1)).format(
"SB", "Cen.En.", "", "Area", "", "Width",""))
for line in self.sb_results:
print('\t\t[' + ("{:^5.0f}"+ "{:<12.4g}"*(line.size-1)).format(*line) + ']')
print('-'*19)
self.full_dict = {}
for sb in self.sb_results:
self.full_dict[sb[0]] = np.asarray(sb[1:])
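# A minimal sketch of the windowed Gaussian fit performed in fit_sidebands above,
# using the module's gauss() and scipy's curve_fit. The names peakIdx, center_guess,
# height_guess and order stand in for values normally taken from guess_sidebands;
# p0 follows the same ordering as above: (center, height*width ~ area, width, offset).
#
#   window = 15
#   chunk = self.proc_data[peakIdx - window:peakIdx + window, :]
#   width_guess = 0.0001 + 0.000001 * order
#   p0 = np.array([center_guess, height_guess * width_guess, width_guess, 0.1])
#   coeff, cov = curve_fit(gauss, chunk[:, 0], chunk[:, 1], p0=p0, maxfev=2000)
#   center, area, width = coeff[0], abs(coeff[1]), abs(coeff[2])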
def infer_frequencies(self, nir_units="wavenumber", thz_units="GHz", bad_points=-2):
"""
This guy tries to fit the results from fit_sidebands to a line to get the relevant frequencies
:param nir_units: What units do you want this to output?
:type nir_units: 'nm', 'wavenumber', 'eV', 'THz'
:param thz_units: What units do you want this to output for the THz?
:type thz_units: 'GHz', 'wavenumber', 'meV'
:param bad_points: How many more-positive order sidebands shall this ignore?
:type bad_points: int
:return: freqNIR, freqTHz, the frequencies in the appropriate units
"""
# force the same units (wavenumber) for the values stored in the parameters dict
freqNIR, freqTHz = calc_laser_frequencies(self, "wavenumber", "wavenumber", bad_points)
self.parameters["calculated NIR freq (cm-1)"] = "{}".format(freqNIR)
self.parameters["calculated THz freq (cm-1)"] = "{}".format(freqTHz)
freqNIR, freqTHz = calc_laser_frequencies(self, nir_units, thz_units, bad_points)
return freqNIR, freqTHz
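# Sketch of typical use, assuming `spec` is a HighSidebandCCD whose fit_sidebands()
# has already populated sb_results (the units chosen here are just an example):
#
#   freqNIR, freqTHz = spec.infer_frequencies(nir_units="eV", thz_units="GHz")
#   print("NIR laser: {} eV, FEL: {} GHz".format(freqNIR, freqTHz))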
def save_processing(self, file_name, folder_str, marker='', index='', verbose=''):
"""
This will save all of the self.proc_data and the results from the
fitting of this individual file.
Format:
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_fits.txt'
Inputs:
file_name = the beginning of the file name to be saved
folder_str = the location of the folder where the file will be saved,
will create the folder, if necessary.
marker = an identifying string appended to the file name (often the series name)
index = used to keep these files from overwriting themselves when in a
list
Outputs:
Two files:
self.proc_data = the continuous spectrum
self.sb_results = the individual sideband details
:param file_name: The base name for the saved file
:type file_name: str
:param folder_str: The full path of the folder the file is saved in. The folder will be created if necessary.
:type folder_str: str
:param marker: Marker for the file, appended to file_name, often the self.parameters['series']
:type marker: str
:param index: used to keep these files from overwriting themselves when marker is the same
:type index: str or int
:return: None
"""
try:
os.mkdir(folder_str)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
temp = np.array(self.sb_results)
ampli = np.array([temp[:, 3] / temp[:, 5]])  # peak amplitude ~ area / linewidth
# (column 3 is the fitted area, column 5 the linewidth)
temp[:, 5:7] = temp[:, 5:7] * 1000 # For meV linewidths
if verbose:
print("sb_results", self.sb_results.shape)
print("ampli", ampli.shape)
save_results = np.hstack((temp, ampli.T))
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_fits.txt'
self.save_name = spectra_fname
self.parameters['addenda'] = self.addenda
self.parameters['subtrahenda'] = self.subtrahenda
try:
parameter_str = json.dumps(self.parameters, sort_keys=True, indent=4, separators=(',', ': '))
except:
print("Source: EMCCD_image.save_images\nJSON FAILED")
print("Here is the dictionary that broke JSON:\n", self.parameters)
return
parameter_str = parameter_str.replace('\n', '\n#')
num_lines = parameter_str.count('#') # Make the number of lines constant so importing is easier
# for num in range(99 - num_lines): parameter_str += '\n#'
parameter_str += '\n#' * (99 - num_lines)
origin_import_spec = '\nNIR frequency,Signal,Standard error\neV,arb. u.,arb. u.'
spec_header = '#' + parameter_str + origin_import_spec
origin_import_fits = '\nSideband,Center energy,error,Sideband strength,error,Linewidth,error,Amplitude'
origin_import_fits += '\norder,eV,,arb. u.,,meV,,arb. u.'
origin_import_fits += "\n{},,,{},,,".format(marker, marker)
fits_header = '#' + parameter_str + origin_import_fits
# print "DEBUG: in saving", folder_str, ",", spectra_fname
np.savetxt(os.path.join(folder_str, spectra_fname), self.proc_data, delimiter=',',
header=spec_header, comments='', fmt='%0.6e')
np.savetxt(os.path.join(folder_str, fit_fname), save_results, delimiter=',',
header=fits_header, comments='', fmt='%0.6e')
if verbose:
print("Save image.\nDirectory: {}".format(os.path.join(folder_str, spectra_fname)))
class HighSidebandCCDRaw(HighSidebandCCD):
"""
This class is meant for passing in an image file (currently supports a 2x1600)
Which it does all the processing on.
"""
def __init__(self, hsg_thing, parameter_dict=None, spectrometer_offset=None):
# let the supers do the hard work of importing the json dict and all that jazz
super(HighSidebandCCDRaw, self).__init__(hsg_thing, parameter_dict=parameter_dict, spectrometer_offset=spectrometer_offset)
self.ccd_data = np.genfromtxt(hsg_thing, delimiter=',').T
self.proc_data = np.column_stack((
self.gen_wavelengths(self.parameters["center_lambda"], self.parameters["grating"]),
np.array(self.ccd_data[:,1], dtype=float)-np.median(self.ccd_data[:,1]),
np.ones_like(self.ccd_data[:,1], dtype=float)
))
self.proc_data[:, 0] = 1239.84 / self.proc_data[:, 0]
self.proc_data = np.flipud(self.proc_data)
@staticmethod
def gen_wavelengths(center_lambda, grating):
'''
This returns a 1600 element list of wavelengths for each pixel in the EMCCD based on grating and center wavelength
grating = which grating, 1 or 2
center = center wavelength in nanometers
'''
b = 0.75 # length of spectrometer, in m
k = -1.0 # order looking at
r = 16.0e-6 # distance between pixles on CCD
if grating == 1:
d = 1. / 1800000.
gamma = 0.213258508834
delta = 1.46389935365
elif grating == 2:
d = 1. / 1200000.
gamma = 0.207412628027
delta = 1.44998344749
elif grating == 3:
d = 1. / 600000.
gamma = 0.213428934011
delta = 1.34584754696
else:
print("What a dick, that's not a valid grating")
return None
center = center_lambda * 10 ** -9
wavelength_list = np.arange(-799.0, 801.0)
output = d * k ** (-1) * ((-1) * np.cos(delta + gamma + (-1) * np.arccos(
(-1 / 4) * (1 / np.cos((1 / 2) * gamma)) ** 2 * (
2 * (np.cos((1 / 2) * gamma) ** 4 * (2 + (-1) * d ** (-2) * k ** 2 * center ** 2 + 2 * np.cos(gamma))) ** (
1 / 2) + d ** (-1) * k * center * np.sin(gamma))) + np.arctan(
b ** (-1) * (r * wavelength_list + b * np.cos(delta + gamma)) * (1 / np.sin(delta + gamma)))) + (
1 + (-1 / 16) * (1 / np.cos((1 / 2) * gamma)) ** 4 * (2 * (
np.cos((1 / 2) * gamma) ** 4 * (
2 + (-1) * d ** (-2) * k ** 2 * center ** 2 + 2 * np.cos(gamma))) ** (1 / 2) + d ** (
-1) * k * center * np.sin(
gamma)) ** 2) ** (1 / 2))
output = (output + center) * 10 ** 9
return output
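# Sketch of building an energy axis with gen_wavelengths(), mirroring what
# HighSidebandCCDRaw.__init__ does (the center wavelength and grating number
# below are illustrative, not tied to any particular data set):
#
#   wl_nm = HighSidebandCCDRaw.gen_wavelengths(center_lambda=780.0, grating=2)
#   energy_eV = 1239.84 / wl_nm        # same nm -> eV conversion as above
#   energy_eV = energy_eV[::-1]        # flip so energy increases with index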
class PMT(object):
def __init__(self, file_name):
"""
Initializes a SPEX spectrum. It'll open a file, and bring in the details
of a sideband spectrum into the object. There isn't currently any reason
to use inheritance here, but it could be extended later to include PLE or
something of the sort.
attributes:
self.parameters - dictionary of important experimental parameters
this will not necessarily be the same for each
file in the object
self.fname - the current file path
:param file_name: The name of the PMT file
:type file_name: str
:return: None
"""
# print "This started"
self.fname = file_name
# self.files_included = [file_name]
with open(file_name, 'r') as f:
param_str = ''
line = f.readline() # Needed to move past the first line, which is the sideband order. Not generally useful
line = f.readline()
while line[0] == '#':
param_str += line[1:]
line = f.readline()
self.parameters = json.loads(param_str)
class HighSidebandPMT(PMT):
def __init__(self, file_path, verbose=False):
"""
Initializes a SPEX spectrum. It'll open a single file, then read
the data from that file using .add_sideband(). The super's init will handle the parameters
and the description.
attributes:
self.parameters - dictionary of important experimental parameters, created in PMT
self.sb_dict - keys are sideband order, values are PMT data arrays
self.sb_list - sorted list of included sidebands
:param file_path: path to the current file
:type file_path: str
:param verbose: Flag to see the nitty gritty details
:type verbose: bool
:return:
"""
super(HighSidebandPMT, self).__init__(
file_path) # Creates the json parameters dictionary
self.fname = file_path
self.parameters["files included"] = [file_path]
with open(file_path, 'r') as f:
sb_num = int(f.readline()[1:])
raw_temp = np.genfromtxt(file_path, comments='#', delimiter=',')[3:, :]
if self.parameters.get("photon counted", False):
# The scale factor for photon counting to generic
# PMT data depends on... things. It's different each
# day. Unfortunately, the overlap in dynamic range between
# the two is small, and generally only one sideband
# can been seen by both methods. I don't really have
# the motivation to automatically calculate the
# appropriate factor, so this is your reminder to find
# it yourself.
import time
# assert time.strftime("%x") == "03/15/17"
assert self.parameters.get("pc ratio", -1) != -1, self.fname
raw_temp[:,3] *= self.parameters["pc ratio"]
pass
raw_temp[:, 0] = raw_temp[:, 0] / 8065.6 # turn NIR freq into eV
self.parameters["thz_freq"] = 0.000123984 * float(
self.parameters.get("fel_lambda", -1))
self.parameters["nir_freq"] = float(
self.parameters.get("nir_lambda", -1))/8065.6
self.initial_sb = sb_num
self.initial_data = np.array(raw_temp)
self.sb_dict = {sb_num: np.array(raw_temp)}
self.sb_list = [sb_num]
def add_sideband(self, other):
"""
This bad boy will add another PMT sideband object to the sideband spectrum of this object. It handles
when you measure the same sideband twice. It assumes both are equally "good"
NOTE: This means that if both aren't equally "good" (taking a second scan with higher
gain/photon counting because you didn't see it), you need to not add the file
(remove/rename the file, etc.)
I'd love to overhaul the data collection/analysis so this can be more intelligent
(effectively offload a lot of the processing, especially not saving 10 arbitrary
points to process later, onto the live software and add sideband strengths alone,
like the CCD works. But this would be a bigger change than I can seem to find
time for).
It currently doesn't do any sort of job combining dictionaries or anything, but it definitely could, if
you have two incomplete dictionaries
:param other: the new sideband data to add to the larger spectrum. Add means append; no arithmetic addition is performed
:type other: HighSidebandPMT
:return:
"""
"""
This bad boy will add another PMT sideband object to the sideband spectrum of this object
It currently doesn't do any sort of job combining dictionaries or anything, but it definitely could
"""
self.parameters["files included"].append(other.fname)
if other.initial_sb in self.sb_list:
self.sb_list.append(other.initial_sb)
# Make things comma delimited?
try:
self.sb_dict[other.initial_sb] = np.row_stack(
(self.sb_dict[other.initial_sb], other.initial_data)
)
except KeyError:
self.sb_dict[other.initial_sb] = np.array(other.initial_data)
except Exception as e:
print("THIS IS THE OTHER ERROR", e)
raise
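# Sketch of stitching repeated PMT scans of the same order before processing.
# The file names are placeholders for two scans that both contain, say, sideband 12:
#
#   sb12_a = HighSidebandPMT("sb12_scan1.txt")
#   sb12_b = HighSidebandPMT("sb12_scan2.txt")
#   sb12_a.add_sideband(sb12_b)     # raw points are row-stacked per sideband order
#   sb12_a.process_sidebands()      # the averaging happens here, not in add_sideband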
def process_sidebands(self, verbose=False, baselineCorr = False):
"""
This bad boy will clean up the garbled mess that is the object before hand,
including clearing out misfired shots and doing the averaging.
Affects:
self.sb_dict = Averages over sidebands
Creates:
self.sb_list = The sideband orders included in this object.
:param verbose: Flag to see the nitty gritty details.
:type verbose: bool
:param baselineCorr: Whether to subtract the average across
the two endpoints
:return: None
"""
for sb_num, sb in list(self.sb_dict.items()):
if sb_num == 0:
fire_condition = -np.inf # This way the FEL doesn't need to be on during laser line measurement
else:
fire_condition = np.mean(sb[:, 2]) / 2 # Say FEL fired if the
# cavity dump signal is
# more than half the mean
# of the cavity dump signal
frequencies = sorted(list(set(sb[:, 0])))
temp = None
for freq in frequencies:
data_temp = np.array([])
for point in sb:
if point[0] == freq and point[2] > fire_condition:
data_temp = np.hstack((data_temp, point[3]))
try:
temp = np.vstack(
(temp, np.array([freq, np.mean(data_temp),
np.std(data_temp) / np.sqrt(len(data_temp))])))
except:
temp = np.array([freq, np.mean(data_temp),
np.std(data_temp) / np.sqrt(len(data_temp))])
# temp[:, 0] = temp[:, 0] / 8065.6 # turn NIR freq into eV
temp = temp[temp[:, 0].argsort()]
if baselineCorr:
x = temp[[0, -1], 0]
y = temp[[0, -1], 1]
p = np.polyfit(x, y, 1)
temp[:, 1] -= np.polyval(p, temp[:,0])
self.sb_dict[sb_num] = np.array(temp)
self.sb_list = sorted(self.sb_dict.keys())
if verbose:
print("Sidebands included", self.sb_list)
def integrate_sidebands(self, verbose=False, cutoff=1.0, **kwargs):
"""
This method will integrate the sidebands to find their strengths, and then
use a magic number to define the width, since they are currently so utterly
undersampled for fitting.
cutoff is the ratio of area/error which must be exceeded to count
It is currently the preferred method for calculating sideband strengths.
self.fit_sidebands is probably better with better-sampled lines.
Creates:
self.sb_results = full list of integrated data. Column order is:
[sb order, Freq (eV), "error" (eV), Integrate area (arb.), area error, "Linewidth" (eV), "Linewidth error" (eV)
self.full_dict = Dictionary where the SB order column is removed and turned into the keys. The values
are the rest of that sideband's results.
:param verbose: Flag to see the nitty gritty details
:type verbose: bool
:return: None
"""
if verbose:
print("="*15)
print()
print("Integrating PMT Sidebands")
print("Cutoff: {}".format(cutoff))
print(os.path.basename(self.fname))
print()
print("=" * 15)
self.full_dict = {}
for sideband in list(self.sb_dict.items()):
index = np.argmax(sideband[1][:, 1])
nir_frequency = sideband[1][index, 0]
# stroff = np.nan_to_num(sideband[1][[0,1,-2,1], 1]).sum()/4.
area = np.trapz(np.nan_to_num(sideband[1][:, 1]), sideband[1][:, 0])
error = np.sqrt(np.sum(np.nan_to_num(
sideband[1][:, 2]) ** 2)) / 8065.6 # Divide by the step size?
if verbose:
print("\torder: {}, area: {:.3g}, error: {:.3g}, ratio: {:.3f}".format(
sideband[0], area, error, area/error
))
details = np.array(
[sideband[0], nir_frequency, 1 / 8065.6, area, error, 2 / 8065.6,
1 / 8065.6])
if area < 0:
if verbose:
print("\t\tarea < 0")
continue
elif area < cutoff/5 * error: # reject weak sidebands; with the default cutoff=1.0 this requires area > 0.2*error
if verbose:
print("\t\tI did not keep sideband")
continue
try:
self.sb_results = np.vstack((self.sb_results, details))
except:
self.sb_results = np.array(details)
self.full_dict[sideband[0]] = details[1:]
try:
self.sb_results = self.sb_results[self.sb_results[:, 0].argsort()]
except (IndexError, AttributeError):
# IndexError where there's only one sideband
# AttributeError when there aren't any (one sb which wasn't fit)
pass
if verbose:
print('-'*19)
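# What integrate_sidebands() does for a single order, where `data` stands for one
# averaged (freq in eV, mean, std. err.) array from self.sb_dict and `cutoff` is
# the method argument:
#
#   area  = np.trapz(np.nan_to_num(data[:, 1]), data[:, 0])
#   error = np.sqrt(np.sum(np.nan_to_num(data[:, 2]) ** 2)) / 8065.6
#   keep  = (area > 0) and (area >= cutoff / 5 * error)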
def fit_sidebands(self, plot=False, verbose=False):
"""
This method will fit a gaussian to each of the sidebands provided in
the self.sb_dict and make a list just like in the EMCCD version. It
will also use the standard error of the integral of the PMT peak as the
error of the gaussian area instead of that element from the covariance
matrix. Seems more legit.
attributes:
self.sb_results: the numpy array that contains all of the fit info just
like it does in the CCD class.
self.full_dict = A dictionary version of self.sb_results
:param plot: Flag to see the results plotted
:type plot: bool
:param verbose: Flag to see the nitty gritty details
:type verbose: bool
:return: None
"""
sb_fits = {}
for sideband in list(self.sb_dict.items()):
if verbose:
print("Sideband number", sideband[0])
print("Sideband data:\n", sideband[1])
index = np.argmax(sideband[1][:, 1])
nir_frequency = sideband[1][index, 0]
peak = sideband[1][index, 1]
width_guess = 0.0001 # Yep, another magic number
p0 = [nir_frequency, peak * width_guess, width_guess, 0.00001]
if verbose:
x_vals = np.linspace(np.amin(sideband[1][:, 0]),
np.amax(sideband[1][:, 0]), num=50)
plt.plot(x_vals, gauss(x_vals, *p0),
label="fit :{}".format(sideband[1]))
print("p0:", p0)
try:
coeff, var_list = curve_fit(gauss, sideband[1][:, 0], sideband[1][:, 1],
sigma=sideband[1][:, 2], p0=p0)
coeff[1] = abs(coeff[1])
coeff[2] = abs(coeff[2])
if verbose:
print("coeffs:", coeff)
print("stdevs:", np.sqrt(np.diag(var_list)))
print("integral", np.trapz(sideband[1][:, 1], sideband[1][:, 0]))
if np.sqrt(np.diag(var_list))[0] / coeff[
0] < 0.5: # The error on where the sideband is should be small
sb_fits[sideband[0]] = np.concatenate(
(np.array([sideband[0]]), coeff, np.sqrt(np.diag(var_list))))
# print "error then:", sb_fits[sideband[0]][6]
relative_error = np.sqrt(sum([x ** 2 for x in
sideband[1][index - 1:index + 2,
2]])) / np.sum(
sideband[1][index - 1:index + 2, 1])
if verbose:
print("relative error:", relative_error)
sb_fits[sideband[0]][6] = coeff[1] * relative_error
# print "error now:", sb_fits[sideband[0]][6]
if plot:
x_vals = np.linspace(np.amin(sideband[1][:, 0]),
np.amax(sideband[1][:, 0]), num=50)
plt.plot(x_vals, gauss(x_vals, *coeff))
# plt.plot(x_vals, gauss(x_vals, *p0))
else:
print("what happened?")
except:
print("God damn it, Leroy.\nYou couldn't fit this.")
sb_fits[sideband[0]] = None
for result in sorted(sb_fits.keys()):
try:
self.sb_results = np.vstack((self.sb_results, sb_fits[result]))
except:
self.sb_results = np.array(sb_fits[result])
self.sb_results = self.sb_results[:, [0, 1, 5, 2, 6, 3, 7, 4, 8]]
self.sb_results = self.sb_results[:, :7]
if verbose:
print("And the results, please:\n", self.sb_results)
self.full_dict = {}
for sb in self.sb_results:
self.full_dict[sb[0]] = np.asarray(sb[1:])
def laser_line(self, verbose=False, **kwargs):
"""
This method is designed to scale everything in the PMT to the conversion
efficiency based on our measurement of the laser line with a fixed
attenuation.
Creates:
self.parameters['normalized?'] = Flag to specify if the laser has been
accounted for.
:return: None
"""
if 0 not in self.sb_list:
self.parameters['normalized?'] = False
return
else:
laser_index = np.where(self.sb_results[:,0] == 0)[0][0]
if verbose:
print("sb_results", self.sb_results)
print("laser_index", laser_index)
laser_strength = np.array(self.sb_results[laser_index, 3:5])
if verbose:
print("Laser_strength", laser_strength)
for sb in self.sb_results:
if verbose:
print("\torder {}, strength {}, error {}".format(sb[0], sb[3], sb[4]))
sb[4] = (sb[3] / laser_strength[0]) * np.sqrt(
(sb[4] / sb[3]) ** 2 + (laser_strength[1] / laser_strength[0]) ** 2)
sb[3] = sb[3] / laser_strength[0]
if verbose:
print("\torder {}, strength {}, error {}".format(sb[0], sb[3], sb[4]))
for sb in list(self.full_dict.values()):
sb[3] = (sb[2] / laser_strength[0]) * np.sqrt(
(sb[3] / sb[2]) ** 2 + (laser_strength[1] / laser_strength[0]) ** 2)
sb[2] = sb[2] / laser_strength[0]
self.parameters['normalized?'] = True
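# The normalization above is a ratio with the errors combined in quadrature.
# For a single sideband strength s with error ds and laser-line strength L with
# error dL (all placeholders):
#
#   s_norm  = s / L
#   ds_norm = s_norm * np.sqrt((ds / s) ** 2 + (dL / L) ** 2)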
def save_processing(self, file_name, folder_str, marker='', index='', verbose=False):
"""
This will save all of the self.proc_data and the results from the
fitting of this individual file.
Format:
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_fits.txt'
Inputs:
file_name = the beginning of the file name to be saved
folder_str = the location of the folder where the file will be saved,
will create the folder, if necessary.
marker = an identifying string appended to the file name (often the series name)
index = used to keep these files from overwriting themselves when in a
list
Outputs:
Two files:
self.proc_data = the continuous spectrum
self.sb_results = the individual sideband details
:param file_name: The base name for the saved file
:type file_name: str
:param folder_str: The full path of the folder the file is saved in. The folder will be created if necessary.
:type folder_str: str
:param marker: Marker for the file, appended to file_name, often the self.parameters['series']
:type marker: str
:param index: used to keep these files from overwriting themselves when marker is the same
:type index: str or int
:return: None
"""
try:
os.mkdir(folder_str)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_fits.txt'
self.save_name = spectra_fname
# self.parameters["files included"] = list(self.files)
try:
parameter_str = json.dumps(self.parameters, sort_keys=True, indent=4,
separators=(',', ': '))
except:
print("Source: PMT.save_images\nJSON FAILED")
print("Here is the dictionary that broke JSON:\n", self.parameters)
return
parameter_str = parameter_str.replace('\n', '\n#')
num_lines = parameter_str.count(
'#') # Make the number of lines constant so importing is easier
# for num in range(99 - num_lines): parameter_str += '\n#'
parameter_str += '\n#' * (99 - num_lines)
origin_import_spec = '\nNIR frequency,Signal,Standard error\neV,arb. u.,arb. u.\n,{:.3f},'.format(
self.parameters["fieldStrength"]["mean"])
spec_header = '#' + parameter_str + origin_import_spec
origin_import_fits = '\nIndex,Center energy,error,Amplitude,error,Linewidth,error\nInt,eV,,arb. u.,,eV,,\n,,' # + marker
fits_header = '#' + parameter_str + origin_import_fits
for sideband in sorted(self.sb_dict.keys()):
try:
complete = np.vstack((complete, self.sb_dict[sideband]))
except:
complete = np.array(self.sb_dict[sideband])
np.savetxt(os.path.join(folder_str, spectra_fname), complete, delimiter=',',
header=spec_header, comments='', fmt='%0.6e')
try:
np.savetxt(os.path.join(folder_str, fit_fname), self.sb_results,
delimiter=',',
header=fits_header, comments='', fmt='%0.6e')
except AttributeError:
# Catch the error that happens if you save something without files
print("warning, couldn't save fit file (no sidebands found?)")
if verbose:
print("Saved PMT spectrum.\nDirectory: {}".format(
os.path.join(folder_str, spectra_fname)))
class HighSidebandPMTOld(PMT):
"""
Old version: Replaced March 01, 2017
Class initialized by loading in data set.
Multiple copies of the same sideband were stacked as raw data and combined,
effectively causing (2) 10-pt scans to be treated the same as (1) 20pt scan.
This works well until you have photon counted pulses.
"""
def __init__(self, file_path, verbose=False):
"""
Initializes a SPEX spectrum. It'll open a single file, then read
the data from that file using .add_sideband(). The super's init will handle the parameters
and the description.
attributes:
self.parameters - dictionary of important experimental parameters, created in PMT
self.sb_dict - keys are sideband order, values are PMT data arrays
self.sb_list - sorted list of included sidebands
:param file_path: path to the current file
:type file_path: str
:param verbose: Flag to see the nitty gritty details
:type verbose: bool
:return:
"""
super(HighSidebandPMTOld, self).__init__(
file_path) # Creates the json parameters dictionary
self.fname = file_path
self.parameters["files included"] = [file_path]
with open(file_path, 'r') as f:
sb_num = int(f.readline()[1:])
raw_temp = np.genfromtxt(file_path, comments='#', delimiter=',')[3:, :]
self.initial_sb = sb_num
self.initial_data = np.array(raw_temp)
self.sb_dict = {sb_num: np.array(raw_temp)}
self.sb_list = [sb_num]
def add_sideband(self, other):
"""
This bad boy will add another PMT sideband object to the sideband spectrum of this object. It handles
when you measure the same sideband twice. It assumes both are equally "good"
It currently doesn't do any sort of job combining dictionaries or anything, but it definitely could, if
you have two incomplete dictionaries
:param other: the new sideband data to add to the larger spectrum. Add means append; no arithmetic addition is performed
:type other: HighSidebandPMT
:return:
"""
"""
This bad boy will add another PMT sideband object to the sideband spectrum of this object
It currently doesn't do any sort of job combining dictionaries or anything, but it definitely could
"""
self.parameters["files included"].append(other.fname)
if other.initial_sb in self.sb_list:
self.sb_list.append(other.initial_sb)
# Make things comma delimited?
try:
self.sb_dict[other.initial_sb] = np.vstack(
(self.sb_dict[other.initial_sb], other.initial_data))
except KeyError:
self.sb_dict[other.initial_sb] = np.array(other.initial_data)
def process_sidebands(self, verbose=False):
"""
This bad boy will clean up the garbled mess that is the object before hand,
including clearing out misfired shots and doing the averaging.
Affects:
self.sb_dict = Averages over sidebands
Creates:
self.sb_list = The sideband orders included in this object.
:param verbose: Flag to see the nitty gritty details.
:type verbose: bool
:return: None
"""
for sb_num, sb in list(self.sb_dict.items()):
if sb_num == 0:
fire_condition = -np.inf # This way the FEL doesn't need to be on during laser line measurement
else:
fire_condition = np.mean(sb[:, 2]) / 2 # Say FEL fired if the
# cavity dump signal is
# more than half the mean
# of the cavity dump signal
frequencies = sorted(list(set(sb[:, 0])))
temp = None
for freq in frequencies:
data_temp = np.array([])
for point in sb:
if point[0] == freq and point[2] > fire_condition:
data_temp = np.hstack((data_temp, point[3]))
try:
temp = np.vstack(
(temp, np.array([freq, np.mean(data_temp),
np.std(data_temp) / np.sqrt(len(data_temp))])))
except:
temp = np.array([freq, np.mean(data_temp),
np.std(data_temp) / np.sqrt(len(data_temp))])
temp[:, 0] = temp[:, 0] / 8065.6 # turn NIR freq into eV
temp = temp[temp[:, 0].argsort()]
self.sb_dict[sb_num] = np.array(temp)
self.sb_list = sorted(self.sb_dict.keys())
if verbose:
print("Sidebands included", self.sb_list)
def integrate_sidebands(self, verbose=False):
"""
This method will integrate the sidebands to find their strengths, and then
use a magic number to define the width, since they are currently so utterly
undersampled for fitting.
It is currently the preferred method for calculating sideband strengths.
self.fit_sidebands is probably better with better-sampled lines.
Creates:
self.sb_results = full list of integrated data. Column order is:
[sb order, Freq (eV), "error" (eV), Integrate area (arb.), area error, "Linewidth" (eV), "Linewidth error" (eV)
self.full_dict = Dictionary where the SB order column is removed and turned into the keys. The values
are the rest of that sideband's results.
:param verbose: Flag to see the nitty gritty details
:type verbose: bool
:return: None
"""
self.full_dict = {}
for sideband in list(self.sb_dict.items()):
index = np.argmax(sideband[1][:, 1])
nir_frequency = sideband[1][index, 0]
area = np.trapz(np.nan_to_num(sideband[1][:, 1]), sideband[1][:, 0])
error = np.sqrt(np.sum(np.nan_to_num(
sideband[1][:, 2]) ** 2)) / 8065.6 # Divide by the step size?
if verbose:
print("order", sideband[0])
print("area", area)
print("error", error)
print("ratio", area / error)
details = np.array(
[sideband[0], nir_frequency, 1 / 8065.6, area, error, 2 / 8065.6,
1 / 8065.6])
if area < 0:
if verbose:
print("area less than 0", sideband[0])
continue
elif area < 1.0 * error: # reject sidebands whose integrated area does not exceed the estimated error
if verbose:
print("I did not keep sideband ", sideband[0])
continue
try:
self.sb_results = np.vstack((self.sb_results, details))
except:
self.sb_results = np.array(details)
self.full_dict[sideband[0]] = details[1:]
try:
self.sb_results = self.sb_results[self.sb_results[:, 0].argsort()]
except (IndexError, AttributeError):
# IndexError where there's only one sideband
# AttributeError when there aren't any (one sb which wasn't fit)
pass
def fit_sidebands(self, plot=False, verbose=False):
"""
This method will fit a gaussian to each of the sidebands provided in
the self.sb_dict and make a list just like in the EMCCD version. It
will also use the standard error of the integral of the PMT peak as the
error of the gaussian area instead of that element from the covariance
matrix. Seems more legit.
attributes:
self.sb_results: the numpy array that contains all of the fit info just
like it does in the CCD class.
self.full_dict = A dictionary version of self.sb_results
:param plot: Flag to see the results plotted
:type plot: bool
:param verbose: Flag to see the nitty gritty details
:type verbose: bool
:return: None
"""
sb_fits = {}
for sideband in list(self.sb_dict.items()):
if verbose:
print("Sideband number", sideband[0])
print("Sideband data:\n", sideband[1])
index = np.argmax(sideband[1][:, 1])
nir_frequency = sideband[1][index, 0]
peak = sideband[1][index, 1]
width_guess = 0.0001 # Yep, another magic number
p0 = [nir_frequency, peak * width_guess, width_guess, 0.00001]
if verbose:
x_vals = np.linspace(np.amin(sideband[1][:, 0]),
np.amax(sideband[1][:, 0]), num=50)
plt.plot(x_vals, gauss(x_vals, *p0),
label="fit :{}".format(sideband[1]))
print("p0:", p0)
try:
coeff, var_list = curve_fit(gauss, sideband[1][:, 0], sideband[1][:, 1],
sigma=sideband[1][:, 2], p0=p0)
coeff[1] = abs(coeff[1])
coeff[2] = abs(coeff[2])
if verbose:
print("coeffs:", coeff)
print("stdevs:", np.sqrt(np.diag(var_list)))
print("integral", np.trapz(sideband[1][:, 1], sideband[1][:, 0]))
if np.sqrt(np.diag(var_list))[0] / coeff[
0] < 0.5: # The error on where the sideband is should be small
sb_fits[sideband[0]] = np.concatenate(
(np.array([sideband[0]]), coeff, np.sqrt(np.diag(var_list))))
# print "error then:", sb_fits[sideband[0]][6]
relative_error = np.sqrt(sum([x ** 2 for x in
sideband[1][index - 1:index + 2,
2]])) / np.sum(
sideband[1][index - 1:index + 2, 1])
if verbose:
print("relative error:", relative_error)
sb_fits[sideband[0]][6] = coeff[1] * relative_error
# print "error now:", sb_fits[sideband[0]][6]
if plot:
x_vals = np.linspace(np.amin(sideband[1][:, 0]),
np.amax(sideband[1][:, 0]), num=50)
plt.plot(x_vals, gauss(x_vals, *coeff))
# plt.plot(x_vals, gauss(x_vals, *p0))
else:
print("what happened?")
except:
print("God damn it, Leroy.\nYou couldn't fit this.")
sb_fits[sideband[0]] = None
for result in sorted(sb_fits.keys()):
try:
self.sb_results = np.vstack((self.sb_results, sb_fits[result]))
except:
self.sb_results = np.array(sb_fits[result])
self.sb_results = self.sb_results[:, [0, 1, 5, 2, 6, 3, 7, 4, 8]]
self.sb_results = self.sb_results[:, :7]
if verbose:
print("And the results, please:\n", self.sb_results)
self.full_dict = {}
for sb in self.sb_results:
self.full_dict[sb[0]] = np.asarray(sb[1:])
def laser_line(self, verbose=False):
"""
This method is designed to scale everything in the PMT to the conversion
efficiency based on our measurement of the laser line with a fixed
attenuation.
Creates:
self.parameters['normalized?'] = Flag to specify if the laser has been
accounted for.
:return: None
"""
if 0 not in self.sb_list:
self.parameters['normalized?'] = False
return
else:
laser_index = np.where(self.sb_results[:, 0] == 0)[0][0]
if verbose:
print("sb_results", self.sb_results[laser_index, :])
print("laser_index", laser_index)
laser_strength = np.array(self.sb_results[laser_index, 3:5])
if verbose:
print("Laser_strength", laser_strength)
for sb in self.sb_results:
if verbose:
print("\torder {}, strength {}, error {}".format(sb[0], sb[3], sb[4]))
sb[4] = (sb[3] / laser_strength[0]) * np.sqrt(
(sb[4] / sb[3]) ** 2 + (laser_strength[1] / laser_strength[0]) ** 2)
sb[3] = sb[3] / laser_strength[0]
if verbose:
print("\torder {}, strength {}, error {}".format(sb[0], sb[3], sb[4]))
for sb in list(self.full_dict.values()):
sb[3] = (sb[2] / laser_strength[0]) * np.sqrt(
(sb[3] / sb[2]) ** 2 + (laser_strength[1] / laser_strength[0]) ** 2)
sb[2] = sb[2] / laser_strength[0]
self.parameters['normalized?'] = True
def save_processing(self, file_name, folder_str, marker='', index=''):
"""
This will save all of the self.proc_data and the results from the
fitting of this individual file.
Format:
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_fits.txt'
Inputs:
file_name = the beginning of the file name to be saved
folder_str = the location of the folder where the file will be saved,
will create the folder, if necessary.
marker = an identifying string appended to the file name (often the series name)
index = used to keep these files from overwriting themselves when in a
list
Outputs:
Two files:
self.proc_data = the continuous spectrum
self.sb_results = the individual sideband details
:param file_name: The base name for the saved file
:type file_name: str
:param folder_str: The full path of the folder the file is saved in. The folder will be created if necessary.
:type folder_str: str
:param marker: Marker for the file, appended to file_name, often the self.parameters['series']
:type marker: str
:param index: used to keep these files from overwriting themselves when marker is the same
:type index: str or int
:return: None
"""
try:
os.mkdir(folder_str)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_fits.txt'
self.save_name = spectra_fname
# self.parameters["files included"] = list(self.files)
try:
parameter_str = json.dumps(self.parameters, sort_keys=True, indent=4,
separators=(',', ': '))
except:
print("Source: PMT.save_images\nJSON FAILED")
print("Here is the dictionary that broke JSON:\n", self.parameters)
return
parameter_str = parameter_str.replace('\n', '\n#')
num_lines = parameter_str.count(
'#') # Make the number of lines constant so importing is easier
# for num in range(99 - num_lines): parameter_str += '\n#'
parameter_str += '\n#' * (99 - num_lines)
origin_import_spec = '\nNIR frequency,Signal,Standard error\neV,arb. u.,arb. u.\n,{:.3f},'.format(
self.parameters["fieldStrength"]["mean"])
spec_header = '#' + parameter_str + origin_import_spec
origin_import_fits = '\nCenter energy,error,Amplitude,error,Linewidth,error\neV,,arb. u.,,eV,,\n,,' # + marker
fits_header = '#' + parameter_str + origin_import_fits
for sideband in sorted(self.sb_dict.keys()):
try:
complete = np.vstack((complete, self.sb_dict[sideband]))
except:
complete = np.array(self.sb_dict[sideband])
np.savetxt(os.path.join(folder_str, spectra_fname), complete, delimiter=',',
header=spec_header, comments='', fmt='%0.6e')
try:
np.savetxt(os.path.join(folder_str, fit_fname), self.sb_results,
delimiter=',',
header=fits_header, comments='', fmt='%0.6e')
except AttributeError:
# Catch the error that happens if you save something without files
print("warning, couldn't save fit file (no sidebands found?)")
print("Saved PMT spectrum.\nDirectory: {}".format(
os.path.join(folder_str, spectra_fname)))
class TimeTrace(PMT):
"""
This class will be able to handle time traces output by the PMT software.
"""
def __init__(self, file_path):
super(TimeTrace, self).__init__(file_path)
class FullSpectrum(object):
def __init__(self):
pass
class FullAbsorbance(FullSpectrum):
"""
I'm imagining this will sew up absorption spectra, but I'm not at all sure
how to do that at the moment.
"""
def __init__(self):
pass
class FullHighSideband(FullSpectrum):
"""
I'm imagining this class is created with a base CCD file, then gobbles up
other spectra that belong with it, then grabs the PMT object to normalize
everything, assuming that PMT object exists.
"""
def __init__(self, initial_CCD_piece):
"""
Initialize a full HSG spectrum. Starts with a single CCD image, then
adds more on to itself using stitch_hsg_dicts.
Creates:
self.fname = file name of the initial_CCD_piece
self.sb_results = The sideband details from the initializing data
self.parameters = The parameter dictionary of the initializing data. May
not have all details of spectrum pieces added later.
self.full_dict = a copy of the sb_results without the zeroth column, which
is SB order
:param initial_CCD_piece: The starting part of the spectrum, often the lowest orders seen by CCD
:type initial_CCD_piece: HighSidebandCCD
:return: None
"""
self.fname = initial_CCD_piece.fname
try:
self.sb_results = initial_CCD_piece.sb_results
except AttributeError:
print(initial_CCD_piece.full_dict)
raise
self.parameters = initial_CCD_piece.parameters
self.parameters['files_here'] = [initial_CCD_piece.fname.split('/')[-1]]
self.full_dict = {}
for sb in self.sb_results:
self.full_dict[sb[0]] = np.asarray(sb[1:])
@staticmethod
def parse_sb_array(arr):
"""
Check to make sure the first even order sideband in an array is not weaker
than the second even order. If this happens, it's likely because the SB was in
the short pass filter and isn't worth counting.
We cut it out to prevent it from interfering with calculating overlaps.
:param arr:
:return:
"""
arr = np.array(arr)
if (arr[0, sbarr.SBNUM]>0 and arr[1, sbarr.SBNUM]>0 and # make sure they're both pos
arr[0, sbarr.AREA] < arr[1, sbarr.AREA]): # and the fact the area is less
# print "REMOVING FIRST SIDEBAND FROM FULLSIDEBAND"
# print arr[0]
# print arr[1]
arr = arr[1:]
full_dict = {}
for sb in arr:
full_dict[sb[0]] = np.asarray(sb[1:])
return full_dict, arr
def add_CCD(self, ccd_object, verbose=False, force_calc=None, **kwargs):
"""
This method will be called by the stitch_hsg_results function to add another
CCD image to the spectrum.
:param ccd_object: The CCD object that will be stiched into the current FullHighSideband object
:type ccd_object: HighSidebandCCD
:return: None
"""
if self.parameters["gain"] == ccd_object.parameters["gain"]:
calc = False
else:
calc = True
if force_calc is not None:
calc = force_calc
if "need_ratio" in kwargs: #cascading it through, starting to think
# everything should be in a kwarg
calc = kwargs.pop("need_ratio")
try:
# self.full_dict = stitch_hsg_dicts(self.full_dict, ccd_object.full_dict,
# need_ratio=calc, verbose=verbose)
self.full_dict = stitch_hsg_dicts(self, ccd_object, need_ratio=calc,
verbose=verbose, **kwargs)
self.parameters['files_here'].append(ccd_object.fname.split('/')[-1])
# update sb_results, too
sb_results = [[k]+list(v) for k, v in list(self.full_dict.items())]
sb_results = np.array(sb_results)
self.sb_results = sb_results[sb_results[:,0].argsort()]
except AttributeError:
print('Error, not enough sidebands to fit here! {}, {}, {}, {}'.format(
self.parameters["series"], self.parameters["spec_step"],
ccd_object.parameters["series"], ccd_object.parameters["spec_step"]
))
def add_PMT(self, pmt_object, verbose=True):
"""
This method will be called by the stitch_hsg_results function to add the PMT
data to the spectrum.
"""
# print "I'm adding PMT once"
# self.full_dict = stitch_hsg_dicts(pmt_object.full_dict, self.full_dict,
# need_ratio=True, verbose=False)
self.full_dict = stitch_hsg_dicts(pmt_object, self,
need_ratio=True, verbose=verbose)
# if verbose:
# self.full_dict, ratio = self.full_dict
# print "I'm done adding PMT data"
self.parameters['files_here'].append(pmt_object.parameters['files included'])
self.make_results_array()
# if verbose:
# return ratio
def make_results_array(self):
"""
The idea behind this method is to create the sb_results array from the
finished full_dict dictionary.
"""
self.sb_results = None
# print "I'm making the results array:", sorted(self.full_dict.keys())
for sb in sorted(self.full_dict.keys()):
# print "Going to add this", sb
try:
self.sb_results = np.vstack((self.sb_results, np.hstack((sb, self.full_dict[sb]))))
except ValueError:
# print "It didn't exist yet!"
self.sb_results = np.hstack((sb, self.full_dict[sb]))
# print "and I made this array:", self.sb_results[:, 0]
def save_processing(self, file_name, folder_str, marker='', index='', verbose=''):
"""
This will save all of the self.proc_data and the results from the
fitting of this individual file.
Format:
fit_fname = file_name + '_' + marker + '_' + str(index) + '_full.txt'
Inputs:
file_name = the beginning of the file name to be saved
folder_str = the location of the folder where the file will be saved,
will create the folder, if necessary.
marker = an identifying string appended to the file name (often the series name)
index = used to keep these files from overwriting themselves when in a
list
Outputs:
Two files, one that is self.proc_data, the other is self.sb_results
"""
try:
os.mkdir(folder_str)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
temp = np.array(self.sb_results)
ampli = np.array([temp[:, 3] / temp[:, 5]])  # peak amplitude ~ area / linewidth
# (column 3 is the fitted area, column 5 the linewidth)
temp[:, 5:7] = temp[:, 5:7] * 1000 # For meV linewidths
if verbose:
print("sb_results", self.sb_results.shape)
print("ampli", ampli.shape)
save_results = np.hstack((temp, ampli.T))
# spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_full.txt'
# self.save_name = spectra_fname
# self.parameters['addenda'] = self.addenda
# self.parameters['subtrahenda'] = self.subtrahenda
try:
# PMT files add unnecessary number of lines, dump it into one line
# by casting it to a string.
reduced = self.parameters.copy()
reduced["files_here"] = str(reduced["files_here"])
parameter_str = json.dumps(reduced, sort_keys=True, indent=4, separators=(',', ': '))
except Exception as e:
print(e)
print("Source: EMCCD_image.save_images\nJSON FAILED")
print("Here is the dictionary that broke JSON:\n", self.parameters)
return
parameter_str = parameter_str.replace('\n', '\n#')
num_lines = parameter_str.count('#') # Make the number of lines constant so importing is easier
# for num in range(99 - num_lines): parameter_str += '\n#'
parameter_str += '\n#' * (99 - num_lines)
# origin_import_spec = '\nNIR frequency,Signal,Standard error\neV,arb. u.,arb. u.'
# spec_header = '#' + parameter_str + '\n#' + self.description[:-2] + origin_import_spec
origin_import_fits = '\nSideband,Center energy,error,Sideband strength,error,Linewidth,error,Amplitude'+\
'\norder,eV,,arb. u.,,meV,,arb. u.\n' + ','.join([marker]*8)
fits_header = '#' + parameter_str + origin_import_fits
# np.savetxt(os.path.join(folder_str, spectra_fname), self.proc_data, delimiter=',',
# header=spec_header, comments='', fmt='%f')
np.savetxt(os.path.join(folder_str, fit_fname), save_results, delimiter=',',
header=fits_header, comments='', fmt='%0.6e')
if verbose:
print("Save image.\nDirectory: {}".format(os.path.join(folder_str, fit_fname)))
class TheoryMatrix(object):
def __init__(self,ThzField,Thzomega,nir_wl,dephase,peakSplit,temp=60):
'''
This class is designed to handle everything for creating theory
matrices and comparing them to experiment.
Init defines some constants that are used throughout the calculation
and puts some things into proper units.
Parameters:
:ThzField: Give in kV/cm.
:Thzomega: Give in Ghz.
:nir_wl: Give in nanometers.
:dephase: Dephasing, give in meV.
Should roughly be the width of absorption peaks
:detune: Detuning, give in meV.
Difference between NIR excitation and band gap
:temp: Temperature, give in K
'''
self.F = ThzField * 10**5
self.Thz_w = Thzomega * 10**9 *2*np.pi
self.nir_wl = nir_wl * 10**(-9)
self.nir_ph = .0012398/self.nir_wl #NIR PHOTON ENERGY
self.detune = 1.52 - self.nir_ph
self.peakSplit = peakSplit*1.602*10**(-22)
self.dephase = dephase*1.602*10**(-22)
self.n_ref = 0
self.iterations = 0
self.max_iter = 0
self.hbar = 1.055*10**(-34) # hbar in Js
self.temp = temp
self.kb = 8.617*10**(-5) # Boltzmann constant in eV/K
self.temp_ev = self.temp*self.kb
def mu_generator(self,gamma1,gamma2,phi,beta):
'''
Given gamma1 and gamma2 produces mu+- according to
mu_+- = m_e / (1/m_cond + gamma1 -+ 2*gamma2)
Note that this formula is only accurate for THz and NIR
polarized along [010]. The general form requires gamma3 as well
Parameters:
:gamma1: Gamma1 parameter in the luttinger hamiltonian.
Textbook value of 6.85
:gamma2: Gamma2 parameter in the luttinger hamiltonian.
Textbook value of 2.1
:phi: [100] to THz orientation, passed from the data array
:beta: experimentally measured g3/g2 ratio
Returns: mu_p, mu_m effective mass of of mu plus/minus
'''
theta = phi + np.pi/4
emass = 9.109*10**(-31) # bare electron mass in kg
m_cond = 0.0665 # Effective mass of conduction band
mu_p = emass/( 1/m_cond + gamma1 - gamma2*np.sqrt(3*np.sin(2*theta)**2+1+3*np.cos(2*theta)**2*beta**2) ) # Calculates mu_plus
mu_m = emass/( 1/m_cond + gamma1 + gamma2*np.sqrt(3*np.sin(2*theta)**2+1+3*np.cos(2*theta)**2*beta**2) ) # Calculates mu_minus
return mu_p,mu_m
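# Sketch of calling mu_generator() with the textbook Luttinger parameters quoted
# in the docstring; the field, frequency, wavelength, phi and beta values are
# purely illustrative:
#
#   tm = TheoryMatrix(ThzField=10, Thzomega=540, nir_wl=760, dephase=3, peakSplit=0)
#   mu_p, mu_m = tm.mu_generator(6.85, 2.1, phi=0.0, beta=1.42)
#   print(mu_p / 9.109e-31, mu_m / 9.109e-31)   # in units of the bare electron mass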
def alpha_value(self,x):
'''
alpha parameter given by Qile's notes on two band model for a given x
Parameters:
:x: the argument of the calculation. Give in radians
Returns:
:alpha_val: the alpha parameter given in Qile's notes
'''
alpha_val = np.cos(x/2) - np.sin(x/2)/(x/2)
# This does the calculation. Pretty straightforward
return alpha_val
def gamma_value(self,x):
'''
gamma parameter given by Qile's notes on two band model
Parameters:
:x: Argument of the calculation. Give in radians
Returns:
:gamma_val: the gamma parameter given in Qile's notes
'''
gamma_val = np.sin(x/2)/(x/2)
# does the calculation
return gamma_val
def Up(self,mu):
'''
Calculates the ponderomotive energy
Ponderomotive energy given by
U = e^2*F_THz^2/(4*mu*w_THz^2)
Parameters:
:F: Thz field. Give in V/m
:mu: effective mass. Give in kg
:w: omega, the THz frequency. Give in angular frequency.
Returns:
:u: The ponderomotive energy
'''
F = self.F
w = self.Thz_w
echarge = 1.602*10**(-19) # electron charge in Coulombs
u = echarge**(2)*F**(2)/(4*mu*w**2) # calculates the ponderomotive energy
return u
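# Sketch of a quick ponderomotive-energy check, reusing the illustrative `tm`
# and `mu_p` from the mu_generator sketch above:
#
#   print(tm.Up(mu_p) / 1.602e-19 * 1e3, "meV")   # convert J -> meV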
def phonon_dephase(self,n):
'''
Step function that will compare the energy gained by the sideband to the
energy of the phonon (36.6meV). If the energy is less than the phonon,
return zero. If it's more return the scattering rate as determined by
Yu and Cordana Eq 5.51
This really should be treated as a full integral, but whatever
'''
thz_omega = self.Thz_w
hbar = self.hbar
thz_ev = n*hbar*thz_omega/(1.602*10**-19) # converts to eV
phonon_ev = 36.6*10**(-3) # phonon energy in eV
emass = 9.109*10**(-31) # bare electron mass in kg
m_cond = 0.0665 # Effective mass of conduction band
m_eff = emass*m_cond
phonon_n = 1/(np.exp(phonon_ev/self.temp_ev)-1)
if thz_ev<phonon_ev:
# print('No phonon for order',n)
return 0
else:
W0 = 7.7*10**12 # characteristic rate
rate_frac = phonon_n*np.sqrt((thz_ev+phonon_ev)/thz_ev)+(
phonon_n+1)*np.sqrt((thz_ev-phonon_ev)/thz_ev)+(
phonon_ev/thz_ev)*(-phonon_n*np.arcsinh(np.sqrt(
phonon_ev/thz_ev))+(phonon_n+1)*np.arcsinh(np.sqrt(
(thz_ev-phonon_ev)/thz_ev)))
# Got this from Yu and Cordana's book
fullW = W0*rate_frac
return fullW
def integrand(self,x,mu,n):
'''
Calculate the integrand to integrate A_n+- in two_band_model pdf eqn 13.
Given in the new doc pdf from Qile as I_d^(2n)
Parameters:
:x: Argument of integrand equal to omega*t. This is the variable integrated
over.
:dephase: dephasing rate. Should be a few meV, ~the width of the exciton
absorption peak (according to Qile). Should be float
:w: Frequency of THz in radians.
:F: Thz field in V/m
:mu: reduced mass give in kg
:n: Order of the sideband
Returns:
:result: The value of the integrand for a given x value
'''
hbar = self.hbar
F = self.F
w = self.Thz_w
dephase = self.dephase
detune = self.detune
pn_dephase = self.phonon_dephase(n)
exp_arg = (-dephase*x/(hbar*w)-pn_dephase*x/w + 1j*x*self.Up(mu)/(hbar*w)*(self.gamma_value(x)**2-1)+1j*n*x/2-1j*detune*x/(hbar*w))
# Argument of the exponential part of the integrand
bessel_arg = x*self.Up(mu)*self.alpha_value(x)*self.gamma_value(x)/(hbar*w)
# Argument of the bessel function
bessel = spl.jv(n/2,bessel_arg)
# calculates the J_n(bessel_arg) bessel function
result = np.exp(exp_arg)*bessel/x
# This is the integrand for a given x
return result
def Qintegrand(self,x,mu,n):
'''
Calculate the integrand in the expression for Q, with the simplification
that the canonical momentum is zero upon exciton pair creation.
Parameters:
:x: integration variable of dimensionless units. Equal to omega*tau
:dephase: dephasing rate of the electron hole pair as it is accelerated by
the THz field
:w: Frequency of THz in radians
:F: THz field in V/m
:mu: the effective reduced mass of the electron-hole pair
:n: Order of the sideband
'''
hbar = self.hbar
F = self.F
w = self.Thz_w
dephase = self.dephase
pn_detune = self.phonon_dephase(n)
c0 = 2*(x-np.sin(x))
a = 3*np.sin(2*x)-4*np.sin(w*x)-2*w*x*np.cos(2*x)
b = -3*np.cos(2*w*x)-4*np.cos(x)+2*w*x*np.sin(2*x)+1
c1 = np.sign(a)*np.sqrt(a**2+b**2)
phi = np.arctan2(a,b)
exp_arg = -dephase*x/w-1j*pn_detune*x/w + 1j*(self.Up(mu)*x)/(hbar*w**2)*c0 -1j*n*phi
bessel_arg = self.Up(mu)/(hbar*w)*c1
bessel = spl.jv(n,bessel_arg)
result = np.exp(exp_arg)*bessel*(-1)**(n/2)
return result
def scale_J_n_T(self,Jraw,Jxx,observedSidebands,crystalAngle,saveFileName,
index, save_results=True, scale_to_i=True):
'''
This function takes the raw J from fan_n_Tmat or findJ and scales it with
Jxx found from scaling sideband strengths with the laser line/PMT
In regular processing we actually find all the matrices normalized to Jxx
Now it can scale to a given sideband order.
This is to allow comparison between the measured sideband powers,
normalized by the PMT, to the evaluated Path Integral from the two band
model. By normalizing the measured values and integrals to a given
sideband index, we can remove the physical constants from the evaluation.
:param Jraw: set of matrices from findJ
:param Jxx: sb_results from PMT and CCD data
:param observedSidebands: np array of observed sidebands. Data will be
cropped such that these sidebands are included in everything.
:param crystalAngle: (Float) Angle of the sample from the 010 crystal face
:saveFileName: Str of what you want to call the text files to be saved
:save_results: Boolean controls if things are saved to txt files.
Currently saves scaled J and T
:param index: the sideband index to which we want to normalize.
:param saveFileName: Str of what you want to call the text files to be saved.
:param scale_to_i: Boolean that controls to normalize to the ith sideband
True -> Scale to ith | False -> scale to laser line
returns: scaledJ, scaledT matrices scaled by Jxx strengths
'''
# Initialize the array for scaling
Jxx_scales = np.array([ ])
self.n_ref = index
if scale_to_i:
for idx in np.arange(len(Jxx[:,0])):
if Jxx[idx,0] == index:
scale_to = Jxx[idx,3]
print('scale to:',scale_to)
# sets the scale_to to be Jxx for the ith sideband
else:
scale_to = 1 # just makes this 1 if you don't want to scale to i
scaledJ = Jraw # initialize the scaled J matrix
for idx in np.arange(len(Jxx[:,0])):
if Jxx[idx,0] in observedSidebands:
Jxx_scales = np.append(Jxx_scales,Jxx[idx,3]/scale_to)
print('Scaling sb order',Jxx[idx,0])
# Creates scaling factor
for idx in np.arange(len(Jxx_scales)):
scaledJ[:,:,idx] = Jraw[:,:,idx]*Jxx_scales[idx]
# For each sideband scales Jraw by Jxx_scales
scaledT = makeT(scaledJ,crystalAngle)
# Makes scaledT from our new scaledJ
if save_results:
saveT(scaledJ, observedSidebands, "{}_scaledJMatrix.txt".format(saveFileName))
saveT(scaledT, observedSidebands, "{}_scaledTMatrix.txt".format(saveFileName))
# Saves the matrices
return scaledJ, scaledT
def Q_normalized_integrals(self,gamma1,gamma2,n,phi,beta):
'''
Returns Q_n^{HH}/Q_n^{LH} == Integrand_n^{HH}/Integrand_n^{LH}
Unlike the normalized integrals used in early 2020 analysis, these integrals are of a
given Fourier component's intensity from either the HH or LH band, and thus there is no
prefactor related to the energy of the given sideband photon
Parameters:
:dephase: dephasing rate passed to the initialized TheoryMatrix object
:w: the frequency of the THz field, in GHz
:F: THz field strength in V/m
:gamma1: Gamma1 parameter from Luttinger Hamiltonian
:gamma2: Gamma2 parameter from Luttinger Hamiltonian
:n: Order of the sideband for this integral
:phi: [100] to THz orientation, passed from the cost function (in radians)
:beta: experimentally measured g3/g2 ratio
Returns: QRatio, the ratio of Q_n^{HH}/Q_n^{LH}
'''
mu_p,mu_m = self.mu_generator(gamma1,gamma2,phi,beta)
w = self.Thz_w
hbar = self.hbar
detune = self.detune
U_pp = self.Up(mu_p)
U_pm = self.Up(mu_m)
int_cutoff_HH = ((n*hbar*w-detune)/(8*U_pp))**(1/4)
int_cutoff_LH = ((n*hbar*w-detune)/(8*U_pm))**(1/4)
# Because the integrand is complex, the real and imaginary parts have to be
# integrated separately (scipy's quad only handles real-valued functions).
re_Q_HH = intgt.quad(lambda x: np.real(self.Qintegrand(x,mu_p,n)),
0,int_cutoff_HH)[0]
re_Q_LH = intgt.quad(lambda x: np.real(self.Qintegrand(x,mu_m,n)),
0,int_cutoff_LH)[0]
im_Q_HH = intgt.quad(lambda x: np.imag(self.Qintegrand(x,mu_p,n)),
0,int_cutoff_HH)[0]
im_Q_LH = intgt.quad(lambda x: np.imag(self.Qintegrand(x,mu_m,n)),
0,int_cutoff_LH)[0]
# Combine the real and imaginary to have the full integral
QRatioRe = re_Q_HH/re_Q_LH
QRatioIm = im_Q_HH/im_Q_LH
return QRatioRe, QRatioIm
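# The pattern used above for integrating a complex-valued integrand with
# scipy.integrate.quad, which only accepts real-valued functions; `f` is a
# placeholder for e.g. Qintegrand with its other arguments fixed:
#
#   re_part = intgt.quad(lambda x: np.real(f(x)), 0, cutoff)[0]
#   im_part = intgt.quad(lambda x: np.imag(f(x)), 0, cutoff)[0]
#   value   = re_part + 1j * im_part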
def normalized_integrals(self,gamma1,gamma2,n,n_ref,phi,beta):
'''
Returns the plus and minus eta for a given sideband order, normalized
to order n_ref (should probably be 10?). This whole calculation relies
on calculating the ratio of these quantities to get rid of some troubling
constants. So you need a reference integral.
eta(n)+- =
(w_nir + 2*n*w_thz)^2/(w_nir + 2*n_ref*w_thz)^2 *
(mu_+-/mu_ref)^2 * (int(n)+-)^2/(int(n_ref)+)^2
This takes gamma1 and gamma2 and gives the effective mass via mu_generator.
It then calculates the normalized integrals for both mu's and gives eta,
which is the integrals squared with some prefactors.
Then you feed this into a cost function that varies gamma1 and gamma2.
Parameters:
:dephase: dephasing rate. Should be a few meV, ~the width of the exciton
absorption peak (according to Qile). Should be float
:lambda_nir: wavelength of NIR in nm
:w_thz: frequency in GHz of fel. DO NOT give in angular form, the code
does that for you.
:F: THz field strength
:gamma1: Gamma1 parameter in the luttinger hamiltonian.
Textbook value of 6.85
:gamma2: Gamma2 parameter in the luttinger hamiltonian.
Textbook value of 2.1
:n: Order of sideband for this integral
:n_ref: Order of the reference integral which everything will be divided by
:phi: [100] to THz orientation, passed from the data array
:beta: experimentally measured g3/g2 ratio
Returns: eta_p, eta_m the values of the eta parameter normalized to the
appropriate sideband order for plus and minus values of mu.
'''
mu_p,mu_m = self.mu_generator(gamma1,gamma2,phi,beta)
# gets the plus/minus effective mass
omega_thz = self.Thz_w # FEL frequency
omega_nir = 2.998*10**8/(self.nir_wl) *2*np.pi
# NIR frequency, takes nm (wavelength) and gives angular Hz
Field = self.F # THz field
hbar = self.hbar
dephase = self.dephase
int_cutoff = hbar*omega_thz/dephase*10
# This cuts off the integral when x* dephase/hbaromega = 10
# Therefore the values of the integrand will be reduced by a value
# of e^(-10) which is about 4.5*10^(-5)
re_int_ref = intgt.quad(lambda x: np.real(self.integrand(
x,mu_p,n_ref)),0,int_cutoff,limit = 1000000)[0]
re_int_p = intgt.quad(lambda x: np.real(self.integrand(
x,mu_p,n)),0,int_cutoff,limit = 1000000)[0]
re_int_m = intgt.quad(lambda x: np.real(self.integrand(
x,mu_m,n)),0,int_cutoff,limit = 1000000)[0]
# Ok so these integrands are complex valued, but the intgt.quad integration
# does not work with that. So we split the integral up into two parts,
# real and imaginary parts. These lines calculate the real part for the
# reference, plus, and minus integrals.
# The integrals are limited to 1,000,000 subdivisions (the `limit` keyword). No clue if that's
# a good amount or what. We could potentially make this simpler by doing
# a trapezoidal rule.
# We define the lambda function here to set all the values of the integrand
# function we want except for the variable of integration x
im_int_ref = intgt.quad(lambda x: np.imag(self.integrand(
x,mu_p,n_ref)),0,int_cutoff,limit = 1000000)[0]
im_int_p = intgt.quad(lambda x: np.imag(self.integrand(
x,mu_p,n)),0,int_cutoff,limit = 1000000)[0]
im_int_m = intgt.quad(lambda x: np.imag(self.integrand(
x,mu_m,n)),0,int_cutoff,limit = 1000000)[0]
# Same as above but these are the imaginary parts of the integrals.
int_ref = re_int_ref + 1j*im_int_ref
int_p = re_int_p + 1j*im_int_p
int_m = re_int_m + 1j*im_int_m
# All the king's horses and all the king's men putting together our integrals
# again. :)
prefactor = ((omega_nir +2*n*omega_thz)**2)/((omega_nir +2*n_ref*omega_thz)**2)
# This prefactor is the ratio of energy of the nth sideband to the reference
m_pre = (mu_m/mu_p)**2
# There is a term of mu/mu_ref in the eta expression. For the plus case that
# ratio is 1, so only eta_m picks up this prefactor.
eta_p = prefactor*(np.abs(int_p)**2)/(np.abs(int_ref)**2)
eta_m = prefactor*m_pre*(np.abs(int_m)**2)/(np.abs(int_ref)**2)
# Putting everything together in one tasty little result
return eta_p,eta_m
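# A minimal usage sketch for normalized_integrals, kept as comments so nothing
# runs on import. The instance name `calc` and the numbers below are
# illustrative assumptions, not values from real data; n_ref follows the
# suggestion in the docstring above.
# calc = ...  # an instance of this class, constructed the usual way
# for n in [8, 10, 12, 14, 16]:
#     eta_p, eta_m = calc.normalized_integrals(6.85, 2.1, n, 10, phi=0.0, beta=1.42)
#     print(n, eta_p, eta_m)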
def cost_func(self,gamma1,gamma2,observedSidebands,n_ref,Jexp,phi,beta,gc_fname,eta_folder):
'''
This will sum up a cost function that takes the difference between
the theory generated eta's and experimental scaled matrices
eta+/eta+_ref = |Jxx|^2
eta-/eta+_ref = |Jyy-Jxx/4|^2/|3/4|^2
The cost function is given as the sum over observed sidebands of
Sqrt(|(eta+_exp - eta+_theory)/eta+_exp|^2 + |(eta-_exp - eta-_theory)/eta-_exp|^2)
(relative differences; see the 01/30/20 note in the code below)
Where the J elements have been scaled to the n_ref sideband (Jxx_nref)
This is designed to run over and over again as you try different
gamma values. On my (Joe) lab computer a single run takes ~300-400 sec.
The function keeps track of values by writing a file with iteration,
gamma1, gamma2, and cost for each run. This lets you keep track of the
results as you run.
Parameters:
:dephase: dephasing rate. Should be a few meV, ~the width of the exciton
absorption peak (according to Qile). Should be float
:lambda_nir: wavelength of NIR in nm
:w_thz: frequency of fel
:F: THz field strength in kV/cm
:gamma1: Gamma1 parameter in the luttinger hamiltonian.
Textbook value of 6.85
:gamma2: Gamma2 parameter in the luttinger hamiltonian.
Textbook value of 2.1
:n_ref: Order of the reference integral which everything will be divided by
:Jexp: Scaled experimental Jones matrices in xy basis that will be compared
to the theoretical values. Pass in the not flattened way.
:phi: [100] to THz orientation, passed from the data array
:beta: experimentally measured g3/g2 ratio
:gc_fname: File name for the gammas and cost results
:eta_folder: Folder name for the eta lists to go in
:i: iteration, for parallel processing output purposes
Returns:
:costs: Cumulative cost function for that run
:i: iteration, for parallel processing output purposes
:eta_list: list of etas for each sideband order of the form
sb order | eta_plus theory | eta_plus experiment | eta_minus theory | eta_minus experiment
.
.
.
'''
costs = 0 # initialize the costs for this run
t_start = time.time() # keeps track of the time the run started.
eta_list = np.array([0,0,0,0,0])
dephase = self.dephase
lambda_nir = self.nir_wl
omega_nir = 2.998*10**8/(self.nir_wl) *2*np.pi
w_thz = self.Thz_w
F = self.F
for idx in np.arange(len(observedSidebands)):
n = observedSidebands[idx]
eta_p,eta_m = self.normalized_integrals(gamma1,gamma2,n,n_ref,phi,beta)
# calculates eta from the normalized_integrals function
prefactor = ((omega_nir +2*n*w_thz)**2)/((omega_nir +2*n_ref*w_thz)**2)
# Jexp is indexed by position in observedSidebands (e.g. 8,10,12,14,16 -> idx 0..4)
exp_p = prefactor*np.abs(Jexp[0,0,idx])**2
exp_m = prefactor*np.abs(Jexp[1,1,idx]-(1/4)*Jexp[0,0,idx])**2*(9/16)
# calculates the experimental plus and minus values
# 1/9/20 added prefactor to these bad boys
costs += np.sqrt(np.abs((exp_p-eta_p)/(exp_p))**2 + np.abs((exp_m-eta_m)/(exp_m))**2)
# Adds the cost function for this sideband to the overall cost function
# 1/8/20 Changed cost function to be the difference of the ratio of the two etas
# 01/30/20 Changed cost function to be relative difference of eta_pm
this_etas = np.array([n,eta_p,exp_p,eta_m,exp_m])
eta_list = np.vstack((eta_list,this_etas))
self.iterations += 1
# Ups the iterations counter
g1rnd = round(gamma1,3)
g2rnd = round(gamma2,3)
costs_rnd = round(costs,5)
# Round gamma1,gamma2,costs to remove float rounding bullshit
g_n_c = str(self.iterations)+','+str(g1rnd)+','+str(g2rnd)+','+str(costs)+'\n'
# String version of iteration, gamma1, gamma2, cost with a new line
gc_file = open(gc_fname,'a') #opens the gamma/cost file in append mode
gc_file.write(g_n_c) # writes the new line to the file
gc_file.close() # closes the file
etas_header = "#\n"*95
etas_header += f'# Dephasing: {self.dephase/(1.602*10**(-22))} eV \n'
etas_header += f'# Detuning: {self.detune/(1.602*10**(-22))} eV \n'
etas_header += f'# Field Strength: {self.F/(10**5)} kV/cm \n'
etas_header += f'# THz Frequency: {self.Thz_w/(10**9 * 2*np.pi)} GHz \n'
etas_header += f'# NIR Wavelength: {self.nir_wl/(10**(-9))} nm \n'
etas_header += 'sb order, eta_plus theory, eta_plus experiment, eta_minus theory, eta_minus experiment \n'
etas_header += 'unitless, unitless, unitless, unitless, unitless \n'
# Creates an Origin-friendly header for the etas
# eta_fname = 'eta_g1_' + str(g1rnd) + '_g2_' + str(g2rnd) + r'.txt'
eta_fname = f'eta_g1_{g1rnd}_g2_{g2rnd}.txt'
eta_path = os.path.join(eta_folder,eta_fname)
#creates the file for this run of etas
eta_list = eta_list[1:,:]
np.savetxt(eta_path,eta_list, delimiter = ',',
header = etas_header, comments = '') #save the etas for these gammas
t_taken = round(time.time()-t_start,5) # calculates time taken for this run
print(" ")
print("---------------------------------------------------------------------")
print(" ")
print(f'Iteration number {self.iterations} / {self.max_iter} done')
print('for gamma1, gamma2 = ',g1rnd,g2rnd)
print('Cost function is = ',costs_rnd)
print('This calculation took ',t_taken,' seconds')
print(" ")
print("---------------------------------------------------------------------")
print(" ")
# These print statements help you keep track of what's going on as this
# goes on and on and on.
return costs
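# Hedged sketch of a single cost_func evaluation, kept as comments. The Jexp
# array, beta, and file/folder names are placeholders; cost_func also expects
# self.iterations and self.max_iter to have been initialized (gamma_sweep
# below does this for you).
# sidebands = np.array([8, 10, 12, 14, 16])
# Jexp = np.ones((2, 2, len(sidebands)), dtype=complex)  # placeholder Jones matrices
# cost = calc.cost_func(6.85, 2.1, sidebands, 10, Jexp, 0.0, 1.42,
#                       'gamma_costs.txt', 'eta_lists')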
def Q_cost_func(self,gamma1,gamma2,Gamma_Sidebands,Texp,crystalAngles,
beta,gc_fname,Q_folder,ThetaSweep = True):
'''
This compares the T matrix components measured by experiment to the
theoretical Q ratios calculated from gamma1 and gamma2.
'''
costs = 0 # Initialize the costs
imcost = 0
recost = 0
t_start = time.time()
Q_list = np.array([0,0,0,0,0])
if ThetaSweep:
for idx in np.arange(len(crystalAngles)):
n = Gamma_Sidebands
phi = float(crystalAngles[idx])
phi_rad = phi*np.pi/180
theta = phi_rad + np.pi/4
#Calculate the Theoretical Q Ratio
QRatioRe, QRatioIm = self.Q_normalized_integrals(gamma1,gamma2,n,phi_rad,beta)
QRatio = QRatioRe + 1j*QRatioIm
#Prefactor for experimental T Matrix algebra
PHI = 5/(3*(np.sin(2*theta) - 1j*beta*np.cos(2*theta)))
THETA = 1/(np.sin(2*theta)-1j*beta*np.cos(2*theta))
ExpQ = (Texp[idx,0,0]+PHI*Texp[idx,0,1])/(Texp[idx,0,0]-THETA*Texp[idx,0,1])
costs += np.abs((ExpQ - QRatio)/QRatio)
imcost += np.abs((np.imag(ExpQ)-QRatioIm)/QRatioIm)
recost += np.abs((np.real(ExpQ)-QRatioRe)/QRatioRe)
this_Qs = np.array([phi,np.real(ExpQ),np.imag(ExpQ),QRatioRe,QRatioIm])
Q_list = np.vstack((Q_list,this_Qs))
else:
for idx in np.arange(len(Gamma_Sidebands)):
n = Gamma_Sidebands[idx]
phi = float(crystalAngles)
phi_rad = phi*np.pi/180
theta = phi_rad + np.pi/4
#Calculate the Theoretical Q Ratio
QRatioRe, QRatioIm = self.Q_normalized_integrals(gamma1,gamma2,n,phi_rad,beta)
QRatio = QRatioRe + 1j*QRatioIm
#Prefactor for experimental T Matrix algebra
PHI = 5/(3*(np.sin(2*theta) - 1j*beta*np.cos(2*theta)))
THETA = 1/(np.sin(2*theta)-1j*beta*np.cos(2*theta))
ExpQ = (Texp[0,0,idx]+PHI*Texp[0,1,idx])/(Texp[0,0,idx]-THETA*Texp[0,1,idx])
costs += np.abs((ExpQ - QRatio)/QRatio)
imcost += np.abs((np.imag(ExpQ)-QRatioIm)/QRatioIm)
recost += np.abs((np.real(ExpQ)-QRatioRe)/QRatioRe)
this_Qs = np.array([n,np.real(ExpQ),np.imag(ExpQ),QRatioRe,QRatioIm])
Q_list = np.vstack((Q_list,this_Qs))
self.iterations += 1
g1rnd = round(gamma1,3)
g2rnd = round(gamma2,3)
costs_rnd = round(costs,5)
imcost_rnd = round(imcost,5)
recost_rnd = round(recost,5)
g_n_c = str(self.iterations) + ',' + str(g1rnd) + ',' + str(g2rnd) + ',' + str(costs) + ',' + str(imcost) + ',' + str(recost) + '\n'
gc_file = open(gc_fname,'a')
gc_file.write(g_n_c)
gc_file.close()
# Origin Header
Q_header = "#\n"*94
Q_header += f'# Crystal Angle: {phi} Deg \n'
Q_header += f'# Dephasing: {self.dephase/(1.602*10**(-22))} eV \n'
Q_header += f'# Detuning: {self.detune/(1.602*10**(-22))} eV \n'
Q_header += f'# Field Strength: {self.F/(10**5)} kV/cm \n'
Q_header += f'# THz Frequency: {self.Thz_w/(10**9 *2*np.pi)} GHz \n'
Q_header += f'# NIR Wavelength: {self.nir_wl/(10**(-9))} nm \n'
Q_header += 'Crystal Angles, QRatio Experiment Real, Imaginary, QRatio Theory Real, Imaginary\n'
Q_header += 'Degrees, unitless, unitless, unitless, unitless \n'
#Eta File Name
Q_fname = f'Q_g1_{g1rnd}_g2_{g2rnd}.txt'
Q_path = os.path.join(Q_folder,Q_fname)
Q_list = Q_list[1:,:]
np.savetxt(Q_path,Q_list, delimiter = ',',
header = Q_header, comments = '')
t_taken = round(time.time() - t_start,5)
print(" ")
print("---------------------------------------------------------------------")
print(" ")
print(f'Iteration number {self.iterations} / {self.max_iter} done')
print('for gamma1, gamma2 = ',g1rnd,g2rnd)
print('Cost function is = ',costs_rnd)
print('Imaginary Cost function is =',imcost_rnd)
print('Real Cost function is =',recost_rnd)
print('This calculation took ',t_taken,' seconds')
print(" ")
print("---------------------------------------------------------------------")
print(" ")
return costs,imcost,recost
def gamma_sweep(self,gamma1_array,gamma2_array,observedSidebands,n_ref,
Jexp,crystalAngle,gc_fname,eta_folder,save_results = True):
'''
This function calculates the integrals and cost function for an array of
gamma1 and gamma2. You can pass any array of gamma1 and gamma2 values and
this will return the costs for all those values. Lets you avoid the
weirdness of fitting algorithms.
Parameters:
:dephase: dephasing rate. Should be a few meV, ~the width of the exciton
absorption peak (according to Qile). Should be float
:lambda_nir: wavelength of NIR in nm
:w_thz: frequency of fel
:F: THz field strength
:gamma1: Gamma1 parameter in the luttinger hamiltonian.
Textbook value of 6.85
:gamma2: Gamma2 parameter in the luttinger hamiltonian.
Textbook value of 2.1
:n: Order of sideband for this integral
:n_ref: Order of the reference integral which everything will be divided by
:observedSidebands: List or array of observed sidebands. The code will
loop over sidebands in this array.
:Jexp: Scaled experimental Jones matrices in xy basis that will be compared
to the theoretical values. Pass in the not flattened way.
:gc_fname: File name for the gammas and cost functions, include .txt
:eta_folder: Folder name for the eta lists to go in
Returns: gamma_cost_array of form
gamma1 | gamma2 | cost |
. . .
. . .
. . .
This is just running cost_func over and over again essentially.
'''
dephase = self.dephase
lambda_nir = self.nir_wl
w_thz = self.Thz_w
F = self.F
phi = crystalAngle
self.max_iter = len(gamma1_array)*len(gamma2_array)
self.iterations = 0
gamma_cost_array = np.array([0,0,0])
# Initialize the gamma cost array
gammas_costs = np.array([])
# This is just for initializing the gamma costs file
gammacosts_header = "#\n"*95
gammacosts_header += f'# Dephasing: {self.dephase/(1.602*10**(-22))} eV \n'
gammacosts_header += f'# Detuning: {self.detune/(1.602*10**(-22))} eV \n'
gammacosts_header += f'# Field Strength: {self.F/(10**5)} kV/cm \n'
gammacosts_header += f'# THz Frequency: {self.Thz_w/(10**9 * 2*np.pi)} GHz \n'
gammacosts_header += f'# NIR Wavelength: {self.nir_wl/(10**(-9))} nm \n'
gammacosts_header += 'Iteration, Gamma1, Gamma2, Cost Function \n'
gammacosts_header += 'unitless, unitless, unitless, unitless \n'
# Creates an Origin-friendly header for gamma costs
np.savetxt(gc_fname, gammas_costs, delimiter = ',',
header = gammacosts_header, comments = '')
# create the gamma cost file
data = [gamma1_array,gamma2_array]
for gamma1 in gamma1_array:
for gamma2 in gamma2_array:
cost = self.cost_func(gamma1,gamma2,observedSidebands,
n_ref,Jexp, phi, 1.42, gc_fname,eta_folder)
this_costngamma = np.array([gamma1,gamma2,cost])
gamma_cost_array = np.vstack((gamma_cost_array,this_costngamma))
# calculates the cost for each gamma1/2 and adds the gamma1, gamma2,
# and cost to the overall array.
# gamma_cost_array = gamma_cost_final[1:,:]
# if save_results:
# sweepcosts_header = "#\n"*100
# sweepcosts_header += 'Gamma1, Gamma2, Cost Function \n'
# sweepcosts_header += 'unitless, unitless, unitless \n'
#
# sweep_name = 'sweep_costs_' + gc_fname
# np.savetxt(sweep_name,gamma_cost_array,delimiter = ',',
# header = sweepcosts_header, comments = '')
# Ok so right now I think I am going to get rid of saving this file
# since it has the same information as the file that is saved in
# cost_func but that file is updated every iteration where this
# one only works at the end. So if the program gets interrupted
# the other one will still give you some information.
return gamma_cost_array
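# Sketch of how gamma_sweep is typically driven, kept as comments; the array
# ranges, Jexp, and file/folder names are illustrative assumptions.
# g1s = np.linspace(6.0, 7.5, 4)
# g2s = np.linspace(1.5, 2.5, 4)
# results = calc.gamma_sweep(g1s, g2s, observedSidebands, 10, Jexp, 0.0,
#                            'gamma_costs.txt', 'eta_lists')
# best = results[1:][np.argmin(results[1:, 2])]  # row 0 is the [0,0,0] initializer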
def gamma_th_sweep(self,gamma1_array,gamma2_array,n,crystalAngles,
Texp,gc_fname,Q_folder,ThetaSweep = True, save_results = True):
'''
This function calculates the integrals and cost function for an array of
gamma1 and gamma2. You can pass any array of gamma1 and gamma2 values and
this will return the costs for all those values. Lets you avoid the
weirdness of fitting algorithms.
Parameters:
:dephase: dephasing rate. Should be a few meV, ~the width of the exciton
absorption peak (according to Qile). Should be float
:lambda_nir: wavelength of NIR in nm
:w_thz: frequency of fel
:F: THz field strength
:gamma1_array: array of Gamma1 values (Luttinger hamiltonian) to sweep over.
Textbook value of 6.85
:gamma2_array: array of Gamma2 values (Luttinger hamiltonian) to sweep over.
Textbook value of 2.1
:n: Sideband order (a single order if ThetaSweep is True, otherwise an
array of orders to loop over)
:crystalAngles: [100] to THz orientation(s) of the crystal, in degrees
:Texp: Scaled experimental T matrices in the xy basis that will be compared
to the theoretical values. Pass in the not flattened way.
:gc_fname: File name for the gammas and cost functions, include .txt
:Q_folder: Folder name for the Q ratio lists to go in
:ThetaSweep: If True, sweep over crystal angles at a single sideband order;
if False, sweep over sideband orders at a single crystal angle.
Returns: gamma_cost_array of form
gamma1 | gamma2 | cost | imcost | recost
.        .        .      .        .
.        .        .      .        .
This is just running Q_cost_func over and over again essentially.
'''
#Hard Coding the experimental g3/g2 factor
beta = 1.42
self.iterations = 0
self.max_iter = len(gamma1_array)*len(gamma2_array)
gamma_cost_array = np.array([0,0,0,0,0])
# Initialize the gamma cost array
gammas_costs = np.array([])
# This is just for initializing the gamma costs file
gammacosts_header = "#\n"*95
gammacosts_header += f'# Detuning: {self.detune/(1.602*10**(-22))} eV \n'
gammacosts_header += f'# Field Strength: {self.F/(10**5)} kV/cm \n'
gammacosts_header += f'# THz Frequency: {self.Thz_w/(10**9 * 2*np.pi)} GHz \n'
gammacosts_header += f'# NIR Wavelength: {self.nir_wl/(10**(-9))} nm \n'
gammacosts_header += 'Iteration, Gamma1, Gamma2, Cost Function, Imaginary, Real \n'
gammacosts_header += 'unitless, unitless, unitless, unitless, unitless \n'
# Creates an Origin-friendly header for gamma costs
np.savetxt(gc_fname, gammas_costs, delimiter = ',',
header = gammacosts_header, comments = '')
# create the gamma cost file
for gamma1 in gamma1_array:
for gamma2 in gamma2_array:
cost,imcost,recost = self.Q_cost_func(gamma1,gamma2,n,
Texp,crystalAngles,beta,gc_fname,Q_folder,ThetaSweep)
this_costngamma = np.array([gamma1,gamma2,cost,imcost,recost])
gamma_cost_array = np.vstack((gamma_cost_array,this_costngamma))
# calculates the cost for each gamma1/2 and adds the gamma1, gamma2,
# and cost to the overall array.
return gamma_cost_array
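# Analogous sketch for the T-matrix (Q ratio) sweep, kept as comments; the
# angles, Texp, and names are placeholders.
# angles = [0.0, 15.0, 30.0, 45.0]
# results = calc.gamma_th_sweep(g1s, g2s, 10, angles, Texp,
#                               'gamma_Q_costs.txt', 'Q_lists', ThetaSweep=True)
# best = results[1:][np.argmin(results[1:, 2])]  # columns: g1, g2, cost, imcost, recost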
####################
# Fitting functions
####################
def gauss(x, *p):
"""
Gaussian fit function.
:param x: The independent variable
:type x: np.array, or int or float
:param p: [mean, area, width, y offset] to be unpacked
:type p: list of floats or ints
:return: Depends on x, returns another np.array or float or int
:rtype: type(x)
"""
mu, A, sigma, y0 = p
return (A / sigma) * np.exp(-(x - mu) ** 2 / (2. * sigma ** 2)) + y0
def lingauss(x, *p):
"""
Gaussian fit function with a linear offset
:param x: The independent variable
:type x: np.array, or int or float
:param p: [mean, area, width, constant offset of background, slope of background] to be unpacked
:type p: list of floats or ints
:return: Depends on x, returns another np.array or float or int
:rtype: type(x)
"""
mu, A, sigma, y0, m = p
return (A / sigma) * np.exp(-(x - mu) ** 2 / (2. * sigma ** 2)) + y0 + m * x
def lorentzian(x, *p):
"""
Lorentzian fit with constant offset
:param x: The independent variable
:type x: np.array, or int or float
:param p: [mean, area, width, constant offset of background] to be unpacked
:type p: list of floats or ints
:return: Depends on x, returns another np.array or float or int
:rtype: type(x)
"""
mu, A, gamma, y0 = p
return (A / np.pi) * (gamma / ((x - mu) ** 2 + gamma ** 2)) + y0
def background(x, *p):
"""
Arbitrary pink-noise model background data for absorbance FFT
with the intention of replacing a peak in the FFT
with the background
:param x: The independent variable
:type x: np.array, or int or float
:param p: [proportionality factor, exponent of power law]
:type p: list of floats or ints
:return: Depends on x
:rtype: type(x)
"""
a, b = p
return a * (1 / x) ** b
def gaussWithBackground(x, *p):
"""
Gaussian with pink-noise background function
:param x: independent variable
:type x: np.array, or int or float
:param p: [mean, area, width, constant background, proportionality of power law, exponent of power law]
:type p: list of floats or ints
:return: Depends on x
:rtype: type(x)
"""
pGauss = p[:4]
a, b = p[4:]
return gauss(x, *pGauss) + background(x, a, b)
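# Minimal sketch of fitting one of the functions above with curve_fit (as used
# elsewhere in this module), kept as comments; the synthetic data and initial
# guess are assumptions for illustration only.
# x = np.linspace(795, 805, 400)  # nm, arbitrary window
# y = lingauss(x, 800.0, 5.0, 0.5, 0.1, 0.0) + np.random.normal(0, 0.05, x.size)
# p0 = [800.0, 5.0, 0.5, 0.1, 0.0]  # [mean, area, width, offset, slope]
# popt, pcov = curve_fit(lingauss, x, y, p0=p0)
# perr = np.sqrt(np.diag(pcov))  # 1-sigma errors on the fit parameters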
####################
# Collection functions
####################
def hsg_combine_spectra(spectra_list, verbose = False, **kwargs):
"""
This function is all about smooshing different parts of the same hsg
spectrum together. It takes a list of HighSidebandCCD spectra and turns the
zeroth spec_step into a FullHighSideband object. It then uses the function
stitch_hsg_dicts over and over again for the smooshing.
Input:
spectra_list = list of HighSidebandCCD objects that have sideband spectra
larger than the spectrometer can see.
Returns:
good_list = A list of FullHighSideband objects that have been combined as
much as can be.
:param spectra_list: randomly-ordered list of HSG spectra, some of which can be stitched together
:type spectra_list: List of HighSidebandCCD objects
kwargs gets passed onto add_item
:return: fully combined list of full hsg spectra. No PMT business yet.
:rtype: list of FullHighSideband
"""
good_list = []
spectra_list = spectra_list.copy()
spectra_list.sort(key=lambda x: x.parameters["spec_step"])
# keep a dict for each series' spec step
# This allows you to combine spectra whose spec steps
# change by values other than 1 (2, if you skip, or 0.5 if you
# decide to insert things, or arbitrary strings)
spec_steps = {}
for elem in spectra_list:
# if verbose:
# print "Spec_step is", elem.parameters["spec_step"]
current_steps = spec_steps.get(elem.parameters["series"], [])
current_steps.append(elem.parameters["spec_step"])
spec_steps[elem.parameters["series"]] = current_steps
if verbose:
print("I found these spec steps for each series:")
print("\n\t".join("{}: {}".format(*ii) for ii in spec_steps.items()))
# sort the list of spec steps
for series in spec_steps:
spec_steps[series].sort()
same_freq = lambda x,y: x.parameters["fel_lambda"] == y.parameters["fel_lambda"]
for index in range(len(spectra_list)):
try:
temp = spectra_list.pop(0)
if verbose:
print("\nStarting with this guy", temp, "\n")
except:
break
good_list.append(FullHighSideband(temp))
counter = 1
temp_list = list(spectra_list)
for piece in temp_list:
if verbose:
print("\tchecking this spec_step", piece.parameters["spec_step"], end=' ')
print(", the counter is", counter)
if not same_freq(piece, temp):
if verbose:
print("\t\tnot the same fel frequencies ({} vs {})".format(piece.parameters["fel_lambda"], temp.parameters["fel_lambda"]))
continue
if temp.parameters["series"] == piece.parameters["series"]:
if piece.parameters["spec_step"] == spec_steps[temp.parameters["series"]][counter]:
if verbose:
print("I found this one", piece)
counter += 1
good_list[-1].add_CCD(piece, verbose=verbose, **kwargs)
spectra_list.remove(piece)
else:
print("\t\tNot the right spec step?", type(piece.parameters["spec_step"]))
else:
if verbose:
print("\t\tNot the same series ({} vs {}".format(
piece.parameters["series"],temp.parameters["series"]))
good_list[-1].make_results_array()
return good_list
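# Typical call pattern, kept as comments; the folder path is a placeholder and
# proc_n_plotCCD is the loader used elsewhere in this module.
# specs = proc_n_plotCCD('path/to/spec_step_folder', verbose=False)
# full_spectra = hsg_combine_spectra(specs, verbose=True)
# for fs in full_spectra:
#     print(fs.sb_results)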
def hsg_combine_spectra_arb_param(spectra_list, param_name="series", verbose = False):
"""
This function is all about smooshing different parts of the same hsg
spectrum together. It takes a list of HighSidebandCCD spectra and turns the
zeroth spec_step into a FullHighSideband object. It then uses the function
stitch_hsg_dicts over and over again for the smooshing.
This is different from hsg_combine_spectra in that you pass which
criterion distinguishes the files to be the "same". Since it can be any arbitrary
value, things won't be exactly the same (field strength will never be identical
between images). It will start with the first (lowest) spec step, then compare
against the spectra in the other spec steps; whichever combination has the
smallest spread in the chosen parameter gets stitched together.
Input:
spectra_list = list of HighSidebandCCD objects that have sideband spectra
larger than the spectrometer can see.
Returns:
good_list = A list of FullHighSideband objects that have been combined as
much as can be.
:param spectra_list: randomly-ordered list of HSG spectra, some of which can be stitched together
:type spectra_list: list of HighSidebandCCD
:return: fully combined list of full hsg spectra. No PMT business yet.
:rtype: list of FullHighSideband
"""
if not spectra_list:
raise RuntimeError("Passed an empty spectra list!")
if isinstance(param_name, list):
# if you pass two things because the param you want
# is in a dict (e.g. field strength has mean/std)
# do it that way
param_name_list = list(param_name)
paramGetter = lambda x: x.parameters[param_name_list[0]][param_name_list[1]]
param_name = param_name[0]
elif isinstance(spectra_list[0].parameters[param_name], dict):
paramGetter = lambda x: x.parameters[param_name]["mean"]
else:
paramGetter = lambda x: x.parameters[param_name]
good_list = []
spectra_list.sort(key=lambda x: x.parameters["spec_step"])
# keep a dict for each spec step.
spec_steps = {}
for elem in spectra_list:
if verbose:
print("Spec_step is", elem.parameters["spec_step"])
current_steps = spec_steps.get(elem.parameters["spec_step"], [])
current_steps.append(elem)
spec_steps[elem.parameters["spec_step"]] = current_steps
# Next, loop over all of the elements. For each element, if it has not
# already been added to a spectra, look at all of the combinations from
# other spec steps to figure out which has the smallest overall deviation
# to make a new full spectrum
good_list = []
already_added = set()
for elem in spectra_list:
if elem in already_added: continue
already_added.add(elem)
good_list.append(FullHighSideband(elem))
other_spec_steps = [v for k, v in list(spec_steps.items()) if
k != good_list[-1].parameters["spec_step"]]
min_distance = np.inf
cur_value = paramGetter(good_list[-1])
best_match = None
for comb in itt.product(*other_spec_steps):
new_values = list(map(paramGetter, comb))
all_values = new_values + [cur_value]
if np.std(all_values) < min_distance:
min_distance = np.std(all_values)
best_match = list(comb)
if best_match is None:
raise RuntimeError("No matches found. Empty lists passed?")
best_values = list(map(paramGetter, best_match))
for spec in best_match:
print("Adding new spec step\n\tStarted with spec={},series={}".format(
good_list[-1].parameters["spec_step"],good_list[-1].parameters["series"]
))
print("\tAdding with spec={},series={}\n".format(
spec.parameters["spec_step"],
spec.parameters["series"]
))
print("\n\nfirst SBs:\n", good_list[-1].sb_results)
print("\n\nsecond SBs:\n", spec.sb_results)
good_list[-1].add_CCD(spec, True)
print("\n\nEnding SBs:\n", good_list[-1].sb_results)
already_added.add(spec)
best_match.append(good_list[-1])
best_values.append(cur_value)
new_value = np.mean(best_values)
new_std = np.std(best_values)
if isinstance(good_list[-1].parameters[param_name], dict):
best_values = np.array([x.parameters[param_name]["mean"] for x in best_match])
best_std = np.array([x.parameters[param_name]["std"] for x in best_match])
new_value = np.average(best_values, weights = best_std)
new_std = np.sqrt(np.average((best_values-new_value)**2, weights=best_std))
good_list[-1].parameters[param_name] = {
"mean": new_value,
"std": new_std
}
return good_list
def pmt_sorter(folder_path, plot_individual = True):
"""
This function will be fed a folder with a bunch of PMT data files in it.
The folder should contain a bunch of spectra with at least one sideband in
them, each differing by the series entry in the parameters dictionary.
This function will return a list of HighSidebandPMT objects.
:param folder_path: Path to a folder containing a bunch of PMT data, can be
part of a parameter sweep
:type folder_path: str
:param plot_individual: Whether to plot each sideband itself
:return: A list of all the possible hsg pmt spectra, organized by series tag
:rtype: list of HighSidebandPMT
"""
file_list = glob.glob(os.path.join(folder_path, '*[0-9].txt'))
pmt_list = []
plot_sb = lambda x: None
if plot_individual:
plt.figure("PMT data")
def plot_sb(spec):
spec = copy.deepcopy(spec)
spec.process_sidebands()
elem = spec.sb_dict[spec.initial_sb]
plt.errorbar(elem[:, 0], elem[:, 1], elem[:, 2],
marker='o',
label="{} {}, {}.{} ".format(
spec.parameters["series"], spec.initial_sb,
spec.parameters["pm_hv"],
't' if spec.parameters.get("photon counted", False) else 'f')
)
for sb_file in file_list:
temp = HighSidebandPMT(sb_file)
plot_sb(temp)
try:
for pmt_spectrum in pmt_list: # pmt_spectrum is a pmt object
if temp.parameters['series'] == pmt_spectrum.parameters['series']:
pmt_spectrum.add_sideband(temp)
break
else: # this will execute IF the break was NOT called
pmt_list.append(temp)
except:
pmt_list.append(temp)
# for sb_file in file_list:
# with open(sb_file,'rU') as f:
# param_str = ''
# line = f.readline()
# line = f.readline()
# while line[0] == '#':
# param_str += line[1:]
# line = f.readline()
#
# parameters = json.loads(param_str)
# try:
# for pmt_spectrum in pmt_list: # pmt_spectrum is a pmt object?
# if parameters['series'] == pmt_spectrum.parameters['series']:
# pmt_spectrum.add_sideband(sb_file)
# break
# else: # this will execute IF the break was NOT called
# pmt_list.append(HighSidebandPMT(sb_file))
# except:
# pmt_list.append(HighSidebandPMT(sb_file))
for pmt_spectrum in pmt_list:
pmt_spectrum.process_sidebands()
return pmt_list
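# Hedged usage sketch, kept as comments; the folder path is a placeholder.
# pmt_specs = pmt_sorter('path/to/pmt_folder', plot_individual=False)
# for spec in pmt_specs:
#     print(spec.parameters['series'], sorted(spec.sb_dict.keys()))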
def stitch_abs_results(main, new):
raise NotImplementedError
def hsg_combine_qwp_sweep(path, loadNorm = True, save = False, verbose=False,
skipOdds = True):
"""
Given a path to data taken from rotating the QWP (doing polarimetry),
process the data (fit peaks), and parse it into a matrix of sb strength vs
QWP angle vs sb number.
By default, saves the file into "Processed QWP Dependence"
Return should be passed directly into fitting
-1 | SB1 | SB1 | SB2 | SB2 | ... | ... | SBn | SBn |
angle1 | SB Strength | SB err | SB Strength | SB Err |
angle2 | ... | . |
.
.
.
:param path: Path to load
:param loadNorm: if true, load the normalized data
:param save: Save the processed file or not
:param verbose:
:param skipOdds: Passed on to save_parameter_sweep; determines whether or not to save
odd orders. Generally, odds are artifacts and I don't want
them messing up the data, so default to True.
:return:
"""
def getData(fname):
"""
Helper function for loading the data and getting the header information for incident NIR stuff
:param fname:
:return:
"""
if isinstance(fname, str):
if loadNorm:
ending = "_norm.txt"
else:
ending = "_snip.txt"
header = ''
with open(os.path.join("Processed QWP Dependence", fname + ending)) as fh:
ln = fh.readline()
while ln[0] == '#':
header += ln[1:]
ln = fh.readline()
data = np.genfromtxt(os.path.join("Processed QWP Dependence", fname + ending),
delimiter=',', dtype=str)
if isinstance(fname, io.BytesIO):
header = b''
ln = fname.readline()
while ln.decode()[0] == '#':
header += ln[1:]
ln = fname.readline()
fname.seek(0)
data = np.genfromtxt(fname,
delimiter=',', dtype=str)
header = json.loads(header)
return data, float(header["lAlpha"]), float(header["lGamma"]), float(header["nir"]), float(header["thz"])
######### End getData
try:
sbData, lAlpha, lGamma, nir, thz = getData(path)
except:
# Do the processing on all the files
specs = proc_n_plotCCD(path, keep_empties=True, verbose=verbose)
for sp in specs:
try:
sp.parameters["series"] = round(float(sp.parameters["rotatorAngle"]), 2)
except KeyError:
# Old style of formatting
sp.parameters["series"] = round(float(sp.parameters["detectorHWP"]), 2)
specs = hsg_combine_spectra(specs, ignore_weaker_lowers=False)
if not save:
# If you don't want to save them, set everything up for doing Bytes objects
# to replace saving files
full, snip, norm = io.BytesIO(), io.BytesIO(), io.BytesIO()
if "nir_pola" not in specs[0].parameters:
# in the olden days. Force them. Hopefully making them outside of ±360
# makes it obvious
specs[0].parameters["nir_pola"] = 361
specs[0].parameters["nir_polg"] = 361
keyName = "rotatorAngle"
if keyName not in specs[0].parameters:
# from back before I changed the name
keyName = "detectorHWP"
save_parameter_sweep(specs, [full, snip, norm], None,
keyName, "deg", wanted_indices=[3, 4],
header_dict={
"lAlpha": specs[0].parameters["nir_pola"],
"lGamma": specs[0].parameters["nir_polg"],
"nir": specs[0].parameters["nir_lambda"],
"thz": specs[0].parameters["fel_lambda"], },
only_even=skipOdds)
if loadNorm:
sbData, lAlpha, lGamma, nir, thz = getData(norm)
else:
sbData, lAlpha, lGamma, nir, thz = getData(snip)
else:
save_parameter_sweep(specs, os.path.basename(path), "Processed QWP Dependence",
"rotatorAngle", "deg", wanted_indices=[3, 4],
header_dict={
"lAlpha": specs[0].parameters["nir_pola"],
"lGamma": specs[0].parameters["nir_polg"],
"nir": specs[0].parameters["nir_lambda"],
"thz": specs[0].parameters["fel_lambda"], },
only_even=skipOdds)
sbData, lAlpha, lGamma, nir, thz = getData(os.path.basename(path))
laserParams = {
"lAlpha": lAlpha,
"lGamma": lGamma,
"nir": nir,
"thz": thz
}
# get which sidebands were found in this data set
# first two rows are origin header, second is sideband number
# (and empty strings, which is why the "if ii" below, to prevent
# ValueErrors on int('').
foundSidebands = np.array(sorted([float(ii) for ii in set(sbData[2]) if ii]))
# Remove first 3 rows, which are strings for origin header, and cast it to floats
sbData = sbData[3:].astype(float)
# double the sb numbers (to account for sb strength/error) and add a dummy
# number so the array is the same shape
foundSidebands = np.insert(foundSidebands, range(len(foundSidebands)), foundSidebands)
foundSidebands = np.insert(foundSidebands, 0, -1)
return laserParams, np.row_stack((foundSidebands, sbData))
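# Sketch of the intended chaining into the fitting routine below, kept as
# comments; the path is a placeholder.
# laserParams, sbData = hsg_combine_qwp_sweep('path/to/qwp_sweep_folder',
#                                             loadNorm=True, save=False)
# sbFits, sbFitsDict = proc_n_fit_qwp_data(sbData, laserParams,
#                                          vertAnaDir=True, series='example')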
def makeCurve(eta, isVertical):
"""
:param eta: QWP retardance at the wavelength, in fractions of a wave
:param isVertical: True if the analyzer (polarizer) is vertical, False if horizontal
:return: analyzerCurve, a function of (QWP angle, S0, S1, S2, S3) suitable for curve fitting
"""
cosd = lambda x: np.cos(x * np.pi / 180)
sind = lambda x: np.sin(x * np.pi / 180)
eta = eta * 2 * np.pi
if isVertical:
# vertical polarizer
def analyzerCurve(x, *S):
S0, S1, S2, S3 = S
return S0-S1/2*(1+np.cos(eta)) \
+ S3*np.sin(eta)*sind(2*x) \
+ S1/2*(np.cos(eta)-1)*cosd(4*x) \
+ S2/2*(np.cos(eta)-1)*sind(4*x)
else:
# horizontal polarizer
def analyzerCurve(x, *S):
S0, S1, S2, S3 = S
return S0+S1/2*(1+np.cos(eta)) \
- S3*np.sin(eta)*sind(2*x) \
+ S1/2*(1-np.cos(eta))*cosd(4*x) \
+ S2/2*(1-np.cos(eta))*sind(4*x)
return analyzerCurve
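# Minimal sketch of fitting an analyzer curve generated by makeCurve with
# curve_fit, kept as comments; the retardance, angles, and Stokes vector are
# illustrative assumptions.
# qwp_angles = np.arange(0, 360, 22.5)
# curve = makeCurve(0.25, True)  # quarter-wave retardance, vertical analyzer
# fake = curve(qwp_angles, 1.0, 0.3, 0.1, 0.6)  # S0, S1, S2, S3
# popt, pcov = curve_fit(curve, qwp_angles, fake, p0=[1, 1, 0, 0])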
def proc_n_fit_qwp_data(data, laserParams = dict(), wantedSBs = None, vertAnaDir = True, plot=False,
save = False, plotRaw = lambda sbidx, sbnum: False, series = '', eta=None, fourier = True,
**kwargs):
"""
Fit a set of sideband data vs QWP angle to get the Stokes parameters
:param data: data in the form of the return of hsg_combine_qwp_sweep
:param laserParams: dictionary of the parameters of the laser, the angles and frequencies. See function for
expected keys. I don't think the errors are used (except for plotting?), or the wavelengths (but
left in for potential future use (wavelength dependent stuff?))
:param wantedSBs: List of the wanted sidebands to fit out.
:param vertAnaDir: direction of the analyzer. True if vertical, false if horizontal.
:param plot: True/False to plot alpha/gamma/dop. Alternatively, a list of "a", "g", "d" to only plot selected ones
:param save: filename to save the files. Accepts BytesIO
:param plotRaw: callable that takes an index of the sb and sb number, returns true to plot the raw curve
:param series: a string to be put in the header for the origin files
:param eta: a function to call to calculate the desired retardance. Input will be the SB order.
:param fourier: Will use Fourier analysis over a fit function if True
if saveStokes is in kwargs and False, it will not save the Stokes parameters, since I rarely actually use them.
:return:
"""
defaultLaserParams = {
"lAlpha": 90,
"ldAlpha": 0.2,
"lGamma": 0.0,
"ldGamma": 0.2,
"lDOP": 1,
"ldDOP": 0.02,
"nir": 765.7155,
"thz": 21.1
}
defaultLaserParams.update(laserParams)
lAlpha, ldAlpha, lGamma, ldGamma, lDOP, ldDOP = defaultLaserParams["lAlpha"], \
defaultLaserParams["ldAlpha"], \
defaultLaserParams["lGamma"], \
defaultLaserParams["ldGamma"], \
defaultLaserParams["lDOP"], \
defaultLaserParams["ldDOP"]
allSbData = data
angles = allSbData[1:, 0]
# angles += -5
# print("="*20)
# print("\n"*3)
# print(" WARNING")
# print("\n"*3)
# print("ANGLES HAVE BEEN MANUALLY OFFEST IN proc_n_fit_qwp_data")
# print("\n"*3)
# print("="*20)
allSbData = allSbData[:, 1:] # trim out the angles
if wantedSBs is None:
# set to get rid of duplicates, 1: to get rid of the -1 used for
# getting arrays the right shape
wantedSBs = set(allSbData[0, 1:])
if eta is None:
"""
It might be easier for the end user to do this by passing eta(wavelength) instead of eta(sborder),
but then this function would need to carry around wavelengths, which is extra work. It could convert
between NIR/THz wavelengths to SB order, but it's currently unclear whether you'd rather use what the WS6
claims, or what the sidebands say, and you'd probably want to take the extra step to ensure the SB fit results
if using the spectrometer wavelengths. In general, if you have a function as etal(wavelength), you'd probably
want to pass this as
eta = lambda x: etal(1239.84/(nirEv + x*THzEv))
assuming nirEv/THzEv are the photon energies of the NIR/THz.
"""
eta = lambda x: 0.25
# allow passing a flag to ignore odds. I think I generally do, so set it to
# default to True
skipOdds = kwargs.get("skip_odds", True)
# Make an array to keep all of the sideband information.
# Start it off by keeping the NIR information (makes for easier plotting into origin)
sbFits = [[0] + [-1] * 8 + [lAlpha, ldAlpha, lGamma, ldGamma, lDOP, ldDOP]]
# Also, for convenience, keep a dictionary of the information.
# This is when I feel like someone should look at porting this over to pandas
sbFitsDict = {}
sbFitsDict["S0"] = [[0, -1, -1]]
sbFitsDict["S1"] = [[0, -1, -1]]
sbFitsDict["S2"] = [[0, -1, -1]]
sbFitsDict["S3"] = [[0, -1, -1]]
sbFitsDict["alpha"] = [[0, lAlpha, ldAlpha]]
sbFitsDict["gamma"] = [[0, lGamma, ldGamma]]
sbFitsDict["DOP"] = [[0, lDOP, ldDOP]]
# Iterate over all sb data. Skip by 2 because error bars are included
for sbIdx in range(0, allSbData.shape[1], 2):
sbNum = allSbData[0, sbIdx]
if sbNum not in wantedSBs: continue
if skipOdds and sbNum%2: continue
# if verbose:
# print("\tlooking at sideband", sbNum)
sbData = allSbData[1:, sbIdx]
sbDataErr = allSbData[1:, sbIdx + 1]
if fourier:
# We want to do Fourier Analysis
# I've hard coded the maximum expected variance from QWP retardance to be
# 5 degrees (converted to radians bc of small angle approximation).
# Not sure how to deal with the fact that this method leaves no variance
# for the S3 parameter.
f0 = 0
f2 = 0
f4 = 0
df0 = 0
df2 = 0
df4 = 0
for k in range(0,16,1):
f0 = f0 + allSbData[k+1,sbIdx]
f2 = f2 + allSbData[k+1,sbIdx]*np.exp(-1j*np.pi*k/4)
f4 = f4 + allSbData[k+1,sbIdx]*np.exp(-1j*np.pi*k/2)
df0 = df0 + allSbData[k+1, sbIdx+1]
df2 = df2 + allSbData[k+1,sbIdx+1]*np.exp(-1j*np.pi*k/4)
df4 = df4 + allSbData[k+1,sbIdx+1]*np.exp(-1j*np.pi*k/2)
phi = 5*2*np.pi/180
# Generate the Stokes parameters from the Fourier Components
S0 = (f0 - 2*f4.real)/(np.pi)
S1 = 4*f4.real/(np.pi)
S2 = -4*f4.imag/(np.pi)
S3 = 2*f2.imag/(np.pi)
# For the Error Propagation, I say phi = 0 and dPhi = 2*phi (value set above)
d0 = np.sqrt(df0**2+2*(4*f4.real**2*phi**2+df4.real**2*(1+phi)**2*(1-1*phi)**2)/(1+phi)**4)/(2*np.pi)
d1 = np.sqrt((f4.real**2*phi**2+df4.real**2*phi**2)/(1+phi)**4)/(np.pi)
d2 = np.sqrt((f4.imag**2*phi**2+df4.imag**2*phi**2)/(1+phi)**4)/(np.pi)
d3 = 2*df2.imag/np.pi
# Calculate the alpha, gamma, DOP and errors from Stokes parameters
thisAlpha = np.arctan2(S2, S1) / 2 * 180. / np.pi
thisAlphaError = np.sqrt(d2 ** 2 * S1 ** 2 + d1 ** 2 * S2 ** 2) / (S1 ** 2 + S2 ** 2) * 180./np.pi
thisGamma = np.arctan2(S3, np.sqrt(S1 ** 2 + S2 ** 2)) / 2 * 180. / np.pi
thisGammaError = np.sqrt((d3 ** 2 * (S1 ** 2 + S2 ** 2) ** 2 + (d1 ** 2 * S1 ** 2 + d2 ** 2 * S2 ** 2) * S3 ** 2) / (
(S1 ** 2 + S2 ** 2) * (S1 ** 2 + S2 ** 2 + S3 ** 2) ** 2)) *180. /np.pi
thisDOP = np.sqrt(S1 ** 2 + S2 ** 2 + S3 ** 2) / S0
thisDOPerror = np.sqrt(((d1 ** 2 * S0 ** 2 * S1 ** 2 + d0 ** 2 * (S1 ** 2 + S2 ** 2 + S3 ** 2) ** 2 + S0 ** 2 * (
d2 ** 2 * S2 ** 2 + d3 ** 2 * S3 ** 2)) / (S0 ** 4 * (S1 ** 2 + S2 ** 2 + S3 ** 2))))
# Append The stokes parameters and errors to the dictionary output.
sbFitsDict["S0"].append([sbNum, S0, d0])
sbFitsDict["S1"].append([sbNum, S1, d1])
sbFitsDict["S2"].append([sbNum, S2, d2])
sbFitsDict["S3"].append([sbNum, S3, d3])
sbFitsDict["alpha"].append([sbNum, thisAlpha, thisAlphaError])
sbFitsDict["gamma"].append([sbNum, thisGamma, thisGammaError])
sbFitsDict["DOP"].append([sbNum, thisDOP, thisDOPerror])
toAppend = [sbNum, S0, d0, S1, d1, S2, d2, S3, d3, thisAlpha, thisAlphaError, thisGamma, thisGammaError, thisDOP, thisDOPerror]
sbFits.append(toAppend)
# Otherwise we will do the normal fit
else:
# try:
# p0 = sbFits[-1][1:8:2]
# except:
# p0 = [1, 1, 0, 0]
p0 = [1, 1, 0, 0]
etan = eta(sbNum)
try:
p, pcov = curve_fit(makeCurve(etan, vertAnaDir), angles, sbData, p0=p0)
except ValueError:
# This is getting tossed around, especially when looking at noisy data,
# especially with the laser line, and it's fitting erroneous values.
# Ideally, I should be cutting this out and not even returning them,
# but that's immediately causing
p = np.nan*np.array(p0)
pcov = np.eye(len(p))
if plot and plotRaw(sbIdx, sbNum):
# pg.figure("{}: sb {}".format(dataName, sbNum))
plt.figure("All Curves")
plt.errorbar(angles, sbData, sbDataErr, fmt='o-', label=f"{series}, {sbNum}")
# plt.plot(angles, sbData,'o-', label="Data")
fineAngles = np.linspace(angles.min(), angles.max(), 300)
# plt.plot(fineAngles,
# makeCurve(eta, "V" in dataName)(fineAngles, *p0), name="p0")
plt.plot(fineAngles,
makeCurve(etan, vertAnaDir)(fineAngles, *p))
# plt.show()
plt.ylim(0, 1)
plt.xlim(0, 360)
plt.ylabel("Normalized Intensity")
plt.xlabel("QWP Angle (θ)")
print(f"\t{series} {sbNum}, p={p}")
# get the errors
d = np.sqrt(np.diag(pcov))
thisData = [sbNum] + list(p) + list(d)
d0, d1, d2, d3 = d
S0, S1, S2, S3 = p
# reorder so errors are after values
thisData = [thisData[i] for i in [0, 1, 5, 2, 6, 3, 7, 4, 8]]
sbFitsDict["S0"].append([sbNum, S0, d0])
sbFitsDict["S1"].append([sbNum, S1, d1])
sbFitsDict["S2"].append([sbNum, S2, d2])
sbFitsDict["S3"].append([sbNum, S3, d3])
# append alpha value
thisData.append(np.arctan2(S2, S1) / 2 * 180. / np.pi)
# append alpha error
variance = (d2 ** 2 * S1 ** 2 + d1 ** 2 * S2 ** 2) / (S1 ** 2 + S2 ** 2) ** 2
thisData.append(np.sqrt(variance) * 180. / np.pi)
sbFitsDict["alpha"].append([sbNum, thisData[-2], thisData[-1]])
# append gamma value
thisData.append(np.arctan2(S3, np.sqrt(S1 ** 2 + S2 ** 2)) / 2 * 180. / np.pi)
# append gamma error
variance = (d3 ** 2 * (S1 ** 2 + S2 ** 2) ** 2 + (d1 ** 2 * S1 ** 2 + d2 ** 2 * S2 ** 2) * S3 ** 2) / (
(S1 ** 2 + S2 ** 2) * (S1 ** 2 + S2 ** 2 + S3 ** 2) ** 2)
thisData.append(np.sqrt(variance) * 180. / np.pi)
sbFitsDict["gamma"].append([sbNum, thisData[-2], thisData[-1]])
# append degree of polarization
thisData.append(np.sqrt(S1 ** 2 + S2 ** 2 + S3 ** 2) / S0)
variance = ((d1 ** 2 * S0 ** 2 * S1 ** 2 + d0 ** 2 * (S1 ** 2 + S2 ** 2 + S3 ** 2) ** 2 + S0 ** 2 * (
d2 ** 2 * S2 ** 2 + d3 ** 2 * S3 ** 2)) / (S0 ** 4 * (S1 ** 2 + S2 ** 2 + S3 ** 2)))
thisData.append(np.sqrt(variance))
sbFitsDict["DOP"].append([sbNum, thisData[-2], thisData[-1]])
sbFits.append(thisData)
sbFits = np.array(sbFits)
sbFitsDict = {k: np.array(v) for k, v in sbFitsDict.items()}
# This chunk used to insert the "alpha deviation", the difference between the angles and the
# nir. I don't think I use this anymore, so stop saving it
# origin_header = 'Sideband,S0,S0 err,S1,S1 err,S2,S2 err,S3,S3 err,alpha,alpha deviation,alpha err,gamma,gamma err,DOP,DOP err\n'
# origin_header += 'Order,arb.u,arb.u,arb.u,arb.u,arb.u,arb.u,arb.u,arb.u,deg,deg,deg,deg,deg,arb.u.,arb.u.\n'
# origin_header += 'Sideband,{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}'.format(*["{}".format(series)] * 15)
# sbFits = np.array(sbFits)
# sbFits = np.insert(sbFits, 10, sbFits[:, 9] - lAlpha, axis=1)
# sbFits = sbFits[sbFits[:, 0].argsort()]
origin_header = "#\n"*100 # to fit all other files for easy origin importing
origin_header += 'Sideband,S0,S0 err,S1,S1 err,S2,S2 err,S3,S3 err,alpha,alpha err,gamma,gamma err,DOP,DOP err\n'
origin_header += 'Order,arb.u,arb.u,arb.u,arb.u,arb.u,arb.u,arb.u,arb.u,deg,deg,deg,deg,arb.u.,arb.u.\n'
origin_header += 'Sideband,{},{},{},{},{},{},{},{},{},{},{},{},{},{}'.format(*["{}".format(series)] * 14)
sbFits = sbFits[sbFits[:, 0].argsort()]
if isinstance(save, str):
sbFitsSave = sbFits
if not kwargs.get("saveStokes", True):
headerlines = origin_header.splitlines()
ln, units, coms = headerlines[-3:]
ln = ','.join([ln.split(',')[0]] + ln.split(',')[9:])
units = ','.join([units.split(',')[0]] + units.split(',')[9:])
coms = ','.join([coms.split(',')[0]] + coms.split(',')[9:])
headerlines[-3:] = ln, units, coms
# remove them from the save data
origin_header = '\n'.join(headerlines)
sbFitsSave = np.delete(sbFits, range(1, 9), axis=1)
if not os.path.exists(os.path.dirname(save)):
os.mkdir(os.path.dirname(save))
np.savetxt(save, np.array(sbFitsSave), delimiter=',', header=origin_header,
comments='', fmt='%.6e')
# print("a = {:.2f} ± {:.2f}".format(sbFits[1, 9], sbFits[1, 10]))
# print("g = {:.2f} ± {:.2f}".format(sbFits[1, 11], sbFits[1, 12]))
if plot:
plt.figure("alpha")
plt.errorbar(sbFitsDict["alpha"][:, 0],
sbFitsDict["alpha"][:, 1],
sbFitsDict["alpha"][:, 2],
fmt='o-', label=series
)
plt.figure("gamma")
plt.errorbar(sbFitsDict["gamma"][:, 0],
sbFitsDict["gamma"][:, 1],
sbFitsDict["gamma"][:, 2],
fmt='o-', label=series
)
return sbFits, sbFitsDict
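# Quick sketch of pulling polarization angles out of the return values, kept
# as comments; assumes sbFits/sbFitsDict from a call like the one sketched
# after hsg_combine_qwp_sweep above.
# for sb, alpha, dalpha in sbFitsDict["alpha"][1:]:  # row 0 is the NIR laser
#     print(f"SB {sb:.0f}: alpha = {alpha:.2f} +/- {dalpha:.2f} deg")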
####################
# Helper functions
####################
def fvb_crr(raw_array, offset=0, medianRatio=1, noiseCoeff=5, debugging=False):
"""
Remove cosmic rays from a sequence of identical exposures
:param raw_array: The array to be cleaned. Successive spectra should
be the columns (i.e. 1600 x n) of the raw_array
:param offset: baseline to add to raw_array.
Not used, but here if it's needed in the future
:param medianRatio: Multiplier to the median when deciding a cutoff
:param noiseCoeff: Multiplier to the noise on the median
May need changing for noisy data
:return:
"""
d = np.array(raw_array)
med = ndimage.filters.median_filter(d, size=(1, d.shape[1]), mode='wrap')
med = np.median(d, axis=1).reshape(d.shape[0], 1)
if debugging:
print("shape of median filter:", med.shape)
meanMedian = med.mean(axis=1)
# meanMedian = med.copy()
if debugging:
print("shape of meaned median filter:", meanMedian.shape)
# Construct a cutoff for each pixel. It was kind of guess and
# check
cutoff = meanMedian * medianRatio + noiseCoeff * np.std(meanMedian[-100:])
if debugging:
print("shape of cutoff criteria:", cutoff.shape)
import pyqtgraph as pg
winlist = []
app = pg.QtGui.QApplication([])
win = pg.GraphicsLayoutWidget()
win.setWindowTitle("Raw Image")
p1 = win.addPlot()
img = pg.ImageItem()
img.setImage(d.copy().T)
p1.addItem(img)
hist = pg.HistogramLUTItem()
hist.setImageItem(img)
win.addItem(hist)
win.nextRow()
p2 = win.addPlot(colspan=2)
p2.setMaximumHeight(250)
p2.addLegend()
for i, v in enumerate(d.T):
p2.plot(v, pen=(i, d.shape[1]), name=str(i))
p2.plot(np.sum(d, axis=1), pen=pg.mkPen('w', width=3))
win.show()
winlist.append(win)
win2 = pg.GraphicsLayoutWidget()
win2.setWindowTitle("Median Image")
p1 = win2.addPlot()
img = pg.ImageItem()
img.setImage(med.T)
p1.addItem(img)
hist = pg.HistogramLUTItem()
hist.setImageItem(img)
win2.addItem(hist)
win2.nextRow()
p2 = win2.addPlot(colspan=2)
p2.setMaximumHeight(250)
p2.plot(np.sum(med, axis=1) / d.shape[1])
win2.show()
winlist.append(win2)
win2 = pg.GraphicsLayoutWidget()
win2.setWindowTitle("d-m")
p1 = win2.addPlot()
img = pg.ImageItem()
img.setImage((d - med).T)
p1.addItem(img)
hist = pg.HistogramLUTItem()
hist.setImageItem(img)
win2.addItem(hist)
win2.nextRow()
p2 = win2.addPlot(colspan=2)
p2.setMaximumHeight(250)
p2.addLegend()
for i, v in enumerate((d - med).T):
p2.plot(v, pen=(i, d.shape[1]), name=str(i))
p2.plot(cutoff, pen=pg.mkPen('w', width=3))
win2.show()
winlist.append(win2)
# Find the bad pixel positions
# Note the [:, None] - needed to cast the correct shapes
badPixs = np.argwhere((d - med) > (cutoff.reshape(len(cutoff), 1)))
for pix in badPixs:
# get the other pixels in the row which aren't the cosmic
if debugging:
print("cleaning pixel", pix)
p = d[pix[0], [i for i in range(d.shape[1]) if not i == pix[1]]]
if debugging:
print("\tRemaining pixels in row are", p)
# Replace the cosmic by the average of the others
# Could get hairy if more than one cosmic per row.
# Maybe when doing many exposures?
d[pix[0], pix[1]] = np.mean(p)
if debugging:
win = pg.GraphicsLayoutWidget()
win.setWindowTitle("Clean Image")
p1 = win.addPlot()
img = pg.ImageItem()
img.setImage(d.copy().T)
p1.addItem(img)
hist = pg.HistogramLUTItem()
hist.setImageItem(img)
win.addItem(hist)
win.nextRow()
p2 = win.addPlot(colspan=2)
p2.setMaximumHeight(250)
p2.plot(np.sum(d, axis=1))
win.show()
winlist.append(win)
app.exec_()
return np.array(d)
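# Hedged example of cleaning a stack of repeated exposures, kept as comments;
# the synthetic image is an assumption for illustration.
# stack = np.random.poisson(100, size=(1600, 10)).astype(float)
# stack[800, 3] += 5000  # fake cosmic ray hit
# cleaned = fvb_crr(stack, medianRatio=1, noiseCoeff=5)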
def stitchData(dataList, plot=False):
"""
Attempt to stitch together absorbance data. Will translate the second data set
to minimize leastsq between the two data sets.
:param dataList: Iterable of the data sets to be fit. Currently
it only takes the first two elements of the list, but should be fairly
straightforward to recursively handle a list>2. Shifts the second
data set to overlap the first
elements of dataList can be either np.arrays or Absorbance class,
where it will take the proc_data itself
:param plot: bool whether or not you want the fit iterations to be plotted
(for debugging)
:return: a, a (2,) np.array of the shift
"""
# Data coercion, make sure we know what we're working with
first = dataList[0]
if isinstance(first, Absorbance):
first = first.proc_data
second = dataList[1]
if isinstance(second, Absorbance):
second = second.proc_data
if plot:
# Keep a reference to whatever plot is open at call-time
# Useful if the calling script has plots before and after, as
# omitting this will cause future plots to be added to figures here
firstFig = plt.gcf()
plt.figure("Stitcher")
# Plot the raw input data
plt.plot(*first.T)
plt.plot(*second.T)
# Algorithm is set up such that the "second" data set spans the
# higher domain than first. Need to enforce this, and remember it
# so the correct shift is applied
flipped = False
if max(first[:, 0]) > max(second[:, 0]):
flipped = True
first, second = second, first
def stitch_hsg_dicts(full_obj, new_obj, need_ratio=False, verbose=False, ratios=[1,1],
override_ratio = False, ignore_weaker_lowers = True):
"""
This helper function takes a FullHighSideband and a sideband
object, either CCD or PMT and smushes the new sb_results into the full_dict.
The first input doesn't change, so if there's a PMT set of data involved, it
should be in the full variable to keep the laser normalization intact.
This function almost certainly does not work for stitching many negative orders
in its current state
11/14/16
--------
This function has been updated to take the CCD objects themselves to be more
intelligent about stitching. Consider two scans, (a) spec step 0 with 1 gain, spec
step 2 with 110 gain and (b) spec step 0 with 50 gain and spec step 1 with 110 gain.
The old version would always take spec step 0 to scale to, so while comparisons
between spec step 0 and 1 for either case is valid, comparison between (a) and (b)
were not, since they were scaled to different gain parameters. This new code will
check what the gain values are and scale to the 110 data set, if present. This seems
valid because we currently always have a 110 gain exposure for higher order
sidebands.
The exception is if the laser is present (sideband 0), as that is an absolute
measure to which all else should be related.
TODO: run some test cases to test this.
06/11/18
--------
That sometimes was breaking if there were only 3-4 sidebands to fit with poor
SNR. I've added the override_ratio to be passed to set a specific ratio to scale
by. From data on 06/03/18, the 50gain to 110gain is a ~3.6 ratio. I haven't worked out
a clean way of specifying which data set it should be scaled to. Right now,
it leaves the laser line data, or the 110 gain data alone.
Inputs:
full = full_dict from FullHighSideband, or HighSidebandPMT. It's important
that it contains lower orders than the new_dict.
new_dict = another full_dict.
need_ratio = If gain or other parameters aren't equal and must resort to
calculating the ratio instead of the measurements being equivalent.
Changing integration time still means N photons made M counts,
but changing gain or using PMT or whatever does affect things.
ratios: Will update with the values to the ratios needed to scale the data.
ratios[0] is the ratio for the "full_obj"
ratios[1] is the ratio for the "new_obj"
one of them will be one, one will be the appropriate scale, since one of
them is unscaled. This is strictly speaking an output
override_ratio: Pass a float to specify the ratio that should be used.
ignore_weaker_lowers: Sometimes, a SB is in the short pass filter so a lower
order is weaker than the next highest. If True, causes script to ignore all
sidebands which are weaker and lower order.
Returns:
full = extended version of the input full. Overlapping sidebands are
averaged because that makes sense?
"""
if isinstance(full_obj, dict) and isinstance(new_obj, dict):
return stitch_hsg_dicts_old(full_obj, new_obj, need_ratio, verbose)
if verbose:
print("=" * 15)
print()
print("Stitching HSG dicts")
print()
print("=" * 15)
# remove potentially offensive SBs, i.e. a 6th order SB being in the SPF for more
# data, but being meaningless to pull intensity information from.
# Note: this might not be the best if you get to higher order stitches where it's
# possible that the sidebands might not be monotonic (from noise?)
if ignore_weaker_lowers:
full_obj.full_dict, full_obj.sb_results = FullHighSideband.parse_sb_array(full_obj.sb_results)
new_obj.new_dict, new_obj.sb_results = FullHighSideband.parse_sb_array(new_obj.sb_results)
# was fucking around with references and causing updates to arrays when it shouldn't
# be
full = copy.deepcopy(full_obj.full_dict)
new_dict = copy.deepcopy(new_obj.full_dict)
# Force a rescaling if you've passed a specified parameter
# if isinstance(override_ratio, float):
# need_ratio = True
# Do some testing to see which dict should be scaled to the other
# I honestly forget why I prioritized the PMT first like this. But the third
# check looks to make a gain 110 prioritize non-110, unless the non-110 includes
# a laser line
scaleTo = ""
if need_ratio:
if isinstance(new_obj, HighSidebandPMT):
scaleTo = "new"
elif isinstance(full_obj, HighSidebandPMT):
scaleTo = "full"
elif new_obj.parameters["gain"] == 110 and full_obj.parameters["gain"] != 110 \
and 0 not in full:
scaleTo = "new"
else:
scaleTo = "full"
if verbose:
print("\tI'm adding these sidebands", new_obj.sb_results[:,0])
print("\t With these:", sorted(full.keys()))
overlap = [] # The list that hold which orders are in both dictionaries
missing = [] # How to deal with sidebands that are missing from full but in new.
for new_sb in new_obj.sb_results[:,0]:
full_sbs = sorted(full.keys())
if new_sb in full_sbs:
overlap.append(new_sb)
elif new_sb not in full_sbs and new_sb < full_sbs[-1]:
# This probably doesn't work with bunches of negative orders
missing.append(new_sb)
if verbose:
print("\t ( overlap:", overlap, ")")
print("\t ( missing:", missing, ")")
# This if-else clause handles how to average together overlapping sidebands
# which are seen in both spectra,
if need_ratio:
# Calculate the appropriate ratio to multiply the new sidebands by.
# I'm not entirely sure what to do with the error of this guy.
ratio_list = []
try:
new_starter = overlap[-1]
if verbose:
print("\n\tadding these ratios,", end=' ')
if len(overlap) > 2:
overlap = [x for x in overlap if (x % 2 == 0)
]# and (x != min(overlap) and (x != max(overlap)))]
if scaleTo == "new":
if verbose:
print("scaling to new :")
for sb in overlap:
ratio_list.append(new_dict[sb][2]/full[sb][2])
if verbose:
print("\t\t{:2.0f}: {:.3e}/{:.3e} ~ {:.3e},".format(sb, new_dict[sb][2],
full[sb][2], ratio_list[-1]))
# new_ratio = 1 06/11/18 Not sure what these were used for
ratio = np.mean(ratio_list)
else:
if verbose:
print("scaling to full:")
for sb in overlap:
ratio_list.append(full[sb][2] / new_dict[sb][2])
if verbose:
print("\t\t{:2.0f}: {:.3e}/{:.3e} ~ {:.3e},".format(sb, full[sb][2],
new_dict[sb][2], ratio_list[-1]))
# new_ratio = np.mean(ratio_list) 06/11/18 Not sure what these were used for
ratio = np.mean(ratio_list)
# Maybe not the best way to do it, performance wise, since you still
# iterate through the list, even though you'll override it.
if isinstance(override_ratio, float):
ratio = override_ratio
if verbose:
print("overriding calculated ratio with user inputted")
error = np.std(ratio_list) / np.sqrt(len(ratio_list))
except IndexError:
# If there's no overlap (which you shouldn't let happen), hardcode a ratio
# and error. I looked at all the ratios for the overlaps from 6/15/16
# (540ghz para) to get the rough average. Hopefully they hold for all data.
if not overlap:
ratio = 0.1695
error = 0.02
# no overlap, so make sure it grabs all the sidebands
new_starter = min(new_dict.keys())
else:
raise
if verbose:
# print "Ratio list\n\t", ("{:.3g}, "*len(ratio_list))[:-2].format(*ratio_list)
# print "Overlap \n\t", [round(ii, 3) for ii in overlap]
print("\t Ratio: {:.3g} +- {:.3g} ({:.2f}%)\n".format(ratio, error, error/ratio*100))
# Adding the new sidebands to the full set and moving errors around.
# I don't know exactly what to do about the other aspects of the sidebands
# besides the strength and its error.
if scaleTo == "full":
ratios[1] = ratio
for sb in overlap:
if verbose:
print("For SB {:02d}, original strength is {:.3g} +- {:.3g} ({:.3f}%)".format(int(sb), new_dict[sb][2], new_dict[sb][3],
new_dict[sb][3]/new_dict[sb][2]*100
))
new_dict[sb][3] = ratio * new_dict[sb][2] * np.sqrt((error / ratio) ** 2 + (new_dict[sb][3] / new_dict[sb][2]) ** 2)
new_dict[sb][2] = ratio * new_dict[sb][2]
if verbose:
print("\t\t scaled\t\t\t\t{:.3g} +- {:.3g} ({:.3f}%)".format(new_dict[sb][2],
new_dict[sb][3],
new_dict[sb][3]/new_dict[sb][2]*100))
print("\t\t full\t\t\t\t\t{:.3g} +- {:.3g} ({:.3f}%)".format(full[sb][2],
full[sb][3],
full[sb][3]/full[sb][2]*100))
sb_error = np.sqrt(full[sb][3] ** (-2) + new_dict[sb][3] ** (-2)) ** (-1)
avg = (full[sb][2] / (full[sb][3] ** 2) + new_dict[sb][2] / (
new_dict[sb][3] ** 2)) / (full[sb][3] ** (-2) + new_dict[sb][3] ** (-2))
full[sb][2] = avg
full[sb][3] = sb_error
if verbose:
print("\t\t replaced with \t\t{:.3g} +- {:.3g} ({:.3f}%)".format(full[sb][2],
full[sb][3],
full[sb][3]/full[sb][2]*100))
print()
lw_error = np.sqrt(full[sb][5] ** (-2) + new_dict[sb][5] ** (-2)) ** (-1)
lw_avg = (full[sb][4] / (full[sb][5] ** 2) + new_dict[sb][4] / (
new_dict[sb][5] ** 2)) / (
full[sb][5] ** (-2) + new_dict[sb][5] ** (-2))
full[sb][4] = lw_avg
full[sb][5] = lw_error # This may not be the exactly right way to calculate the error
else:
ratios[0] = ratio
for sb in overlap:
full[sb][3] = ratio * full[sb][2] * np.sqrt((error / ratio) ** 2 + (full[sb][3] / full[sb][2]) ** 2)
full[sb][2] = ratio * full[sb][2]
sberror = np.sqrt(full[sb][3] ** (-2) + new_dict[sb][3] ** (-2)) ** (-1)
avg = (full[sb][2] / (full[sb][3] ** 2) + new_dict[sb][2] / (
new_dict[sb][3] ** 2)) / (full[sb][3] ** (-2) + new_dict[sb][3] ** (-2))
full[sb][2] = avg
full[sb][3] = sberror
lw_error = np.sqrt(full[sb][5] ** (-2) + new_dict[sb][5] ** (-2)) ** (-1)
lw_avg = (full[sb][4] / (full[sb][5] ** 2) + new_dict[sb][4] / (
new_dict[sb][5] ** 2)) / (
full[sb][5] ** (-2) + new_dict[sb][5] ** (-2))
full[sb][4] = lw_avg
full[sb][5] = lw_error # This may not be the exactly right way to calculate the error
else: # not needing a new ratio
try:
new_starter = overlap[-1] # This grabs the sideband order where only the new dictionary has
# sideband information. It's not clear why it necessarily has to be
# at this line.
overlap = [x for x in overlap if (x % 2 == 0)
] # and (x != min(overlap) and (x != max(overlap)))]
# This cuts out the lowest order sideband in the overlap for mysterious reasons
for sb in overlap: # This for loop average two data points weighted by their relative errors
if verbose:
print("The sideband", sb)
print("Old value", full[sb][4] * 1000)
print("Add value", new_dict[sb][4] * 1000)
try:
error = np.sqrt(full[sb][3] ** (-2) + new_dict[sb][3] ** (-2)) ** (-1)
avg = (full[sb][2] / (full[sb][3] ** 2) + new_dict[sb][2] / (new_dict[sb][3] ** 2)) / (
full[sb][3] ** (-2) + new_dict[sb][3] ** (-2))
full[sb][2] = avg
full[sb][3] = error
except RuntimeWarning:
raise IOError()
lw_error = np.sqrt(full[sb][5] ** (-2) + new_dict[sb][5] ** (-2)) ** (-1)
lw_avg = (full[sb][4] / (full[sb][5] ** 2) + new_dict[sb][4] / (new_dict[sb][5] ** 2)) / (
full[sb][5] ** (-2) + new_dict[sb][5] ** (-2))
full[sb][4] = lw_avg
full[sb][5] = lw_error # This may not be the exactly right way to calculate the error
if verbose:
print("New value", lw_avg * 1000)
except:
new_starter = 0 # I think this makes things work when there's no overlap
if verbose:
print("appending new elements. new_starter={}".format(new_starter))
for sb in [x for x in list(new_dict.keys()) if ((x > new_starter) or (x in missing))]:
full[sb] = new_dict[sb]
if scaleTo == "full":
full[sb][2] = ratio * full[sb][2]
full[sb][3] = full[sb][2] * np.sqrt((error / ratio) ** 2 + (ratio * full[sb][3] / full[sb][2]) ** 2)
if scaleTo == "new":
for sb in set(full.keys()) - set(sorted(new_dict.keys())[:]):
full[sb][2] *= ratio
# TODO: I think this is an invalid error
            # propagation (since ratio has error associated with it)
full[sb][3] *= ratio
if verbose:
print("I made this dictionary", sorted(full.keys()))
print('-'*19)
return full
    return full, ratio # unreachable leftover from an earlier version of this function
return full
def stitch_hsg_dicts_old(full, new_dict, need_ratio=False, verbose=False):
"""
This helper function takes a FullHighSideband.full_dict attribute and a sideband
object, either CCD or PMT and smushes the new sb_results into the full_dict.
    The first input doesn't change, so if there's a PMT set of data involved, it
should be in the full variable to keep the laser normalization intact.
This function almost certainly does not work for stitching many negative orders
    in its current state
11/14/16
--------
The original function has been updated to take the full object (instead of
the dicts alone) to better handle calculating ratios when stitching. This is called
once things have been parsed in the original function (or legacy code where dicts
are passed instead of the object)
Inputs:
full = full_dict from FullHighSideband, or HighSidebandPMT. It's important
that it contains lower orders than the new_dict.
new_dict = another full_dict.
need_ratio = If gain or other parameters aren't equal and must resort to
calculating the ratio instead of the measurements being equivalent.
Changing integration time still means N photons made M counts,
but changing gain or using PMT or whatever does affect things.
Returns:
full = extended version of the input full. Overlapping sidebands are
averaged because that makes sense?
"""
if verbose:
print("I'm adding these sidebands in old stitcher", sorted(new_dict.keys()))
overlap = [] # The list that hold which orders are in both dictionaries
missing = [] # How to deal with sidebands that are missing from full but in new.
for new_sb in sorted(new_dict.keys()):
full_sbs = sorted(full.keys())
if new_sb in full_sbs:
overlap.append(new_sb)
elif new_sb not in full_sbs and new_sb < full_sbs[-1]:
# This probably doesn't work with bunches of negative orders
missing.append(new_sb)
if verbose:
print("overlap:", overlap)
print("missing:", missing)
# This if-else clause handles how to average together overlapping sidebands
# which are seen in both spectra,
if need_ratio:
# Calculate the appropriate ratio to multiply the new sidebands by.
# I'm not entirely sure what to do with the error of this guy.
ratio_list = []
#print '\n1979\nfull[2]', full[0][2]
try:
new_starter = overlap[-1]
if len(overlap) > 2:
overlap = [x for x in overlap if (x % 2 == 0)
]#and (x != min(overlap) and (x != max(overlap)))]
for sb in overlap:
ratio_list.append(full[sb][2] / new_dict[sb][2])
ratio = np.mean(ratio_list)
# print
# print '-'*15
# print "ratio for {}: {}".format()
error = np.std(ratio_list) / np.sqrt(len(ratio_list))
except IndexError:
# If there's no overlap (which you shouldn't let happen),
# hardcode a ratio and error.
# I looked at all the ratios for the overlaps from 6/15/16
# (540ghz para) to get the rough average. Hopefully they hold
# for all data.
if not overlap:
ratio = 0.1695
error = 0.02
# no overlap, so make sure it grabs
# all the sidebands
new_starter = min(new_dict.keys())
else:
raise
if verbose:
print("Ratio list","\n", [round(ii, 3) for ii in ratio_list])
print("Overlap ","\n", [round(ii, 3) for ii in overlap])
print("Ratio", ratio)
print("Error", error)
#print '\n2118\nfull[2]', full[0][2]
# Adding the new sidebands to the full set and moving errors around.
# I don't know exactly what to do about the other aspects of the sidebands
# besides the strength and its error.
for sb in overlap:
full[sb][2] = ratio * new_dict[sb][2]
full[sb][3] = full[sb][2] * np.sqrt((error / ratio) ** 2 + (new_dict[sb][3] / new_dict[sb][2]) ** 2)
#print '\n2125\nfull[2]', full[0][3]
# Now for linewidths
lw_error = np.sqrt(full[sb][5] ** (-2) + new_dict[sb][5] ** (-2)) ** (-1)
lw_avg = (full[sb][4] / (full[sb][5] ** 2) + new_dict[sb][4] / (new_dict[sb][5] ** 2)) / (
full[sb][5] ** (-2) + new_dict[sb][5] ** (-2))
full[sb][4] = lw_avg
full[sb][5] = lw_error
#print '\n2132\nfull[2]', full[0][2]
else:
try:
new_starter = overlap[-1] # This grabs the sideband order where only the new dictionary has
# sideband information. It's not clear why it necessarily has to be
# at this line.
overlap = [x for x in overlap if (x % 2 == 0) and (x != min(overlap) and (x != max(overlap)))]
# This cuts out the lowest order sideband in the overlap for mysterious reasons
for sb in overlap: # This for loop average two data points weighted by their relative errors
if verbose:
print("The sideband", sb)
print("Old value", full[sb][4] * 1000)
print("Add value", new_dict[sb][4] * 1000)
error = np.sqrt(full[sb][3] ** (-2) + new_dict[sb][3] ** (-2)) ** (-1)
avg = (full[sb][2] / (full[sb][3] ** 2) + new_dict[sb][2] / (new_dict[sb][3] ** 2)) / (
full[sb][3] ** (-2) + new_dict[sb][3] ** (-2))
full[sb][2] = avg
full[sb][3] = error
lw_error = np.sqrt(full[sb][5] ** (-2) + new_dict[sb][5] ** (-2)) ** (-1)
lw_avg = (full[sb][4] / (full[sb][5] ** 2) + new_dict[sb][4] / (new_dict[sb][5] ** 2)) / (
full[sb][5] ** (-2) + new_dict[sb][5] ** (-2))
full[sb][4] = lw_avg
full[sb][5] = lw_error # This may not be the exactly right way to calculate the error
if verbose:
print("New value", lw_avg * 1000)
except:
new_starter = 0 # I think this makes things work when there's no overlap
if verbose:
print("appending new elements. new_starter={}".format(new_starter))
# This loop will add the sidebands which were only seen in the second step
for sb in [x for x in list(new_dict.keys()) if ((x >= new_starter) or (x in missing))]:
full[sb] = new_dict[sb]
if need_ratio:
full[sb][2] = ratio * full[sb][2]
full[sb][3] = full[sb][2] * np.sqrt((error / ratio) ** 2 + (ratio * full[sb][3] / full[sb][2]) ** 2)
#print '\n2164\nfull[2]', full[0][2]
if verbose:
print("I made this dictionary", sorted(full.keys()))
return full
def save_parameter_sweep_no_sb(spectrum_list, file_name, folder_str, param_name, unit,
verbose=False):
"""
This function will take a fully processed list of spectrum objects and
slice Spectrum.sb_fits appropriately to get an output like:
"Parameter" | SB1 freq | err | SB1 amp | error | SB1 linewidth | error | SB2...| SBn...|
param1 | . |
param2 | . |
.
.
.
    Currently the offset y0 is ignored.
After constructing this large matrix, it will save it somewhere.
"""
spectrum_list.sort(key=lambda x: x.parameters[param_name])
included_spectra = dict()
param_array = None
sb_included = []
for spec in spectrum_list:
sb_included = sorted(list(set(sb_included + list(spec.full_dict.keys()))))
included_spectra[spec.fname.split('/')[-1]] = spec.parameters[param_name]
        # If these are from summed spectra, then only the first file name
# from that sum will show up here, which should be fine?
if verbose:
# print "full name:", spectrum_list[0].fname
print("included names:", included_spectra)
print("sb_included:", sb_included)
for spec in spectrum_list:
temp_dict = {} # This is different from full_dict in that the list has the
# sideband order as the zeroth element.
if verbose:
print("the sb_results:", spec.sb_results)
if spec.sb_results.ndim == 1: continue
for index in range(len(spec.sb_results[:, 0])):
if verbose:
print("my array slice:", spec.sb_results[index, :])
temp_dict[int(round(spec.sb_results[index, 0]))] = np.array(
spec.sb_results[index, 1:])
if verbose:
print(temp_dict)
for sb in sb_included:
blank = np.zeros(6)
# print "checking sideband order:", sb
# print "blank", blank
if sb not in temp_dict:
# print "\nNeed to add sideband order:", sb
temp_dict[sb] = blank
try: # Why is this try-except here?
spec_data = np.array([float(spec.parameters[param_name])])
except:
spec_data = np.array([float(spec.parameters[param_name][:2])])
for key in sorted(temp_dict.keys()):
# print "I am going to hstack this:", temp_dict[key]
spec_data = np.hstack((spec_data, temp_dict[key]))
try:
param_array = np.vstack((param_array, spec_data))
except:
param_array = np.array(spec_data)
if verbose:
print("The shape of the param_array is:", param_array.shape)
# print "The param_array itself is:", param_array
'''
param_array_norm = np.array(param_array).T # python iterates over rows
for elem in [x for x in xrange(len(param_array_norm)) if (x-1)%7 == 3]:
temp_max = np.max(param_array_norm[elem])
param_array_norm[elem] = param_array_norm[elem] / temp_max
param_array_norm[elem + 1] = param_array_norm[elem + 1] / temp_max
'''
snipped_array = param_array[:, 0]
norm_array = param_array[:, 0]
if verbose:
print("Snipped_array is", snipped_array)
for ii in range(len(param_array.T)):
if (ii - 1) % 6 == 0:
if verbose:
print("param_array shape", param_array[:, ii])
snipped_array = np.vstack((snipped_array, param_array[:, ii]))
norm_array = np.vstack((norm_array, param_array[:, ii]))
elif (ii - 1) % 6 == 2:
snipped_array = np.vstack((snipped_array, param_array[:, ii]))
temp_max = np.max(param_array[:, ii])
norm_array = np.vstack((norm_array, param_array[:, ii] / temp_max))
elif (ii - 1) % 6 == 3:
snipped_array = np.vstack((snipped_array, param_array[:, ii]))
norm_array = np.vstack((norm_array, param_array[:, ii] / temp_max))
snipped_array = snipped_array.T
norm_array = norm_array.T
try:
os.mkdir(folder_str)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
norm_name = file_name + '_norm.txt'
snip_name = file_name + '_snip.txt'
file_name = file_name + '.txt'
try:
included_spectra_str = json.dumps(included_spectra, sort_keys=True, indent=4,
separators=(',', ': '))
except:
print("Source: save_parameter_sweep\nJSON FAILED")
return
included_spectra_str = included_spectra_str.replace('\n', '\n#')
included_spectra_str += '\n#' * (99 - included_spectra_str.count('\n'))
origin_import1 = param_name
origin_import2 = unit
origin_import3 = ""
for order in sb_included:
        origin_import1 += ",Frequency,error,Sideband strength,error,Linewidth,error"
origin_import2 += ",eV,,arb. u.,,meV,"
origin_import3 += ",{0},,{0},,{0},".format(order)
origin_total = origin_import1 + "\n" + origin_import2 + "\n" + origin_import3
origin_import1 = param_name
origin_import2 = unit
origin_import3 = ""
for order in sb_included:
origin_import1 += ",Frequency,Sideband strength,error"
origin_import2 += ",eV,arb. u.,"
origin_import3 += ",{0},{0},".format(order)
origin_snip = origin_import1 + "\n" + origin_import2 + "\n" + origin_import3
header_total = '#' + included_spectra_str + '\n' + origin_total
header_snip = '#' + included_spectra_str + '\n' + origin_snip
# print "Spec header: ", spec_header
if verbose:
print("the param_array is:", param_array)
np.savetxt(os.path.join(folder_str, file_name), param_array, delimiter=',',
header=header_total, comments='', fmt='%0.6e')
np.savetxt(os.path.join(folder_str, snip_name), snipped_array, delimiter=',',
header=header_snip, comments='', fmt='%0.6e')
np.savetxt(os.path.join(folder_str, norm_name), norm_array, delimiter=',',
header=header_snip, comments='', fmt='%0.6e')
if verbose:
print("Saved the file.\nDirectory: {}".format(
os.path.join(folder_str, file_name)))
def save_parameter_sweep(spectrum_list, file_name, folder_str, param_name, unit,
wanted_indices = [1, 3, 4], skip_empties = False, verbose=False,
header_dict = {}, only_even=False):
"""
This function will take a fully processed list of spectrum objects and
slice Spectrum.sb_fits appropriately to get an output like:
"Parameter" | SB1 freq | err | SB1 amp | error | SB1 linewidth | error | SB2...| SBn...|
param1 | . |
param2 | . |
.
.
.
    Currently the offset y0 is ignored.
After constructing this large matrix, it will save it somewhere.
    This function has been updated to pass a list of indices to slice for the return
    values
skip_empties: If False, will add a row of zeroes for the parameter even if no sidebands
are found. If True, will not add a line for that parameter
only_even: don't include odd orders in the saved sweep
[sb number, Freq (eV), Freq error (eV), Gauss area (arb.), Area error, Gauss linewidth (eV), Linewidth error (eV)]
[ 0 , 1 , 2, , 3 , 4 , 5 , 6 ]
"""
if isinstance(param_name, list):
# if you pass two things because the param you want
# is in a dict (e.g. field strength has mean/std)
# do it that way
param_name_list = list(param_name) # keep reference to old one
paramGetter = lambda x: x.parameters[param_name_list[0]][param_name_list[1]]
# Keep the name for labeling things later on
param_name = param_name[0]
else:
paramGetter = lambda x: x.parameters[param_name]
# Sort all of the spectra based on the desired key
spectrum_list.sort(key=paramGetter)
# keep track of which file name corresponds to which parameter which gets put in
included_spectra = dict()
# The big array which will be stacked up to keep all of the sideband details vs desired parameter
param_array = None
# list of which sidebands are seen throughout.
sb_included = []
# how many parameters (area, strength, linewidth, pos, etc.) are there?
    # Here in case software changes and more things are kept in
# sb results. Needed to handle how to slice the arrays
try:
num_params = spectrum_list[0].sb_results.shape[1]
except IndexError:
# There's a file with only 1 sb and it happens to be first
# in the list.
num_params = spectrum_list[0].sb_results.shape[0]
except AttributeError:
# The first file has no sidebands, so just hardcode it, as stated below.
num_params=0
# Rarely, there's an issue where I'm doing some testing and there's a set
# where the first file has no sidebands in it, so the above thing returns 0
# It seems really silly to do a bunch of testing to try and correct for that, so
# I'm going to hardcode the number of parameters.
if num_params == 0:
num_params = 7
# loop through all of them once to figure out which sidebands are seen in all spectra
for spec in spectrum_list:
try:
# use sets to keep track of only unique sidebands
sb_included = sorted(list(set(sb_included + list(spec.full_dict.keys()))))
except AttributeError:
print("No full dict?", spec.fname)
print(spec.sb_list)
        # If these are from summed spectra, then only the first file name
# from that sum will show up here, which should be fine?
included_spectra[spec.fname.split('/')[-1]] = paramGetter(spec)
if only_even:
sb_included = [ii for ii in sb_included if not ii%2]
if verbose:
print("included names:", included_spectra)
print("sb_included:", sb_included)
for spec in spectrum_list:
        # Flag to track whether there are no sidebands or not. Used to skip
# issues when trying to index on empty arrays
noSidebands = False
if verbose:
print("the sb_results:", spec.sb_results)
# if no sidebands were found, skip this one
try:
# TODO: (08/14/18) the .ndim==1 isn't the correct check, since it fails
# when looking at the laser line. Need to test this with a real
# empty data set, vs data set with 1 sb
#
#
# (08/28/18) I'm not sure what the "not spec" is trying to handle
# spec.sb_results is None occurs when _no_ sidebands were fit
# spec.sb_results.ndim == 1 happens when only one sideband is found
if not spec or spec.sb_results is None or spec.sb_results.ndim == 1:
if spec.sb_results is None:
                    # Flag that no sidebands are found
noSidebands = True
elif spec.sb_results[0] == 0:
                    # Cast it to 2d to allow slicing later on. Not sure why this is
# only done if the laser line is the one found.
spec.sb_results = np.atleast_2d(spec.sb_results)
elif skip_empties:
continue
else:
noSidebands = True
except (AttributeError, TypeError):
# continue
raise
# Make an sb_results of all zeroes where we'll fill
# in the sideband info we found
new_spec = np.zeros((len(sb_included), num_params))
if not noSidebands:
sb_results = spec.sb_results.copy()
saw_sbs = sb_results[:, 0]
found_sb = sorted(list(set(sb_included) & set(saw_sbs)))
found_idx = [sb_included.index(ii) for ii in found_sb]
try:
new_spec[:, 0] = sb_included
except:
print("new_spec", new_spec)
raise
try:
if only_even:
new_spec[found_idx, :] = sb_results[sb_results[:,0]%2==0]
else:
new_spec[found_idx, :] = sb_results
except ValueError:
print(spec.fname)
print("included:", sb_included)
print("found:", found_sb, found_idx)
print(new_spec.shape, sb_results.shape)
print(sb_results)
print(new_spec)
raise
spec_data = np.insert(new_spec.flatten(), 0, float(paramGetter(spec)))
try:
param_array = np.row_stack((param_array, spec_data))
except:
param_array = np.array(spec_data)
if param_array.ndim == 1: # if you only pass one spectra
param_array = param_array[None, :] # recast it to 2D for slicing
# the indices we want from the param array from the passed argument
snip = wanted_indices
N = len(sb_included)
# run it out across all of the points across the param_array
snipped_indices = [0] + list(
1+np.array(snip * N) + num_params * np.array(sorted(list(range(N)) * len(snip))))
snipped_array = param_array[:, snipped_indices]
norm_array = snipped_array.copy()
# normalize the area if it's requested
if 3 in snip:
num_snip = len(snip)
strength_idx = snip.index(3)
if 4 in snip:
#normalize error first if it was requested
idx = snip.index(4)
norm_array[:, 1 + idx + np.arange(N) * num_snip] /= norm_array[:,1 + strength_idx + np.arange(N) * num_snip].max(axis=0)
strength_idx = snip.index(3)
        norm_array[:, 1+strength_idx+np.arange(N)*num_snip] /= norm_array[:, 1+strength_idx+np.arange(N)*num_snip].max(axis=0)
"""
Histogram classes to contain event rate data and allow for easy plotting
Original author: <NAME> (https://github.com/maxbriel)
Modified by: <NAME> (https://github.com/Krytic)
"""
import matplotlib.pyplot as plt
import numpy as np
import pickle
from scipy.stats import iqr
from scipy.stats import multivariate_normal
import takahe
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import warnings
from uncertainties import ufloat
from uncertainties.umath import log as ulog
from uncertainties.umath import log10 as ulog10
from uncertainties.umath import log as ulog
class histogram:
"""
A histogram which can contain data and can be manipulated.
Either **xlow**, **xup**, and **nr_bins** is given or **edges**
As per any histogram, the upper edges are non inclusive, except for
the last bin.
Arguments:
xlow {float} -- lower bound
xup {float} -- upper bound
nr_bins {int} -- the number of bins
edges {array} -- An array with items defining the edges.
Attributes:
_xlow {float} -- lower bound of the histogram
_xup {float} -- upper bound of the histogram
_nr_bins {int} -- the number of bins in the histogram
_bin_edges {array} -- An array of bin edges
_values {array} -- An array of length **_nr_bins**
containing the value of each bin
lower_edges {array} -- An array of the lower edges of the bins
in the histogram
upper_edges {array} -- An array of the upper edges of the bins
in the histogram
_hits {array} -- An array containing the number of times
each bin has been inserted into.
"""
def __init__(self, xlow=None, xup=None, nr_bins=None, edges=None):
if xlow != None and xup != None and nr_bins != None:
self._xlow = xlow
self._xup = xup
self._nr_bins = nr_bins
self._bin_edges = np.linspace(xlow, xup, nr_bins+1)
        elif isinstance(edges, (list, np.ndarray)):
self._xlow = edges[0]
self._xup = edges[-1]
self._nr_bins = len(edges)-1
self._bin_edges = np.array(edges)
else:
raise Exception("Not given the correct input")
self._values = np.zeros(self._nr_bins)
self._hits = np.zeros(self._nr_bins)
self.lower_edges = self._bin_edges[:-1]
self.upper_edges = self._bin_edges[1:]
def __len__(self):
return len(self._values)
def __str__(self):
return str(self._values)
def __repr__(self):
return f"The bins: {self._bin_edges}\nThe values: {self._values}"
def __add__(self, other):
"""
Addition
Performs element-wise addition with another histogram or float
object.
Arguments:
other {mixed} -- Either another histogram object, or a float
type object,
Returns:
{histogram} -- A deep copy of the resultant histogram.
"""
out = self.copy()
if isinstance(other, histogram):
out._values = self._values + other._values
else:
out._values = self._values + other
return out
def __mul__(self, other):
"""
Multiplication
Performs element-wise multiplication with a float type object.
Arguments:
other {float} -- The multiplier
Returns:
{histogram} -- A deep copy of the resultant histogram.
"""
out = self.copy()
out._values = self._values * other
return out
def __rmul__(self, other):
return self.__mul__(other)
def __sub__(self, other):
return self + -1 * other
def __div__(self, other):
return self.__truediv__(other)
def __truediv__(self, other):
out = self.copy()
out._values = self._values / other
out._hits = self._hits
return out
def inbounds(self, value):
"""
Determines if a value is within the bounds of the histogram.
Arguments:
            value {float} -- The value to check
Returns:
{bool} -- Whether or not the value is within the histogram
range.
"""
return self._xlow <= value and self._xup >= value
def copy(self):
"""
Creates a copy of the histogram
Returns:
{histogram} -- An exact (deep) copy of the histogram
"""
out = histogram(xlow=self._xlow, xup=self._xup, nr_bins=self._nr_bins)
        out._values = self._values.copy()
        out._hits = self._hits.copy()
return out
def fill(self, x, weight = 1):
"""
Fill the histogram with data.
Arguments:
x {mixed} -- Either a single entry or an array of *N* to
put into the histogram
w {mixed} -- The weight of the entry of *N* entries to be
added to the histogram.
"""
def _insert(f, g):
if f >= self._xup:
self._values[self._nr_bins-1] += g
self._hits[self._nr_bins-1] += 1
elif f <= self._xlow:
self._values[0] += g
self._hits[0] += 1
else:
bin_nr = np.where(self.lower_edges <= f)[0][-1]
self._values[bin_nr] += g
self._hits[bin_nr] += 1
        # Data validation: x can be either a float or an array type.
        # If x is not a single float, treat it as an array of entries.
if not isinstance(x, type(0.0)):
if isinstance(weight, type(0)):
for i in range(0, len(x)):
_insert(x[i], weight)
elif len(x) != len(weight):
raise Exception(f"Weight needs to be as long as x. (x={len(x)}, weight={len(weight)})")
else:
for i in range(0, len(x)):
_insert(x[i], weight[i])
        # Otherwise x is a single float entry.
else:
_insert(x, weight)
return None
def plot(self, with_errors=False, *argv, **kwargs):
"""
Plot the histogram.
        Additional arguments (beyond with_errors) will be passed on to the
call to plt.hist().
Arguments:
with_errors {bool} -- Whether or not to plot errors (error bars)
on the histogram. (default: {False})
"""
xobj = self._bin_edges
wobj = self._values
# Sometimes the histogram has one too few values for the y-axis
# (and sometimes it has one too many). We coerce the histogram
# into having the right shape in this instance (and fail if it
# still does not).
if len(self._values) == len(xobj) - 1:
wobj = np.append(wobj, wobj[-1])
elif len(self._values) - 1 == len(xobj):
wobj = wobj[:-1]
entries, edges, _ = plt.hist(xobj,
self._bin_edges,
weights=wobj,
histtype=u'step',
*argv,
**kwargs)
if with_errors:
plt.errorbar(self.getBinCenters(), self._values, yerr=np.sqrt(self._hits), fmt='r.')
return None
def plotLog(self, with_errors=False, *argv, **kwargs):
"""
Plot the histogram with a logged x-axis.
        Additional arguments (beyond with_errors) will be passed on to the
call to plt.hist().
Arguments:
with_errors {bool} -- Whether or not to plot errors (error bars)
on the histogram. (default: {False})
"""
entries, edges, _ = plt.hist(np.log10(self._bin_edges[:-1]),
np.log10(self._bin_edges),
weights=self._values,
histtype=u'step',
*argv,
**kwargs)
if with_errors:
            plt.errorbar(self.getBinCenters(), self._values, yerr=np.sqrt(self._hits), fmt='r.')

        return None
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wraps SciPy's optimization routines with PyTree and implicit diff support.
# TODO(fllinares): add support for `LinearConstraint`s.
# TODO(fllinares): add support for methods requiring Hessian / Hessian prods.
# TODO(fllinares): possibly hardcode `dtype` attribute, as likely useless.
"""
import abc
from dataclasses import dataclass
from typing import Any
from typing import Callable
from typing import Dict
from typing import NamedTuple
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import Union
import jax
import jax.numpy as jnp
import jax.tree_util as tree_util
from jaxopt._src import base
from jaxopt._src import implicit_diff as idf
from jaxopt._src import linear_solve
from jaxopt._src import projection
from jaxopt._src.tree_util import tree_sub
import numpy as onp
import scipy as osp
class ScipyMinimizeInfo(NamedTuple):
"""Named tuple with results for `scipy.optimize.minimize` wrappers."""
fun_val: jnp.ndarray
success: bool
status: int
iter_num: int
class ScipyRootInfo(NamedTuple):
"""Named tuple with results for `scipy.optimize.root` wrappers."""
fun_val: float
success: bool
status: int
class PyTreeTopology(NamedTuple):
"""Stores info to reconstruct PyTree from flattened PyTree leaves.
# TODO(fllinares): more specific type annotations for attributes?
Attributes:
treedef: the PyTreeDef object encoding the structure of the target PyTree.
shapes: an iterable with the shapes of each leaf in the target PyTree.
dtypes: an iterable with the dtypes of each leaf in the target PyTree.
sizes: an iterable with the sizes of each leaf in the target PyTree.
n_leaves: the number of leaves in the target PyTree.
"""
treedef: Any
shapes: Sequence[Any]
dtypes: Sequence[Any]
@property
def sizes(self):
return [int(onp.prod(shape)) for shape in self.shapes]
@property
def n_leaves(self):
return len(self.shapes)
def jnp_to_onp(x_jnp: Any,
dtype: Optional[Any] = onp.float64) -> onp.ndarray:
"""Converts JAX PyTree into repr suitable for scipy.optimize.minimize.
Several of SciPy's optimization routines require inputs and/or outputs to be
onp.ndarray<float>[n]. Given an input PyTree `x_jnp`, this function will
flatten all its leaves and, if there is more than one leaf, the corresponding
flattened arrays will be concatenated and, optionally, casted to `dtype`.
Args:
x_jnp: a PyTree of jnp.ndarray with structure identical to init.
dtype: if not None, ensure output is a NumPy array of this dtype.
Return type:
onp.ndarray.
Returns:
A single onp.ndarray<dtype>[n] array, consisting of all leaves of x_jnp
flattened and concatenated. If dtype is None, the output dtype will be
determined by NumPy's casting rules for the concatenate method.
"""
x_onp = [onp.asarray(leaf, dtype).reshape(-1)
for leaf in tree_util.tree_leaves(x_jnp)]
# NOTE(fllinares): return value must *not* be read-only, I believe.
return onp.concatenate(x_onp)
def make_jac_jnp_to_onp(input_pytree_topology: PyTreeTopology,
output_pytree_topology: PyTreeTopology,
dtype: Optional[Any] = onp.float64) -> Callable:
"""Returns function "flattening" Jacobian for given in/out PyTree topologies.
For a smooth function `fun(x_jnp, *args, **kwargs)` taking an arbitrary
PyTree `x_jnp` as input and returning another arbitrary PyTree `y_jnp` as
output, JAX's transforms such as `jax.jacrev` or `jax.jacfwd` will return a
Jacobian with a PyTree structure reflecting the input and output PyTrees.
However, several of SciPy's optimization routines expect inputs and outputs to
be 1D NumPy arrays and, thus, Jacobians to be 2D NumPy arrays.
Given the Jacobian of `fun(x_jnp, *args, **kwargs)` as provided by JAX,
`jac_jnp_to_onp` will format it to match the Jacobian of
`jnp_to_onp(fun(x_jnp, *args, **kwargs))` w.r.t. `jnp_to_onp(x_jnp)`,
where `jnp_to_onp` is a vectorization operator for arbitrary PyTrees.
Args:
input_pytree_topology: a PyTreeTopology encoding the topology of the input
PyTree.
output_pytree_topology: a PyTreeTopology encoding the topology of the output
PyTree.
dtype: if not None, ensure output is a NumPy array of this dtype.
Return type:
Callable.
Returns:
A function "flattening" Jacobian for given input and output PyTree
topologies.
"""
ravel_index = lambda i, j: j + i * input_pytree_topology.n_leaves
def jac_jnp_to_onp(jac_pytree: Any):
# Builds flattened Jacobian blocks such that `jacs_onp[i][j]` equals the
# Jacobian of vec(i-th leaf of output_pytree) w.r.t.
# vec(j-th leaf of input_pytree), where vec() is the vectorization op.,
# i.e. reshape(input, [-1]).
jacs_leaves = tree_util.tree_leaves(jac_pytree)
jacs_onp = []
for i, output_size in enumerate(output_pytree_topology.sizes):
jacs_onp_i = []
for j, input_size in enumerate(input_pytree_topology.sizes):
jac_leaf = onp.asarray(jacs_leaves[ravel_index(i, j)], dtype)
jac_leaf = jac_leaf.reshape([output_size, input_size])
jacs_onp_i.append(jac_leaf)
jacs_onp.append(jacs_onp_i)
return onp.block(jacs_onp)
return jac_jnp_to_onp
def make_onp_to_jnp(pytree_topology: PyTreeTopology) -> Callable:
"""Returns inverse of `jnp_to_onp` for a specific PyTree topology.
Args:
pytree_topology: a PyTreeTopology encoding the topology of the original
PyTree to be reconstructed.
Return type:
Callable.
Returns:
The inverse of `jnp_to_onp` for a specific PyTree topology.
"""
treedef, shapes, dtypes = pytree_topology
split_indices = onp.cumsum(list(pytree_topology.sizes[:-1]))
def onp_to_jnp(x_onp: onp.ndarray) -> Any:
"""Inverts `jnp_to_onp` for a specific PyTree topology."""
    flattened_leaves = onp.split(x_onp, split_indices)
    x_jnp = [jnp.asarray(leaf.reshape(shape), dtype)
             for leaf, shape, dtype in zip(flattened_leaves, shapes, dtypes)]
    return tree_util.tree_unflatten(treedef, x_jnp)
  return onp_to_jnp
# -*- coding: utf-8 -*-
'''
Classes to solve the steady state of liquid and illiquid assets model
'''
from __future__ import print_function
import sys
sys.path.insert(0,'../')
import numpy as np
import scipy as sc
from scipy.stats import norm
from scipy.interpolate import interp1d, interp2d, griddata, RegularGridInterpolator
from scipy import sparse as sp
import time
from SharedFunc3 import Transition, ExTransitions, GenWeight, MakeGridkm, Tauchen, Fastroot
class SteadyStateTwoAsset:
'''
Classes to solve the steady state of liquid and illiquid assets model
'''
def __init__(self, par, mpar, grid):
self.par = par
self.mpar = mpar
self.grid = grid
def SolveSteadyState(self):
'''
solve for steady state
returns
----------
par : dict
            parameters
        mpar : dict
            parameters
grid: dict
grid for solution
Output : float
steady state output
targets : dict
steady state stats
Vm : np.array
marginal value of assets m
Vk : np.array
            marginal value of assets k
joint_distr : np.array
            joint distribution of m, k and h
Copula : dict
points for interpolation of joint distribution
c_a_star : np.array
policy function for consumption w/ adjustment
c_n_star : np.array
policy function for consumption w/o adjustment
psi_star : np.array
continuation value of holding capital
m_a_star : np.array
policy function for asset m w/ adjustment
m_n_star : np.array
policy function for asset m w/o adjustment
mutil_c_a : np.array
marginal utility of c w/ adjustment
mutil_c_n : np.array
marginal utility of c w/o adjustment
mutil_c : np.array
marginal utility of c w/ & w/o adjustment
P_H : np.array
transition probability
'''
## Set grid h
grid = self.grid
resultStVar=self.StochasticsVariance(self.par, self.mpar, grid)
P_H = resultStVar['P_H'].copy()
grid = resultStVar['grid'].copy()
par = resultStVar['par'].copy()
grid = MakeGridkm(self.mpar, grid, grid['k_min'], grid['k_max'], grid['m_min'], grid['m_max'])
meshes = {}
meshes['m'], meshes['k'], meshes['h'] = np.meshgrid(grid['m'],grid['k'],grid['h'],indexing='ij')
## Solve for steady state capital by bi-section
result_SS = self.SteadyState(P_H, grid, meshes, self.mpar, par)
c_n_guess = result_SS['c_n_guess'].copy()
m_n_star = result_SS['m_n_star'].copy()
c_a_guess = result_SS['c_a_guess'].copy()
m_a_star = result_SS['m_a_star'].copy()
cap_a_star = result_SS['cap_a_star'].copy()
psi_guess = result_SS['psi_guess'].copy()
joint_distr = result_SS['joint_distr'].copy()
R_fc = result_SS['R_fc']
W_fc = result_SS['W_fc']
Profits_fc = result_SS['Profits_fc']
Output = result_SS['Output']
grid = result_SS['grid'].copy()
## SS stats
mesh ={}
mesh['m'],mesh['k'] =np.meshgrid(grid['m'].copy(),grid['k'].copy(), indexing = 'ij')
targets = {}
targets['ShareBorrower'] = np.sum((grid['m']<0)*np.transpose(np.sum(np.sum(joint_distr.copy(),axis = 1), axis = 1)))
targets['K'] = np.sum(grid['k'].copy()*np.sum(np.sum(joint_distr.copy(),axis =0),axis=1))
targets['B'] = np.dot(grid['m'].copy(),np.sum(np.sum(joint_distr.copy(),axis = 1),axis = 1))
grid['K'] = targets['K']
grid['B'] = targets['B']
JDredux = np.sum(joint_distr.copy(),axis =2)
targets['BoverK'] = targets['B']/targets['K']
targets['L'] = grid['N']*np.sum(np.dot(grid['h'].copy(),np.sum(np.sum(joint_distr.copy(),axis=0),axis=0)))
targets['KY'] = targets['K']/Output
targets['BY'] = targets['B']/Output
targets['Y'] = Output
BCaux_M = np.sum(np.sum(joint_distr.copy(),axis =1), axis=1)
targets['m_bc'] = BCaux_M[0].copy()
targets['m_0'] = float(BCaux_M[grid['m']==0].copy())
BCaux_K = np.sum(np.sum(joint_distr.copy(),axis=0),axis=1)
targets['k_bc'] = BCaux_K[0].copy()
aux_MK = np.sum(joint_distr.copy(),axis=2)
targets['WtH_b0']=np.sum(aux_MK[(mesh['m']==0)*(mesh['k']>0)].copy())
targets['WtH_bnonpos']=np.sum(aux_MK[(mesh['m']<=0)*(mesh['k']>0)].copy())
targets['T'] =(1.0-par['tau'])*W_fc*grid['N'] +(1.0-par['tau'])*Profits_fc
par['G']=targets['B']*(1.0-par['RB']/par['PI'])+targets['T']
par['R']=R_fc
par['W']=W_fc
par['PROFITS']=Profits_fc
par['N']=grid['N']
targets['GtoY']=par['G']/Output
## Ginis
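        # Each Gini below is built from the discrete Lorenz curve: with S the
        # cumulative weighted wealth, Gini = 1 - sum(pdf*(S[:-1]+S[1:]))/S[-1]
        # (trapezoidal integration of the Lorenz curve).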
# Net worth Gini
mplusk=mesh['k'].copy().flatten('F')*par['Q']+mesh['m'].copy().flatten('F')
IX = np.argsort(mplusk.copy())
mplusk = mplusk[IX.copy()].copy()
moneycapital_pdf = JDredux.flatten(order='F')[IX].copy()
moneycapital_cdf = np.cumsum(moneycapital_pdf.copy())
targets['NegNetWorth']= np.sum((mplusk.copy()<0)*moneycapital_pdf.copy())
S = np.cumsum(moneycapital_pdf.copy()*mplusk.copy())
S = np.concatenate(([0.], S.copy()))
targets['GiniW'] = 1.0-(np.sum(moneycapital_pdf.copy()*(S[:-1].copy()+S[1:].copy()).transpose())/S[-1])
# Liquid Gini
IX = np.argsort(mesh['m'].copy().flatten('F'))
liquid_sort = mesh['m'].copy().flatten('F')[IX.copy()].copy()
liquid_pdf = JDredux.flatten(order='F')[IX.copy()].copy()
liquid_cdf = np.cumsum(liquid_pdf.copy())
targets['Negliquid'] = np.sum((liquid_sort.copy()<0)*liquid_pdf.copy())
S = np.cumsum(liquid_pdf.copy()*liquid_sort.copy())
S = np.concatenate(([0.], S.copy()))
targets['GiniLI'] = 1.0-(np.sum(liquid_pdf.copy()*(S[:-1].copy()+S[1:].copy()))/S[-1].copy())
# Illiquid Gini
IX = np.argsort(mesh['k'].copy().flatten('F'))
illiquid_sort = mesh['k'].copy().flatten('F')[IX.copy()].copy()
illiquid_pdf = JDredux.flatten(order='F')[IX.copy()].copy()
        illiquid_cdf = np.cumsum(illiquid_pdf.copy())
targets['Negliquid'] = np.sum((illiquid_sort.copy()<0)*illiquid_pdf.copy())
S = np.cumsum(illiquid_pdf.copy()*illiquid_sort.copy())
S = np.concatenate(([0.], S.copy()))
targets['GiniIL'] = 1.-(np.sum(illiquid_pdf.copy()*(S[:-1].copy()+S[1:].copy()))/S[-1].copy())
## MPCs
meshesm, meshesk, meshesh = np.meshgrid(grid['m'],grid['k'],grid['h'],indexing='ij')
NW = par['gamma']/(1.+par['gamma'])*(par['N']/par['H'])*par['W']
WW = NW*np.ones((self.mpar['nm'],self.mpar['nk'],self.mpar['nh'])) # Wages
WW[:,:,-1]=par['PROFITS']*par['profitshare']
# MPC
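        # The MPCs below are computed numerically: MPC_*_m is the gradient of the
        # consumption policy along the liquid-asset grid m, rescaled by W*h/c, while
        # MPC_*_h is the elasticity of consumption w.r.t. labour income
        # (gradient of log c against log(W*h)).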
WW_h=np.squeeze(WW[0,0,:].copy().flatten('F'))
WW_h_mesh=np.squeeze(WW.copy()*meshes['h'].copy())
grid_h_aux=grid['h']
MPC_a_m = np.zeros((self.mpar['nm'],self.mpar['nk'],self.mpar['nh']))
MPC_n_m = np.zeros((self.mpar['nm'],self.mpar['nk'],self.mpar['nh']))
for kk in range(0 ,self.mpar['nk']) :
for hh in range(0, self.mpar['nh']) :
MPC_a_m[:,kk,hh]=np.gradient(np.squeeze(c_a_guess[:,kk,hh].copy()))/np.gradient(grid['m'].copy()).transpose()
MPC_n_m[:,kk,hh]=np.gradient(np.squeeze(c_n_guess[:,kk,hh].copy()))/np.gradient(grid['m'].copy()).transpose()
MPC_a_m = MPC_a_m.copy()*(WW_h_mesh.copy()/c_a_guess.copy())
MPC_n_m = MPC_n_m.copy()*(WW_h_mesh.copy()/c_n_guess.copy())
MPC_a_h = np.zeros((self.mpar['nm'],self.mpar['nk'],self.mpar['nh']))
MPC_n_h = np.zeros((self.mpar['nm'],self.mpar['nk'],self.mpar['nh']))
for mm in range(0, self.mpar['nm']) :
for kk in range(0, self.mpar['nk']) :
MPC_a_h[mm,kk,:] = np.gradient(np.squeeze(np.log(c_a_guess[mm,kk,:].copy())))/np.gradient(np.log(WW_h.copy().transpose()*grid_h_aux.copy())).transpose()
MPC_n_h[mm,kk,:] = np.gradient(np.squeeze(np.log(c_n_guess[mm,kk,:].copy())))/np.gradient(np.log(WW_h.copy().transpose()*grid_h_aux.copy())).transpose()
EMPC_h = np.dot(joint_distr.copy().flatten('F'),(par['nu']*MPC_a_h.copy().flatten('F')+(1.-par['nu'])*MPC_n_h.copy().flatten('F')))
EMPC_m = np.dot(joint_distr.copy().flatten('F'),(par['nu']*MPC_a_m.copy().flatten('F')+(1.-par['nu'])*MPC_n_m.copy().flatten('F')))
EMPC_a_h = np.dot(joint_distr.copy().flatten('F'), MPC_a_h.copy().flatten('F'))
EMPC_a_m = np.dot(joint_distr.copy().flatten('F'), MPC_a_m.copy().flatten('F'))
EMPC_n_h = np.dot(joint_distr.copy().flatten('F'), MPC_n_h.copy().flatten('F'))
EMPC_n_m = np.dot(joint_distr.copy().flatten('F'), MPC_n_m.copy().flatten('F'))
        targets['Insurance_coeff']=np.concatenate((np.concatenate(([[1.-EMPC_h]], [[1.-EMPC_m]]), axis=1),
                                                   np.concatenate(([[1.-EMPC_a_h]], [[1.-EMPC_a_m]]), axis=1),
                                                   np.concatenate(([[1.-EMPC_n_h]], [[1.-EMPC_n_m]]), axis=1)), axis=0)
#!/usr/local/sci/bin/python
# PYTHON2.7
#
# Author: <NAME>
# Created: 7 April 2016
# Last update: 7 April 2016
# Location: /data/local/hadkw/HADCRUH2/MARINE/EUSTACEMDS/EUSTACE_SST_MAT/
# GitHub: https://github.com/Kate-Willett/HadISDH_Marine_Build/
# -----------------------
# CODE PURPOSE AND OUTPUT
# -----------------------
# This code provides tools to read new_suite files and write new_suite*extended and new_suite*uncertainty files.
# It reads the files into a dictionary where each column can be explored through its 'key'.
#
# -----------------------
# LIST OF MODULES
# -----------------------
# inbuilt:
# import numpy as np
# import copy
# import sys, os
# import pdb # pdb.set_trace() or c
#
# Kates:
#
# -----------------------
# DATA
# -----------------------
# /project/hadobs2/hadisdh/marine/ICOADS.2.5.1/*/new_suite_197312_ERAclimNBC.txt
#
# -----------------------
# HOW TO RUN THE CODE
# -----------------------
# python2.7
# import MDS_RWtools as MDStool
#
# MDSdict=MDStool.ReadMDSstandard('year', 'month', 'type')
# year='1973' # string
# month='01' # string
# type='ERAclimNBC' # which iteration of output?
#
# MDSdict=MDStool.ReadMDSextended('year', 'month', 'type')
# year='1973' # string
# month='01' # string
# type='ERAclimBC' # which iteration of output?
#
# MDSdict=MDStool.ReadMDSuncertainty('year', 'month', 'type')
# year='1973' # string
# month='01' # string
# type='ERAclimBC' # which iteration of output?
#
# Writing is slightly more complex
# Can't really think where this one would be used but just in case
# MDStool.WriteMDSstandard('year', 'month', 'type',MDSDict)
# year='1973' # string
# month='01' # string
# type='ERAclimNBC' # which iteration of output - should also be the name of the directory the file sits in so the program can figure out the filename and path
# MDSDict = {} # A dictionary created by MakeExtDict()
#
# Writing is slightly more complex
# MDStool.WriteMDSextended('year', 'month', 'type',MDSDict)
# year='1973' # string
# month='01' # string
# type='ERAclimBC' # which iteration of output - should also be the name of the directory the file sits in so the program can figure out the filename and path
# MDSDict = {} # A dictionary created by MakeExtDict()
#
# MDStool.WriteMDSuncertainty('year', 'month', 'type',MDSDict)
# year='1973' # string
# month='01' # string
# type='ERAclimBC' # which iteration of output - should also be the name of the directory the file sits in so the program can figure out the filename and path
# MDSDict = {} # A dictionary created by MakeExtDict()
#
# MDSDict=MDStool.MakeStdDict()
#
# MDSDict=MDStool.MakeExtDict()
#
# MDSDict=MDStool.MakeUncDict()
#
#
#
# For reading this runs the code and stops mid-process so you can then interact with the
# data. You should be able to call this from another program too.
#
# -----------------------
# OUTPUT
# -----------------------
# a dictionary to play with
# -----------------------
# VERSION/RELEASE NOTES
# -----------------------
#
# Version 2 (25 June 2019)
# ---------
#
# Enhancements
#
# Changes
#
# Bug fixes
# For some reason this stopped working now I'm running on RHEL7 even though its stil python2.7
# This appears to be something with reading in the type for each element. So now I read in everything as strings and convert later.
#
#
# Version 1 (7 April 2016)
# ---------
#
# Enhancements
#
# Changes
#
# Bug fixes
#
# -----------------------
# OTHER INFORMATION
# -----------------------
#
#************************************************************************
# START
#************************************************************************
import numpy as np
import sys, os
import copy
import struct
import pdb # pdb.set_trace() or c
# first element is 9 characters long with a space - so delimiters = 10.
#TheTypesStd=("|S9","|S8","int","int","int","int","int","int",
# "int","int","int","int","int",
# "int","int","int","int","int","int","int","int","int","int","int","int",
# "int","int","int","int","int","|S8",
# "int","int","int","int","int","int","int","int","int","int","int",
# "int","|S3","|S4","|S4","|S3","|S2","|S3","int","int","int","int","int","int",
# "int","int","int","int","int","int","int","int","int",
# "int","int","int","int","int","int","int","int","int",
# "int","int","int","int","int","int","int","int","int",
# "int","int","int","int","int","int","int","int","int",
# "int","int","int","int","int","int","int","int")
TheTypesStd=("str","str","int","int","int","int","int","int",
"int","int","int","int","int",
"int","int","int","int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","str",
"int","int","int","int","int","int","int","int","int","int","int",
"int","str","str","str","str","str","str","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int")
TheDelimitersStd=(10,8,8,8,8,8,8,8, # 8 8 ID, Location and time metadata
8,8,8,8,8, # 5 Temperature and pressure OBS values AT, SST and SLP
8,8,8,8,8,8,8,8,8,8,8,8, # 12 Humidity related OBS values DPT, SHU, VAP, CRH, CWB and DPD
8,8,8,8,8,9, # 6 Deck and Platform ID and other platform related metadata
4,3,3,3,8,3,8,3,8,3,8, # 11 OBS related metadata
4,3,4,4,3,2,3,5,5,5,5,5,7, # 13 Instrument related metadata
2,1,1,1,1,1,1,1,1, # 9 BASE QC
2,1,1,1,1,1,1,1,1, # 9 SST QC
2,1,1,1,1,1,1,1,1, # 9 AT QC
2,1,1,1,1,1,1,1,1, # 9 DPT QC
2,1,1,1,1,1,1,1) # 8 Additional QC
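# (The tuples above give the fixed column widths, in characters, used to split each
#  record; the counts in the trailing comments are how many fields sit in each group.)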
# first element is 9 characters long with a space - so delimiters = 10.
TheTypesExt=("|S9","|S8","int","int","int","int","int","int",
"int","int","int","int",
"int","int","int","int","int","int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int","int","int","int","int","int",
"int","int","int",
"|S3","|S3","|S3","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int",
"int","int","int","int","int",
"int","int","int","int","int",
"int","int","int","int","int","int")
TheDelimitersExt=(10,8,8,8,8,8,8,8,
8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,9,
3,3,3,5,5,5,5,5,5,
2,1,1,1,1,1,1,1,
2,1,1,1,1,
2,1,1,1,1,
2,1,1,1,1,1)
# first element is 9 characters long with a space - so delimiters = 10.
TheTypesUnc=("|S9","|S8","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int","int","int","int","int","int",
"int","int","int",
"|S3","|S3","|S3","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int",
"int","int","int","int","int",
"int","int","int","int","int",
"int","int","int","int","int","int")
TheDelimitersUnc=(10,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,9,
3,3,3,5,5,5,5,5,5,
2,1,1,1,1,1,1,1,
2,1,1,1,1,
2,1,1,1,1,
2,1,1,1,1,1)
#************************************************************************
# ReadMDSstandard
#************************************************************************
def ReadMDSstandard(TheYear,TheMonth,TheType):
# InDir = '/project/hadobs2/hadisdh/marine/ICOADS.2.5.1/'+TheType+'/' # THRESH5_5
InDir = '/project/hadobs2/hadisdh/marine/ICOADS.3.0.0/'+TheType+'/' # THRESH5_5
InFil = 'new_suite_'+TheYear+TheMonth+'_'+TheType+'.txt'
TheFilee = InDir+InFil
print(TheFilee)
# RD - moved the TheTypes and TheDelimiters to outside of definition
# so I can call them from another routine
RawData=ReadData(TheFilee,TheTypesStd,TheDelimitersStd)
MDSDict=dict([])
MDSDict['shipid'] = np.array(RawData[:,0],dtype=TheTypesStd[0])
MDSDict['UID'] = np.array(RawData[:,1],dtype=TheTypesStd[1])
MDSDict['LAT'] = np.array(RawData[:,2],dtype=TheTypesStd[2])/100.
MDSDict['LON'] = np.array(RawData[:,3],dtype=TheTypesStd[3])/100.
MDSDict['YR'] = np.array(RawData[:,4],dtype=TheTypesStd[4])
MDSDict['MO'] = np.array(RawData[:,5],dtype=TheTypesStd[5])
MDSDict['DY'] = np.array(RawData[:,6],dtype=TheTypesStd[6])
MDSDict['HR'] = np.array(RawData[:,7],dtype=TheTypesStd[7])
MDSDict['AT'] =np.array(RawData[:,8],dtype=TheTypesStd[8])/10.
MDSDict['ATA'] =np.array(RawData[:,9],dtype=TheTypesStd[9])/100.
MDSDict['SST'] =np.array(RawData[:,10],dtype=TheTypesStd[10])/10.
MDSDict['SSTA'] =np.array(RawData[:,11],dtype=TheTypesStd[11])/100.
MDSDict['SLP'] =np.array(RawData[:,12],dtype=TheTypesStd[12])/10.
MDSDict['DPT'] =np.array(RawData[:,13],dtype=TheTypesStd[13])/10.
MDSDict['DPTA'] =np.array(RawData[:,14],dtype=TheTypesStd[14])/100.
MDSDict['SHU'] =np.array(RawData[:,15],dtype=TheTypesStd[15])/10.
MDSDict['SHUA'] =np.array(RawData[:,16],dtype=TheTypesStd[16])/100.
MDSDict['VAP'] =np.array(RawData[:,17],dtype=TheTypesStd[17])/10.
MDSDict['VAPA'] =np.array(RawData[:,18],dtype=TheTypesStd[18])/100.
MDSDict['CRH'] =np.array(RawData[:,19],dtype=TheTypesStd[19])/10.
MDSDict['CRHA'] =np.array(RawData[:,20],dtype=TheTypesStd[20])/100.
MDSDict['CWB'] =np.array(RawData[:,21],dtype=TheTypesStd[21])/10.
MDSDict['CWBA'] =np.array(RawData[:,22],dtype=TheTypesStd[22])/100.
MDSDict['DPD'] =np.array(RawData[:,23],dtype=TheTypesStd[23])/10.
MDSDict['DPDA'] =np.array(RawData[:,24],dtype=TheTypesStd[24])/100.
# MDSDict['DSVS']=np.array(RawData['f25'])
MDSDict['DCK'] =np.array(RawData[:,26],dtype=TheTypesStd[26])
MDSDict['SID'] =np.array(RawData[:,27],dtype=TheTypesStd[27])
MDSDict['PT'] =np.array(RawData[:,28],dtype=TheTypesStd[28])
# MDSDict['SI']=np.array(RawData['f29'])
# MDSDict['printsim']=np.array(RawData['f30'])
MDSDict['II'] =np.array(RawData[:,31],dtype=TheTypesStd[31])
MDSDict['IT'] =np.array(RawData[:,32],dtype=TheTypesStd[32])
MDSDict['DPTI'] =np.array(RawData[:,33],dtype=TheTypesStd[33])
MDSDict['WBTI'] =np.array(RawData[:,34],dtype=TheTypesStd[34])
MDSDict['WBT'] =np.array(RawData[:,35],dtype=TheTypesStd[35])/10.
# MDSDict['DI']=np.array(RawData['f36'])
# MDSDict['D']=np.array(RawData['f37'])
MDSDict['WI'] =np.array(RawData[:,38],dtype=TheTypesStd[38])
MDSDict['W'] =np.array(RawData[:,39],dtype=TheTypesStd[39])/10.
# MDSDict['VI']=np.array(RawData['f40'])
# MDSDict['VV']=np.array(RawData['f41'])
# MDSDict['DUPS']=np.array(RawData['f42'])
# MDSDict['COR']=np.array(RawData['f43'])
MDSDict['TOB'] =np.array(RawData[:,44],dtype=TheTypesStd[44])
MDSDict['TOT'] =np.array(RawData[:,45],dtype=TheTypesStd[45])
MDSDict['EOT'] =np.array(RawData[:,46],dtype=TheTypesStd[46])
MDSDict['TOH'] =np.array(RawData[:,47],dtype=TheTypesStd[47])
MDSDict['EOH'] =np.array(RawData[:,48],dtype=TheTypesStd[48])
MDSDict['LOV'] =np.array(RawData[:,49],dtype=TheTypesStd[49])
MDSDict['HOP'] =np.array(RawData[:,50],dtype=TheTypesStd[50])
MDSDict['HOT'] =np.array(RawData[:,51],dtype=TheTypesStd[51])
MDSDict['HOB'] =np.array(RawData[:,52],dtype=TheTypesStd[52])
MDSDict['HOA'] =np.array(RawData[:,53],dtype=TheTypesStd[53])
# MDSDict['SMF']=np.array(RawData['f54'])
MDSDict['day'] =np.array(RawData[:,55],dtype=TheTypesStd[55])
MDSDict['land'] =np.array(RawData[:,56],dtype=TheTypesStd[56])
MDSDict['trk'] =np.array(RawData[:,57],dtype=TheTypesStd[57])
MDSDict['date1'] =np.array(RawData[:,58],dtype=TheTypesStd[58])
MDSDict['date2'] =np.array(RawData[:,59],dtype=TheTypesStd[59])
MDSDict['pos'] =np.array(RawData[:,60],dtype=TheTypesStd[60])
MDSDict['blklst'] =np.array(RawData[:,61],dtype=TheTypesStd[61])
MDSDict['dup'] =np.array(RawData[:,62],dtype=TheTypesStd[62])
# MDSDict['POSblank1']=np.array(RawData['f63'])
MDSDict['SSTbud'] =np.array(RawData[:,64],dtype=TheTypesStd[64])
MDSDict['SSTclim']=np.array(RawData[:,65],dtype=TheTypesStd[65])
MDSDict['SSTnonorm']=np.array(RawData[:,66],dtype=TheTypesStd[66])
MDSDict['SSTfreez']=np.array(RawData[:,67],dtype=TheTypesStd[67])
# MDSDict['SSTnoval']=np.array(RawData['f68'])
# MDSDict['SSTnbud']=np.array(RawData['f69'])
# MDSDict['SSTbbud']=np.array(RawData['f70'])
MDSDict['SSTrep'] =np.array(RawData[:,71],dtype=TheTypesStd[71])
# MDSDict['SSTblank']=np.array(RawData['f72'])
MDSDict['ATbud'] =np.array(RawData[:,73],dtype=TheTypesStd[73])
MDSDict['ATclim'] =np.array(RawData[:,74],dtype=TheTypesStd[74])
MDSDict['ATnonorm']=np.array(RawData[:,75],dtype=TheTypesStd[75])
# MDSDict['ATblank1']=np.array(RawData['f76'])
MDSDict['ATnoval']=np.array(RawData[:,77],dtype=TheTypesStd[77])
# MDSDict['ATnbud']=np.array(RawData['f78'])
MDSDict['ATround']=np.array(RawData[:,78],dtype=TheTypesStd[78]) # round in place of nbud
# MDSDict['ATbbud']=np.array(RawData['f79'])
MDSDict['ATrep'] =np.array(RawData[:,80],dtype=TheTypesStd[80])
# MDSDict['ATblank2']=np.array(RawData['f81'])
MDSDict['DPTbud'] =np.array(RawData[:,82],dtype=TheTypesStd[82])
MDSDict['DPTclim']=np.array(RawData[:,83],dtype=TheTypesStd[83])
MDSDict['DPTnonorm']=np.array(RawData[:,84],dtype=TheTypesStd[84])
MDSDict['DPTssat']=np.array(RawData[:,85],dtype=TheTypesStd[85])
MDSDict['DPTnoval']=np.array(RawData[:,86],dtype=TheTypesStd[86])
# MDSDict['DPTnbud']=np.array(RawData['f87'])
MDSDict['DPTround']=np.array(RawData[:,87],dtype=TheTypesStd[87]) # round in place of nbud
# MDSDict['DPTbbud']=np.array(RawData['f88'])
MDSDict['DPTrep'] =np.array(RawData[:,89],dtype=TheTypesStd[89])
MDSDict['DPTrepsat']=np.array(RawData[:,90],dtype=TheTypesStd[90])
# MDSDict['few']=np.array(RawData['f91'])
# MDSDict['ntrk']=np.array(RawData['f92'])
# MDSDict['POSblank2']=np.array(RawData['f93'])
# MDSDict['POSblank3']=np.array(RawData['f94'])
# MDSDict['POSblank4']=np.array(RawData['f95'])
# MDSDict['POSblank5']=np.array(RawData['f96'])
# MDSDict['POSblank6']=np.array(RawData['f97'])
# MDSDict['POSblank7']=np.array(RawData['f98'])
nobs=len(MDSDict['shipid'])
print('Number of obs read in: ',nobs)
return MDSDict
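# Example call (illustrative values, matching the usage notes in the header):
#   MDSdict = ReadMDSstandard('1973', '01', 'ERAclimNBC')
#   MDSdict['AT'] then holds the air temperatures (stored in tenths, converted above).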
#************************************************************************
# ReadMDSextended
#************************************************************************
def ReadMDSextended(TheYear,TheMonth,TheType):
InDir = '/project/hadobs2/hadisdh/marine/ICOADS.3.0.0/'+TheType+'/'
# InDir = '/project/hadobs2/hadisdh/marine/ICOADS.2.5.1/'+TheType+'/'
# InDir = '/data/local/hadkw/HADCRUH2/MARINE/'
InFil = 'new_suite_'+TheYear+TheMonth+'_'+TheType+'_extended.txt'
TheFilee = InDir+InFil
print(TheFilee)
RawData=ReadData(TheFilee,TheTypesExt,TheDelimitersExt)
MDSDict=dict([])
MDSDict['shipid']=np.array(RawData['f0'])
MDSDict['UID']=np.array(RawData['f1'])
MDSDict['LAT']=np.array(RawData['f2'])/100.
MDSDict['LON']=np.array(RawData['f3'])/100.
MDSDict['YR']=np.array(RawData['f4'])
MDSDict['MO']=np.array(RawData['f5'])
MDSDict['DY']=np.array(RawData['f6'])
MDSDict['HR']=np.array(RawData['f7'])
MDSDict['SST']=np.array(RawData['f8'])/10.
MDSDict['SSTA']=np.array(RawData['f9'])/100.
MDSDict['SLP']=np.array(RawData['f10'])/10.
MDSDict['W']=np.array(RawData['f11'])/10.
MDSDict['AT']=np.array(RawData['f12'])/10.
MDSDict['ATA']=np.array(RawData['f13'])/100.
MDSDict['DPT']=np.array(RawData['f14'])/10.
MDSDict['DPTA']=np.array(RawData['f15'])/100.
MDSDict['SHU']=np.array(RawData['f16'])/10.
MDSDict['SHUA']=np.array(RawData['f17'])/100.
MDSDict['VAP']=np.array(RawData['f18'])/10.
MDSDict['VAPA']=np.array(RawData['f19'])/100.
MDSDict['CRH']=np.array(RawData['f20'])/10.
MDSDict['CRHA']=np.array(RawData['f21'])/100.
MDSDict['CWB']=np.array(RawData['f22'])/10.
MDSDict['CWBA']=np.array(RawData['f23'])/100.
MDSDict['DPD']=np.array(RawData['f24'])/10.
MDSDict['DPDA']=np.array(RawData['f25'])/100.
MDSDict['ATtbc']=np.array(RawData['f26'])/10.
MDSDict['ATAtbc']=np.array(RawData['f27'])/100.
MDSDict['DPTtbc']=np.array(RawData['f28'])/10.
MDSDict['DPTAtbc']=np.array(RawData['f29'])/100.
MDSDict['SHUtbc']=np.array(RawData['f30'])/10.
MDSDict['SHUAtbc']=np.array(RawData['f31'])/100.
MDSDict['VAPtbc']=np.array(RawData['f32'])/10.
MDSDict['VAPAtbc']=np.array(RawData['f33'])/100.
MDSDict['CRHtbc']=np.array(RawData['f34'])/10.
MDSDict['CRHAtbc']=np.array(RawData['f35'])/100.
MDSDict['CWBtbc']=np.array(RawData['f36'])/10.
MDSDict['CWBAtbc']=np.array(RawData['f37'])/100.
MDSDict['DPDtbc']=np.array(RawData['f38'])/10.
MDSDict['DPDAtbc']=np.array(RawData['f39'])/100.
MDSDict['AThc']=np.array(RawData['f40'])/10.
MDSDict['ATAhc']=np.array(RawData['f41'])/100.
MDSDict['DPThc']=np.array(RawData['f42'])/10.
MDSDict['DPTAhc']=np.array(RawData['f43'])/100.
MDSDict['SHUhc']=np.array(RawData['f44'])/10.
MDSDict['SHUAhc']=np.array(RawData['f45'])/100.
MDSDict['VAPhc']=np.array(RawData['f46'])/10.
MDSDict['VAPAhc']=np.array(RawData['f47'])/100.
MDSDict['CRHhc']=np.array(RawData['f48'])/10.
MDSDict['CRHAhc']=np.array(RawData['f49'])/100.
MDSDict['CWBhc']=np.array(RawData['f50'])/10.
MDSDict['CWBAhc']=np.array(RawData['f51'])/100.
MDSDict['DPDhc']=np.array(RawData['f52'])/10.
MDSDict['DPDAhc']=np.array(RawData['f53'])/100.
MDSDict['ATscn']=np.array(RawData['f54'])/10.
MDSDict['ATAscn']=np.array(RawData['f55'])/100.
MDSDict['DPTscn']=np.array(RawData['f56'])/10.
MDSDict['DPTAscn']=np.array(RawData['f57'])/100.
MDSDict['SHUscn']=np.array(RawData['f58'])/10.
MDSDict['SHUAscn']=np.array(RawData['f59'])/100.
MDSDict['VAPscn']=np.array(RawData['f60'])/10.
MDSDict['VAPAscn']=np.array(RawData['f61'])/100.
MDSDict['CRHscn']=np.array(RawData['f62'])/10.
MDSDict['CRHAscn']=np.array(RawData['f63'])/100.
MDSDict['CWBscn']=np.array(RawData['f64'])/10.
MDSDict['CWBAscn']=np.array(RawData['f65'])/100.
MDSDict['DPDscn']=np.array(RawData['f66'])/10.
MDSDict['DPDAscn']=np.array(RawData['f67'])/100.
MDSDict['ATslr']=np.array(RawData['f68'])/10.
MDSDict['ATAslr']=np.array(RawData['f69'])/100.
MDSDict['DPTslr']=np.array(RawData['f70'])/10.
MDSDict['DPTAslr']=np.array(RawData['f71'])/100.
MDSDict['SHUslr']=np.array(RawData['f72'])/10.
MDSDict['SHUAslr']=np.array(RawData['f73'])/100.
MDSDict['VAPslr']=np.array(RawData['f74'])/10.
MDSDict['VAPAslr']=np.array(RawData['f75'])/100.
MDSDict['CRHslr']=np.array(RawData['f76'])/10.
MDSDict['CRHAslr']=np.array(RawData['f77'])/100.
MDSDict['CWBslr']=np.array(RawData['f78'])/10.
MDSDict['CWBAslr']=np.array(RawData['f79'])/100.
MDSDict['DPDslr']=np.array(RawData['f80'])/10.
MDSDict['DPDAslr']=np.array(RawData['f81'])/100.
MDSDict['DCK']=np.array(RawData['f82'])
MDSDict['SID']=np.array(RawData['f83'])
MDSDict['PT']=np.array(RawData['f84'])
MDSDict['EOT']=np.array(RawData['f85']) # something up here
MDSDict['EOH']=np.array(RawData['f86'])
MDSDict['ESTE']=np.array(RawData['f87'])
MDSDict['LOV']=np.array(RawData['f88'])
MDSDict['HOP']=np.array(RawData['f89'])
MDSDict['HOT']=np.array(RawData['f90'])
MDSDict['HOB']=np.array(RawData['f91'])
MDSDict['HOA']=np.array(RawData['f92'])
MDSDict['ESTH']=np.array(RawData['f93'])
MDSDict['day']=np.array(RawData['f94'])
MDSDict['land']=np.array(RawData['f95'])
MDSDict['trk']=np.array(RawData['f96'])
MDSDict['date1']=np.array(RawData['f97'])
MDSDict['date2']=np.array(RawData['f98'])
MDSDict['pos']=np.array(RawData['f99'])
MDSDict['blklst']=np.array(RawData['f100'])
MDSDict['dup']=np.array(RawData['f101'])
MDSDict['SSTbud']=np.array(RawData['f102'])
MDSDict['SSTclim']=np.array(RawData['f103'])
MDSDict['SSTnonorm']=np.array(RawData['f104'])
MDSDict['SSTfreez']=np.array(RawData['f105'])
MDSDict['SSTrep']=np.array(RawData['f106'])
MDSDict['ATbud']=np.array(RawData['f107'])
MDSDict['ATclim']=np.array(RawData['f108'])
MDSDict['ATnonorm']=np.array(RawData['f109'])
MDSDict['ATround']=np.array(RawData['f110']) # round in place of nbud
MDSDict['ATrep']=np.array(RawData['f111'])
MDSDict['DPTbud']=np.array(RawData['f112'])
MDSDict['DPTclim']=np.array(RawData['f113'])
MDSDict['DPTssat']=np.array(RawData['f114'])
MDSDict['DPTround']=np.array(RawData['f115']) # round in place of nbud
MDSDict['DPTrep']=np.array(RawData['f116'])
MDSDict['DPTrepsat']=np.array(RawData['f117'])
nobs=len(MDSDict['shipid'])
print('Number of obs read in: ',nobs)
return MDSDict
#************************************************************************
# ReadMDSuncertainty
#************************************************************************
def ReadMDSuncertainty(TheYear,TheMonth,TheType):
InDir = '/project/hadobs2/hadisdh/marine/ICOADS.3.0.0/'+TheType+'/'
# InDir = '/project/hadobs2/hadisdh/marine/ICOADS.2.5.1/'+TheType+'/'
# InDir = '/data/local/hadkw/HADCRUH2/MARINE/'
InFil = 'new_suite_'+TheYear+TheMonth+'_'+TheType+'_uncertainty.txt'
TheFilee = InDir+InFil
print(TheFilee)
RawData=ReadData(TheFilee,TheTypesUnc,TheDelimitersUnc)
MDSDict=dict([])
MDSDict['shipid']=np.array(RawData['f0'])
MDSDict['UID']=np.array(RawData['f1'])
MDSDict['LAT']=np.array(RawData['f2'])/100.
MDSDict['LON']=np.array(RawData['f3'])/100.
import numpy as np
import pytest
from ndsys.features import VolterraFeatures, prepare_data
def test_prepare_data():
x = np.vstack([1, 2, 3])
y = np.vstack([10, 11, 12])
from __future__ import print_function
# This lets us use the python3-style print() function even in python2. It should have no effect if you're already running python3.
import os
import dwl
import numpy as np
# Configure the printing
np.set_printoptions(suppress=True)
# Find progenitors or descendants.
# progen_finder() inputs snapshots and Caesar catalog, reads in particle IDs for specified type, runs find_progens() then write_progens()
# find_progens() can also be run stand-alone, if you have a list of particle ID and caesar objects. Returns progenitor and second-most massive progenitor.
# write_progens() inserts this info into the Caesar file.
import numpy as np
import h5py
from yt.funcs import mylog
from caesar.utils import memlog
from caesar.group import group_types
from joblib import Parallel, delayed
from scipy import stats
def find_progens(pid_current, pid_target, gid_current, gid_target, pid_hash, n_most=1, min_in_common=0.1, nproc=1):
"""Find most massive and second most massive progenitor/descendants.
Parameters
----------
pid_current : np.ndarray
particle IDs from the current snapshot.
pid_target : np.ndarray
particle IDs from the previous/next snapshot.
gid_current : np.ndarray
group IDs from the current snapshot.
gid_target : np.ndarray
group IDs from the previous/next snapshot.
pid_hash : np.ndarray
indices marking the start of each group in pid_current
n_most : int
Find n_most most massive progenitors/descendants. Current options are 1 or 2.
min_in_common : float
Require more than this fraction of particles in common between an object and a candidate progenitor/descendant for the match to be considered valid.
nproc : int
Number of cores for multiprocessing. Note that this doesn't help much since most of the time is spent in sorting.
"""
# Sort the progenitor IDs and object numbers for faster searching
isort_target = np.argsort(pid_target)
pid_target = pid_target[isort_target] # target particles' IDs
gid_target = gid_target[isort_target] # galaxy IDs for the target particles
ngal_curr = len(pid_hash)-1 # number of galaxies to find progens/descendants for
mylog.info('Progen: Sorted %d target IDs, doing %d groups'%(len(pid_target),ngal_curr))
# Loop over current objects to find progens for each
if nproc>1:
prog_index_tmp = Parallel(n_jobs=nproc)(delayed(find_target_group)(pid_current[pid_hash[ig]:pid_hash[ig+1]],pid_target,gid_target,min_in_common) for ig in range(ngal_curr))
prog_index_tmp = np.array(prog_index_tmp,dtype=int)
prog_index = np.array(prog_index_tmp.T[0],dtype=int)
from utils_for_test import MolecularImageDataset, collate_fn
from torch.utils.data import DataLoader
import torch
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
import numpy as np
from unet import UNet
import matplotlib as mpl
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d
plt.switch_backend('agg')
from generate_smiles import sdf2smiles
from copy import deepcopy
from utils import atom_vocab, charge_vocab
import rdkit
from rdkit import Chem
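# helper activation: leaky ReLU with negative-side slope 0.5 (keeps x where x >= 0, otherwise returns 0.5*x)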
def leaky_relu(x):
x = np.maximum(x, 0.5 * x)
return x
atom_type_devocab = {j: i for i, j in atom_vocab.items()}
atom_type_devocab[0] = 'C'
atom_charge_devocab = {j: i for i, j in charge_vocab.items()}
bond_type_devocab = {0: 1, 1: 2, 2: 3, 3: 4, 4:5,5:6}
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
atom_max_valence = {'<unkonw>': 4, 'O': 2, 'C': 4, 'N': 3, 'F': 1, 'H': 1, 'S': 6, 'Cl': 1, 'P': 5, 'Br': 1,
'B': 3, 'I': 1, 'Si': 4, 'Se': 6, 'Te': 6, 'As': 3, 'Al': 3, 'Zn': 2,
'Ca': 2, 'Ag': 1}
df = pd.read_csv('../data2/UOB/uob.csv')
dataset = MolecularImageDataset(df)
dataloader = DataLoader(dataset, 64, collate_fn=collate_fn)
model = UNet(in_channels=1, heads=[1,14,3,2,1,360,60,60])
model = nn.DataParallel(model)
model.load_state_dict(torch.load('weights0.2/unet_model_weights29.pkl'))
model = model.to(device)
torch.cuda.empty_cache()
model.eval()
total_nums = 0
with torch.no_grad():
results = []
for batch_num, imgs in enumerate(dataloader):
imgs = imgs.to(device)
atom_targets_pred, atom_types_pred, atom_charges_pred, atom_hs_pred, bond_targets_pred, \
bond_types_pred, bond_rhos_pred, bond_omega_types_pred = model(
imgs)
temp = torch.nn.functional.max_pool2d(atom_targets_pred, kernel_size=3,
stride=1, padding=1)
atom_targets_pred = (temp == atom_targets_pred) * (atom_targets_pred > -1).float()
temp = torch.nn.functional.max_pool2d(bond_targets_pred, kernel_size=3,
stride=1, padding=1)
bond_targets_pred = (temp == bond_targets_pred) * (bond_targets_pred > -1).float()
bond_rhos_pred = torch.abs(bond_rhos_pred)
bond_types_pred = bond_types_pred.view(-1, 6, 60, 128, 128)
bond_omega_types_pred2 = torch.cat(
[bond_omega_types_pred[:, 59:], bond_omega_types_pred, bond_omega_types_pred[:, :1]], dim=1)\
.permute(0, 2,3,1).reshape(-1, 128 * 128, 62)
bond_omega_types_pred2 = ((torch.nn.functional.max_pool1d(bond_omega_types_pred2, stride=1, kernel_size=3,
padding=0).reshape(-1, 128, 128, 60).permute(0, 3, 1,2) == bond_omega_types_pred) * \
(bond_omega_types_pred > -1)).float()
# def plot_surf(z):
# h, l = z.shape
# x, y = np.meshgrid(np.arange(0, h), np.arange(0, l))
# ax = plt.subplot(111, projection='3d')
# ax.plot_surface(x, y, z)
# ax.set_xlabel('x')
# ax.set_ylabel('y')
# ax.set_zlabel('z')
# plt.show()
#
# plt.subplot(131)
# plt.imshow(imgs.cpu().numpy()[39, 0])
# plt.subplot(132)
# plt.imshow(atom_targets_pred.cpu().numpy()[39, 0])
# plt.subplot(133)
# plt.imshow(bond_targets_pred.cpu().numpy()[39, 0])
# plt.show()
# for x, y in zip(*((bond_targets[0]==1).nonzero())):
# rho = bond_rhos[:, x, y]
# omega = bond_omega_types[:, x, y].argmax() * (np.pi/60) + np.pi/120 - np.pi/2
# plt.plot([y - rho* np.sin(omega), y + rho*np.sin(omega)], [x - rho*np.cos(omega), x + rho*np.cos(omega)])
for j in range(atom_targets_pred.shape[0]):
smiles = df.loc[total_nums, 'smiles']
mol = Chem.MolFromSmiles(smiles)
smiles = Chem.MolToSmiles(mol, canonical=True)
total_nums += 1
if total_nums % 100 == 0:
print(total_nums)
if True:
img = imgs[j].detach().cpu().numpy()
atom_target_img = atom_targets_pred[j, 0]
atom_type_img = atom_types_pred[j].argmax(0)
atom_charge_img = atom_charges_pred[j].argmax(0)
atom_hs_img = atom_hs_pred[j].argmax(0)
bond_target_img = bond_targets_pred[j, 0]
bond_type_img = bond_types_pred[j].argmax(0)
bond_rhos_img = bond_rhos_pred[j]
bond_omega_img = bond_omega_types_pred[j]
bond_omega_img2 = bond_omega_types_pred2[j]
if (atom_target_img.sum()) == 0 or (bond_target_img.sum() == 0):
print(j, 'cannot find key target point')
results.append(None)
continue
bonds_position_list = []
bonds_property_list = []
bonds_delta_list = []
for position in bond_target_img.nonzero(as_tuple=False):
x, y = position
x, y = x.cpu().item(), y.cpu().item()
for omega_index in bond_omega_img[:, x, y].nonzero(as_tuple=False):
omega_index = omega_index.cpu().item()
if omega_index <= 28:
if bond_omega_img[omega_index, x, y] < bond_omega_img[(omega_index+29):(omega_index+31), x, y].max():
continue
elif omega_index == 29:
if bond_omega_img[omega_index, x, y] < bond_omega_img[(omega_index+29):(omega_index+30), x, y].max() or \
bond_omega_img[omega_index, x, y] < bond_omega_img[0,x,y]:
continue
elif omega_index == 30:
if bond_omega_img[omega_index, x, y] <= bond_omega_img[(omega_index-30):(omega_index-29), x, y].max() or \
bond_omega_img[omega_index, x, y] <= bond_omega_img[59, x, y]:
continue
elif omega_index >= 31:
if bond_omega_img[omega_index, x, y] <= bond_omega_img[(omega_index-31):(omega_index-29), x, y].max():
continue
omega = omega_index * (np.pi / 30) + np.pi / 60 - np.pi / 2
rho = bond_rhos_img[omega_index, x, y].cpu().item()
delta_x, delta_y = rho * np.cos(omega), rho * np.sin(omega)
bond_type = bond_type_img[omega_index, x, y].cpu().item()
bonds_position_list.append([x, y])
bonds_property_list.append(bond_type)
bonds_delta_list.append([delta_x, delta_y])
atoms_position_list = []
atoms_type_list = []
atoms_charge_list = []
atoms_hs_list = []
for position in atom_target_img.nonzero(as_tuple=False):
x, y = position
x, y = x.cpu().item(), y.cpu().item()
atom_type = atom_type_img[x, y].cpu().item()
atom_charge = atom_charge_img[x, y].cpu().item()
atom_h = atom_hs_img[x, y].cpu().item()
if len(atoms_position_list) > 0:
temp1 = np.array(atoms_position_list)
temp2 = np.array([[x, y]])
if np.sum(np.square(temp1 - temp2), axis=-1).min() < 4:
continue
atoms_position_list.append([x, y])
atoms_type_list.append(atom_type_devocab[atom_type])
atoms_charge_list.append(atom_charge_devocab[atom_charge])
atoms_hs_list.append(atom_h)
atom_pred_position1 = np.expand_dims(np.array(bonds_position_list) + np.array(bonds_delta_list), 1)
atom_pred_position2 = np.expand_dims(np.array(bonds_position_list) - np.array(bonds_delta_list), 1)
# PyLS-PM Library
# Author: <NAME>
# Creation: November 2016
# Description: Library based on <NAME>'s simplePLS,
# <NAME>'s plspm and <NAME>'s matrixpls made in R
import pandas as pd
import numpy as np
import scipy as sp
import scipy.stats
from .qpLRlib4 import otimiza, plotaIC
import scipy.linalg
from collections import Counter
from .pca import *
from pandas.plotting import scatter_matrix
from .adequacy import *
class PyLSpm(object):
def PCA(self):
for i in range(self.lenlatent):
print(self.latent[i])
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
PCAdo(block, self.latent[i])
print('KMO')
print(KMO(block))
print('BTS')
print(BTS(block))
def scatterMatrix(self):
for i in range(1, self.lenlatent):
block = self.data[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
scatter_matrix(block, diagonal='kde')
plt.savefig('imgs/scatter' + self.latent[i], bbox_inches='tight')
plt.clf()
plt.cla()
def sampleSize(self):
r = 0.3
alpha = 0.05
# power=0.9
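# Fisher z-transformation of the target correlation r (effect size on the normal scale)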
C = 0.5 * np.log((1 + r) / (1 - r))
Za = scipy.stats.norm.ppf(1 - (alpha / 2))
sizeArray = []
powerArray = []
power = 0.5
for i in range(50, 100, 1):
power = i / 100
powerArray.append(power)
Zb = scipy.stats.norm.ppf(1 - power)
N = abs((Za - Zb) / C)**2 + 3
sizeArray.append(N)
return [powerArray, sizeArray]
def normaliza(self, X):
correction = np.sqrt((len(X) - 1) / len(X)) # std factor correction
mean_ = np.mean(X, 0)
scale_ = np.std(X, 0)
X = X - mean_
X = X / (scale_ * correction)
return X
def gof(self):
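# goodness of fit: square root of (block-size-weighted mean AVE) times (mean R^2 of the endogenous latent variables)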
r2mean = np.mean(self.r2.T[self.endoexo()[0]].values)
AVEmean = self.AVE().copy()
totalblock = 0
for i in range(self.lenlatent):
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block = len(block.columns.values)
totalblock += block
AVEmean[self.latent[i]] = AVEmean[self.latent[i]] * block
AVEmean = np.sum(AVEmean) / totalblock
return np.sqrt(AVEmean * r2mean)
def endoexo(self):
exoVar = []
endoVar = []
for i in range(self.lenlatent):
if(self.latent[i] in self.LVariables['target'].values):
endoVar.append(self.latent[i])
else:
exoVar.append(self.latent[i])
return endoVar, exoVar
def residuals(self):
exoVar = []
endoVar = []
outer_residuals = self.data.copy()
# comun_ = self.data.copy()
for i in range(self.lenlatent):
if(self.latent[i] in self.LVariables['target'].values):
endoVar.append(self.latent[i])
else:
exoVar.append(self.latent[i])
for i in range(self.lenlatent):
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block = block.columns.values
loadings = self.outer_loadings.ix[
block][self.latent[i]].values
outer_ = self.fscores.ix[:, i].values
outer_ = outer_.reshape(len(outer_), 1)
loadings = loadings.reshape(len(loadings), 1)
outer_ = np.dot(outer_, loadings.T)
outer_residuals.ix[:, block] = self.data_.ix[
:, block] - outer_
# comun_.ix[:, block] = outer_
inner_residuals = self.fscores[endoVar]
inner_ = pd.DataFrame.dot(self.fscores, self.path_matrix.ix[endoVar].T)
inner_residuals = self.fscores[endoVar] - inner_
residuals = pd.concat([outer_residuals, inner_residuals], axis=1)
mean_ = np.mean(self.data, 0)
# comun_ = comun_.apply(lambda row: row + mean_, axis=1)
sumOuterResid = pd.DataFrame.sum(
pd.DataFrame.sum(outer_residuals**2))
sumInnerResid = pd.DataFrame.sum(
pd.DataFrame.sum(inner_residuals**2))
divFun = sumOuterResid + sumInnerResid
return residuals, outer_residuals, inner_residuals, divFun
def srmr(self):
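# SRMR: square root of the mean squared difference between the empirical and the model-implied correlation matrices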
srmr = (self.empirical() - self.implied())
srmr = np.sqrt(((srmr.values) ** 2).mean())
return srmr
def implied(self):
corLVs = pd.DataFrame.cov(self.fscores)
implied_ = pd.DataFrame.dot(self.outer_loadings, corLVs)
implied = pd.DataFrame.dot(implied_, self.outer_loadings.T)
implied.values[[np.arange(len(self.manifests))] * 2] = 1
return implied
def empirical(self):
empirical = self.data_
return pd.DataFrame.corr(empirical)
def frequency(self, data=None, manifests=None):
if data is None:
data = self.data
if manifests is None:
manifests = self.manifests
frequencia = pd.DataFrame(0, index=range(1, 6), columns=manifests)
for i in range(len(manifests)):
frequencia[manifests[i]] = data[
manifests[i]].value_counts()
frequencia = frequencia / len(data) * 100
frequencia = frequencia.reindex_axis(
sorted(frequencia.columns), axis=1)
frequencia = frequencia.fillna(0).T
frequencia = frequencia[(frequencia.T != 0).any()]
maximo = pd.DataFrame.max(pd.DataFrame.max(data, axis=0))
if int(maximo) & 1:
neg = np.sum(frequencia.ix[:, 1: ((maximo - 1) / 2)], axis=1)
ind = frequencia.ix[:, ((maximo + 1) / 2)]
pos = np.sum(
frequencia.ix[:, (((maximo + 1) / 2) + 1):maximo], axis=1)
else:
neg = np.sum(frequencia.ix[:, 1:((maximo) / 2)], axis=1)
ind = 0
pos = np.sum(frequencia.ix[:, (((maximo) / 2) + 1):maximo], axis=1)
frequencia['Neg.'] = pd.Series(
neg, index=frequencia.index)
frequencia['Ind.'] = pd.Series(
ind, index=frequencia.index)
frequencia['Pos.'] = pd.Series(
pos, index=frequencia.index)
return frequencia
def frequencyPlot(self, data_, SEM=None):
segmento = 'SEM'
SEMmax = pd.DataFrame.max(SEM)
ok = None
for i in range(1, self.lenlatent):
block = data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block = pd.concat([block, SEM], axis=1)
for j in range(SEMmax + 1):
dataSEM = (block.loc[data_[segmento] == j]
).drop(segmento, axis=1)
block_val = dataSEM.columns.values
dataSEM = self.frequency(dataSEM, block_val)['Pos.']
dataSEM = dataSEM.rename(j + 1)
ok = dataSEM if ok is None else pd.concat(
[ok, dataSEM], axis=1)
for i in range(1, self.lenlatent):
block = data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block_val = block.columns.values
plotando = ok.ix[block_val].dropna(axis=1)
plotando.plot.bar()
plt.legend(loc='upper center',
bbox_to_anchor=(0.5, -.08), ncol=6)
plt.savefig('imgs/frequency' + self.latent[i], bbox_inches='tight')
plt.clf()
plt.cla()
# plt.show()
# block.plot.bar()
# plt.show()
'''for i in range(1, self.lenlatent):
block = self.data[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block_val = block.columns.values
block = self.frequency(block, block_val)
block.plot.bar()
plt.show()'''
def dataInfo(self):
sd_ = np.std(self.data, 0)
mean_ = np.mean(self.data, 0)
skew = scipy.stats.skew(self.data)
kurtosis = scipy.stats.kurtosis(self.data)
w = [scipy.stats.shapiro(self.data.ix[:, i])[0]
for i in range(len(self.data.columns))]
return [mean_, sd_, skew, kurtosis, w]
def predict(self, method='redundancy'):
exoVar = []
endoVar = []
for i in range(self.lenlatent):
if(self.latent[i] in self.LVariables['target'].values):
endoVar.append(self.latent[i])
else:
exoVar.append(self.latent[i])
if (method == 'exogenous'):
Beta = self.path_matrix.ix[endoVar][endoVar]
Gamma = self.path_matrix.ix[endoVar][exoVar]
beta = [1 if (self.latent[i] in exoVar)
else 0 for i in range(self.lenlatent)]
beta = np.diag(beta)
beta_ = [1 for i in range(len(Beta))]
beta_ = np.diag(beta_)
beta = pd.DataFrame(beta, index=self.latent, columns=self.latent)
mid = pd.DataFrame.dot(Gamma.T, np.linalg.inv(beta_ - Beta.T))
mid = (mid.T.values).flatten('F')
k = 0
for j in range(len(exoVar)):
for i in range(len(endoVar)):
beta.ix[endoVar[i], exoVar[j]] = mid[k]
k += 1
elif (method == 'redundancy'):
beta = self.path_matrix.copy()
beta_ = pd.DataFrame(1, index=np.arange(
len(exoVar)), columns=np.arange(len(exoVar)))
beta.ix[exoVar, exoVar] = np.diag(np.diag(beta_.values))
elif (method == 'communality'):
beta = np.diag(np.ones(len(self.path_matrix)))
beta = pd.DataFrame(beta)
partial_ = pd.DataFrame.dot(self.outer_weights, beta.T.values)
prediction = pd.DataFrame.dot(partial_, self.outer_loadings.T.values)
predicted = pd.DataFrame.dot(self.data, prediction)
predicted.columns = self.manifests
mean_ = np.mean(self.data, 0)
intercept = mean_ - np.dot(mean_, prediction)
predictedData = predicted.apply(lambda row: row + intercept, axis=1)
return predictedData
def cr(self):
# Composite Reliability
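# CR = (sum of |loadings|)^2 / [ (sum of |loadings|)^2 + sum over items of (1 - loading^2) ], with loadings taken from the first principal component of the block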
composite = pd.DataFrame(0, index=np.arange(1), columns=self.latent)
for i in range(self.lenlatent):
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
p = len(block.columns)
if(p != 1):
cor_mat = np.cov(block.T)
evals, evecs = np.linalg.eig(cor_mat)
U, S, V = np.linalg.svd(cor_mat, full_matrices=False)
indices = np.argsort(evals)
indices = indices[::-1]
evecs = evecs[:, indices]
evals = evals[indices]
loadings = V[0, :] * np.sqrt(evals[0])
numerador = np.sum(abs(loadings))**2
denominador = numerador + (p - np.sum(loadings ** 2))
cr = numerador / denominador
composite[self.latent[i]] = cr
else:
composite[self.latent[i]] = 1
composite = composite.T
return(composite)
def r2adjusted(self):
n = len(self.data_)
r2 = self.r2.values
r2adjusted = pd.DataFrame(0, index=np.arange(1), columns=self.latent)
for i in range(self.lenlatent):
p = sum(self.LVariables['target'] == self.latent[i])
r2adjusted[self.latent[i]] = r2[i] - \
(p * (1 - r2[i])) / (n - p - 1)
return r2adjusted.T
def htmt(self):
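# HTMT: mean between-block (heterotrait) correlations divided by the geometric mean of the within-block (monotrait) correlations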
htmt_ = pd.DataFrame(pd.DataFrame.corr(self.data_),
index=self.manifests, columns=self.manifests)
mean = []
allBlocks = []
for i in range(self.lenlatent):
block_ = self.Variables['measurement'][
self.Variables['latent'] == self.latent[i]]
allBlocks.append(list(block_.values))
block = htmt_.ix[block_, block_]
mean_ = (block - np.diag(np.diag(block))).values
mean_[mean_ == 0] = np.nan
mean.append(np.nanmean(mean_))
comb = [[k, j] for k in range(self.lenlatent)
for j in range(self.lenlatent)]
comb_ = [(np.sqrt(mean[comb[i][1]] * mean[comb[i][0]]))
for i in range(self.lenlatent ** 2)]
comb__ = []
for i in range(self.lenlatent ** 2):
block = (htmt_.ix[allBlocks[comb[i][1]],
allBlocks[comb[i][0]]]).values
# block[block == 1] = np.nan
comb__.append(np.nanmean(block))
htmt__ = np.divide(comb__, comb_)
where_are_NaNs = np.isnan(htmt__)
htmt__[where_are_NaNs] = 0
htmt = pd.DataFrame(np.tril(htmt__.reshape(
(self.lenlatent, self.lenlatent)), k=-1), index=self.latent, columns=self.latent)
return htmt
def comunalidades(self):
# Comunalidades
return self.outer_loadings**2
def AVE(self):
# AVE
return self.comunalidades().apply(lambda column: column.sum() / (column != 0).sum())
def fornell(self):
cor_ = pd.DataFrame.corr(self.fscores)**2
AVE = self.comunalidades().apply(lambda column: column.sum() / (column != 0).sum())
for i in range(len(cor_)):
cor_.ix[i, i] = AVE[i]
return(cor_)
def rhoA(self):
# rhoA
rhoA = pd.DataFrame(0, index=np.arange(1), columns=self.latent)
for i in range(self.lenlatent):
weights = pd.DataFrame(self.outer_weights[self.latent[i]])
weights = weights[(weights.T != 0).any()]
result = pd.DataFrame.dot(weights.T, weights)
result_ = pd.DataFrame.dot(weights, weights.T)
S = self.data_[self.Variables['measurement'][
self.Variables['latent'] == self.latent[i]]]
S = pd.DataFrame.dot(S.T, S) / S.shape[0]
numerador = (
np.dot(np.dot(weights.T, (S - np.diag(np.diag(S)))), weights))
denominador = (
(np.dot(np.dot(weights.T, (result_ - np.diag(np.diag(result_)))), weights)))
rhoA_ = ((result)**2) * (numerador / denominador)
if(np.isnan(rhoA_.values)):
rhoA[self.latent[i]] = 1
else:
rhoA[self.latent[i]] = rhoA_.values
return rhoA.T
def xloads(self):
# Xloadings
A = self.data_.transpose().values
B = self.fscores.transpose().values
A_mA = A - A.mean(1)[:, None]
B_mB = B - B.mean(1)[:, None]
ssA = (A_mA**2).sum(1)
ssB = (B_mB**2).sum(1)
xloads_ = (np.dot(A_mA, B_mB.T) /
np.sqrt(np.dot(ssA[:, None], ssB[None])))
xloads = pd.DataFrame(
xloads_, index=self.manifests, columns=self.latent)
return xloads
def corLVs(self):
# Correlations LVs
corLVs_ = np.tril(pd.DataFrame.corr(self.fscores))
return pd.DataFrame(corLVs_, index=self.latent, columns=self.latent)
def alpha(self):
# Cronbach Alpha
alpha = pd.DataFrame(0, index=np.arange(1), columns=self.latent)
for i in range(self.lenlatent):
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
p = len(block.columns)
if(p != 1):
p_ = len(block)
correction = np.sqrt((p_ - 1) / p_)
soma = np.var(np.sum(block, axis=1))
cor_ = pd.DataFrame.corr(block)
denominador = soma * correction**2
numerador = 2 * np.sum(np.tril(cor_) - np.diag(np.diag(cor_)))
alpha_ = (numerador / denominador) * (p / (p - 1))
alpha[self.latent[i]] = alpha_
else:
alpha[self.latent[i]] = 1
return alpha.T
def vif(self):
vif = []
totalmanifests = range(len(self.data_.columns))
for i in range(len(totalmanifests)):
independent = [x for j, x in enumerate(totalmanifests) if j != i]
coef, resid = np.linalg.lstsq(
self.data_.ix[:, independent], self.data_.ix[:, i])[:2]
r2 = 1 - resid / \
(self.data_.ix[:, i].size * self.data_.ix[:, i].var())
vif.append(1 / (1 - r2))
vif = pd.DataFrame(vif, index=self.manifests)
return vif
def PLSc(self):
##################################################
# PLSc
rA = self.rhoA()
corFalse = self.corLVs()
for i in range(self.lenlatent):
for j in range(self.lenlatent):
if i == j:
corFalse.ix[i][j] = 1
else:
corFalse.ix[i][j] = corFalse.ix[i][
j] / np.sqrt(rA.ix[self.latent[i]] * rA.ix[self.latent[j]])
corTrue = np.zeros([self.lenlatent, self.lenlatent])
for i in range(self.lenlatent):
for j in range(self.lenlatent):
corTrue[j][i] = corFalse.ix[i][j]
corTrue[i][j] = corFalse.ix[i][j]
corTrue = pd.DataFrame(corTrue, corFalse.columns, corFalse.index)
# Loadings
attenuedOuter_loadings = pd.DataFrame(
0, index=self.manifests, columns=self.latent)
for i in range(self.lenlatent):
weights = pd.DataFrame(self.outer_weights[self.latent[i]])
weights = weights[(weights.T != 0).any()]
result = pd.DataFrame.dot(weights.T, weights)
result_ = pd.DataFrame.dot(weights, weights.T)
newLoad = (
weights.values * np.sqrt(rA.ix[self.latent[i]].values))
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import inv
import numpy.linalg as LA
import pdb
#helper function
def calc_g(E, alpha, beta, g, eta = 1e-8):
return inv((E + 1j*eta)*np.eye(2) - alpha - np.dot(np.conj(beta).T, np.dot(g, beta)))
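# The surface Green's function g is the fixed point of g = [(E + i*eta)*I - alpha - beta^dagger g beta]^(-1);
# calculate_surface() below iterates this map, averaging successive iterates for stability, until it converges.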
def calculate_surface(E, alpha, beta, eta = 1e-8, N = 10000, eps = 1e-6, brute = False):
# N : number of iterations
# eta : 0+ added to get the correct retarded Green's function
# eps : tolerance on the relative change in the result, norm(g_last - g) divided by norm(g), at which the iteration stops
# brute : if brute is True, N iterations are made, otherwise the iterations continue until eps is reached
#initial guess
g0 = inv(alpha)
g = g0
g_last = g0
for i in range(N):
g = calc_g(E, alpha, beta, g, eta)
g = 0.5*(g_last + g)
# assumed completion: stop once the relative change is below eps
if brute == False and LA.norm(g_last - g)/LA.norm(g) < eps:
break
g_last = g
return g
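# Minimal usage sketch (hypothetical 2x2 principal-layer blocks; the numerical values are illustrative only):
# alpha = np.array([[0.0, 1.0], [1.0, 0.0]])
# beta = -np.eye(2)
# g_surf = calculate_surface(E=0.5, alpha=alpha, beta=beta)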
import random
import time
import os
import warnings
import gym
from gym import spaces
from gym.utils import seeding
from PIL import Image
import numpy as np
from config import INPUT_DIM, MIN_STEERING, MAX_STEERING, JERK_REWARD_WEIGHT, MAX_STEERING_DIFF
from config import ROI, THROTTLE_REWARD_WEIGHT, MAX_THROTTLE, MIN_THROTTLE, REWARD_CRASH, CRASH_SPEED_WEIGHT
from environment.carla.client import make_carla_client, CarlaClient
from environment.carla.tcp import TCPConnectionError
from environment.carla.settings import CarlaSettings
from environment.carla.sensor import Camera
from environment.carla.carla_server_pb2 import Control
class Env(gym.Env):
def __init__(self, client, vae=None, min_throttle=0.4, max_throttle=0.6, n_command_history=20, frame_skip=1, n_stack=1, action_lambda=0.5):
self.client = client
# save last n commands
self.n_commands = 2
self.n_command_history = n_command_history
self.command_history = np.zeros((1, self.n_commands * n_command_history))
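# rolling buffer of the last n_command_history (steering, throttle) pairs; it is concatenated to the VAE latent vector in the observation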
self.n_stack = n_stack
self.stacked_obs = None
# assumes that we are using VAE input
self.vae = vae
self.z_size = None
if vae is not None:
self.z_size = vae.z_size
self.observation_space = spaces.Box(low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max,
shape=(1, self.z_size + self.n_commands * n_command_history),
dtype=np.float32)
self.action_space = spaces.Box(low=np.array([-MAX_STEERING, -1]),
high=np.array([MAX_STEERING, 1]), dtype=np.float32)
from typing import Any
from typing import Dict
from typing import Union
import numpy as np
import pandas as pd
from scipy.sparse import issparse
from scipy.stats import ks_2samp
from sklearn.utils import check_random_state
from sklearn.utils import safe_indexing
from .base import BaseSelector
from .base import ONE_DIM_ARRAYLIKE_TYPE
from .base import TWO_DIM_ARRAYLIKE_TYPE
MAX_INT = np.iinfo(np.int32).max
class DropCollinearFeatures(BaseSelector):
"""Drop collinear features.
Examples
--------
>>> import numpy as np
>>> from automllib.feature_selection import DropCollinearFeatures
>>> sel = DropCollinearFeatures()
>>> X = [[1, 1, 100], [2, 2, 10], [1, 1, 1], [1, 1, np.nan]]
>>> Xt = sel.fit_transform(X)
>>> Xt.shape
(4, 2)
"""
def __init__(
self,
random_state: Union[int, np.random.RandomState] = None,
subsample: Union[int, float] = 1.0,
threshold: float = 0.95,
verbose: int = 0
) -> None:
super().__init__(verbose=verbose)
self.random_state = random_state
self.subsample = subsample
self.threshold = threshold
def _check_params(self) -> None:
pass
def _fit(
self,
X: TWO_DIM_ARRAYLIKE_TYPE,
y: ONE_DIM_ARRAYLIKE_TYPE = None
) -> 'DropCollinearFeatures':
random_state = check_random_state(self.random_state)
X = X.astype('float64')
n_samples, _ = X.shape
if isinstance(self.subsample, int):
max_samples = self.subsample
else:
max_samples = int(self.subsample * n_samples)
if max_samples < n_samples:
indices = random_state.choice(
n_samples,
max_samples,
replace=False
)
X = X[indices]
self.corr_ = pd._libs.algos.nancorr(X)
return self
def _get_support(self) -> ONE_DIM_ARRAYLIKE_TYPE:
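# keep feature j only if its absolute correlation with every preceding feature (strict upper triangle of |corr|) is at most threshold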
triu = np.triu(self.corr_, k=1)
triu = np.abs(triu)
triu = np.nan_to_num(triu)
return np.all(triu <= self.threshold, axis=0)
def _more_tags(self) -> Dict[str, Any]:
return {'allow_nan': True}
class DropDriftFeatures(BaseSelector):
"""Drop drift features.
Examples
--------
>>> import numpy as np
>>> from automllib.feature_selection import DropDriftFeatures
>>> sel = DropDriftFeatures()
>>> X = [[1, 1, 100], [2, 2, 10], [1, 1, 1], [np.nan, 1, 1]]
>>> X_test = [[1, 1000, 100], [2, 300, 10], [1, 100, 1], [1, 100, 1]]
>>> Xt = sel.fit_transform(X, X_test=X_test)
>>> Xt.shape
(4, 2)
"""
def __init__(
self,
alpha: float = 0.05,
max_samples: int = 100_000,
random_state: Union[int, np.random.RandomState] = None,
verbose: int = 0
) -> None:
super().__init__(verbose=verbose)
self.alpha = alpha
self.max_samples = max_samples
self.random_state = random_state
def _check_params(self) -> None:
pass
def _fit(
self,
X: TWO_DIM_ARRAYLIKE_TYPE,
y: ONE_DIM_ARRAYLIKE_TYPE = None,
X_test: TWO_DIM_ARRAYLIKE_TYPE = None
) -> 'DropDriftFeatures':
if X_test is None:
self.pvalues_ = None
return self
X_test, _ = self._check_X_y(X_test)
random_state = check_random_state(self.random_state)
train_size, _ = X.shape
train_size = min(train_size, self.max_samples)
test_size, _ = X_test.shape
test_size = min(test_size, self.max_samples)
self.pvalues_ = np.empty(self.n_features_)
for j in range(self.n_features_):
column = X[:, j]
column_test = X_test[:, j]
is_nan = pd.isnull(column)
is_nan_test = pd.isnull(column_test)
# assumed completion: subsample the non-missing values and compare train/test distributions with a two-sample KS test
train = safe_indexing(column, np.where(~is_nan)[0])
train = random_state.choice(train, size=train_size)
test = safe_indexing(column_test, np.where(~is_nan_test)[0])
test = random_state.choice(test, size=test_size)
_, self.pvalues_[j] = ks_2samp(train, test)
return self
import os, glob, logging
import cPickle as pickle
from functools import partial
import random
import math
import multiprocessing as mp
import multiprocessing.sharedctypes as mps
from contextlib import closing
import numpy as n
from scipy.special import erf
import scipy.stats.mstats as mstats
import rtpipe.parsems as pm
import rtpipe.parsecal as pc
import rtpipe.parsesdm as ps
from rtpipe.version import __version__
import rtlib_cython as rtlib
import pyfftw
try:
import casautil
except ImportError:
import pwkit.environments.casa.util as casautil
# setup CASA and logging
qa = casautil.tools.quanta()
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logging.captureWarnings(True)
logger = logging.getLogger('rtpipe')
def pipeline(d, segments):
""" Transient search pipeline running on single node.
Processes one or more segments of data (in which a single bgsub, (u,v,w), etc. can be used).
Each segment is searched completely and independently, and its candidates are saved.
If segments is a list of segments, then it will parallelize read/search processes.
Stages:
0) Take dictionary that defines metadata and search params
-- This defines state of pipeline, including times, uv extent, pipeline search parameters, etc.
1) Read data
-- Overlapping reads needed to maintain sensitivity to all DMs at all times
2) Prepare data
-- Reads/applies telcal/CASA solutions, flags, bg time subtraction
3) Search using all threads
-- Option for plug-and-play detection algorithm and multiple filters
4) Save candidate and noise info, if requested
"""
if type(segments) == int:
segments = [segments]
logger.info('Starting search of %s, scan %d, segments %s' % (d['filename'], d['scan'], str(segments)))
assert os.path.exists(d['gainfile']), 'Calibration file autodetection failed for gainfile {0}'.format(d['gainfile'])
# seed the pseudo-random number generator # TJWL
random.seed()
# set up shared arrays to fill
data_read_mem = mps.Array(mps.ctypes.c_float, datasize(d)*2); data_mem = mps.Array(mps.ctypes.c_float, datasize(d)*2)
u_read_mem = mps.Array(mps.ctypes.c_float, d['nbl']); u_mem = mps.Array(mps.ctypes.c_float, d['nbl'])
v_read_mem = mps.Array(mps.ctypes.c_float, d['nbl']); v_mem = mps.Array(mps.ctypes.c_float, d['nbl'])
w_read_mem = mps.Array(mps.ctypes.c_float, d['nbl']); w_mem = mps.Array(mps.ctypes.c_float, d['nbl'])
# need these if debugging
data = numpyview(data_mem, 'complex64', datashape(d)) # optional
data_read = numpyview(data_read_mem, 'complex64', datashape(d)) # optional
u = numpyview(u_mem, 'float32', d['nbl'], raw=False)
v = numpyview(v_mem, 'float32', d['nbl'], raw=False)
w = numpyview(w_mem, 'float32', d['nbl'], raw=False)
# plan fft
logger.debug('Planning FFT...')
arr = pyfftw.empty_aligned((d['npixx'], d['npixy']), dtype='complex64', n=16)
arr[:] = n.random.randn(*arr.shape) + 1j*n.random.randn(*arr.shape)
fft_arr = pyfftw.interfaces.numpy_fft.ifft2(arr)
results = {}
# only one needed for parallel read/process. more would overwrite memory space
with closing(mp.Pool(1, initializer=initread, initargs=(data_read_mem, u_read_mem, v_read_mem, w_read_mem, data_mem, u_mem, v_mem, w_mem))) as readpool:
try:
# submit all segments to pool of 1. locking data should keep this from running away.
for segment in segments:
assert segment in range(d['nsegments']), 'Segment %d not in range of %d nsegments' % (segment, d['nsegments'])
candsfile = getcandsfile(d, segment)
if d['savecands'] and os.path.exists(candsfile):
logger.error('candsfile %s already exists. Ending processing...' % candsfile)
else:
results[segment] = readpool.apply_async(pipeline_dataprep, (d, segment)) # no need for segment here? need to think through structure...
# step through pool of jobs and pull data off as ready. this allows pool to continue to next segment.
while results.keys():
for segment in results.keys():
if results[segment].ready():
job = results.pop(segment)
d = job.get()
else:
continue
with data_mem.get_lock():
cands = search(d, data_mem, u_mem, v_mem, w_mem)
# save candidate info
if d['savecands']:
logger.info('Saving %d candidates for segment %d...'
% (len(cands), segment))
savecands(d, cands)
except KeyboardInterrupt:
logger.error('Caught Ctrl-C. Closing processing pool.')
readpool.terminate()
readpool.join()
raise
def pipeline_dataprep(d, segment):
""" Single-threaded pipeline for data prep that can be started in a pool.
"""
logger.debug('dataprep starting for segment %d' % segment)
# dataprep reads for a single segment, so d['segment'] defined here
d['segment'] = segment
# set up numpy arrays, as expected by dataprep functions
data_read = numpyview(data_read_mem, 'complex64', datashape(d), raw=False); data = numpyview(data_mem, 'complex64', datashape(d), raw=False)
u_read = numpyview(u_read_mem, 'float32', d['nbl'], raw=False); u = numpyview(u_mem, 'float32', d['nbl'], raw=False)
v_read = numpyview(v_read_mem, 'float32', d['nbl'], raw=False); v = numpyview(v_mem, 'float32', d['nbl'], raw=False)
w_read = numpyview(w_read_mem, 'float32', d['nbl'], raw=False); w = numpyview(w_mem, 'float32', d['nbl'], raw=False)
#### #### #### ####
# 1) Read data
#### #### #### ####
with data_read_mem.get_lock():
if d['dataformat'] == 'ms': # CASA-based read
segread = pm.readsegment(d, segment)
data_read[:] = segread[0]
(u_read[:], v_read[:], w_read[:]) = (segread[1][d['readints']/2], segread[2][d['readints']/2], segread[3][d['readints']/2]) # mid int good enough for segment. could extend this to save per chunk
del segread
elif d['dataformat'] == 'sdm':
data_read[:] = ps.read_bdf_segment(d, segment)
(u_read[:], v_read[:], w_read[:]) = ps.get_uvw_segment(d, segment)
#### #### #### ####
# 2) Prepare data
#### #### #### ####
# calibrate data
if os.path.exists(d['gainfile']):
try:
radec = (); spwind = []; calname = '' # set defaults
if '.GN' in d['gainfile']: # if telcal file
if d.has_key('calname'):
calname = d['calname']
sols = pc.telcal_sol(d['gainfile']) # parse gainfile
else: # if CASA table
if d.has_key('calradec'):
radec = d['calradec'] # optionally defined cal location
spwind = d['spw']
sols = pc.casa_sol(d['gainfile'], flagants=d['flagantsol']) # parse gainfile
sols.parsebp(d['bpfile']) # parse bpfile
# if gainfile parsed ok, choose best solution for data
sols.set_selection(d['segmenttimes'][segment].mean(), d['freq']*1e9, rtlib.calc_blarr(d), calname=calname, pols=d['pols'], radec=radec, spwind=spwind)
sols.apply(data_read)
except:
logger.warning('Could not parse or apply gainfile %s.' % d['gainfile'])
raise
else:
logger.warn('Calibration file not found. Proceeding with no calibration applied.')
# flag data
if len(d['flaglist']):
logger.info('Flagging with flaglist: %s' % d['flaglist'])
dataflag(d, data_read)
else:
logger.warn('No real-time flagging.')
# mean visibility subtraction in time
if d['timesub'] == 'mean':
logger.info('Subtracting mean visibility in time...')
rtlib.meantsub(data_read, [0, d['nbl']])
else:
logger.warn('No mean time subtraction.')
# save noise pickle
if d['savenoise']:
noisepickle(d, data_read, u_read, v_read, w_read, chunk=200)
# phase to new location if l1,m1 set and nonzero value
try:
if any([d['l1'], d['m1']]):
logger.info('Rephasing data to (l, m)=(%.4f, %.4f).' % (d['l1'], d['m1']))
rtlib.phaseshift_threaded(data_read, d, d['l1'], d['m1'], u_read, v_read)
d['l0'] = d['l1']
d['m0'] = d['m1']
else:
logger.debug('Not rephasing.')
except KeyError:
pass
if d['mock']: # could be list or int
# assume that std of vis in the middle of the segment is
# characteristic of noise throughout the segment
falsecands = {}
datamid = n.ma.masked_equal(data_read[d['readints']/2].real, 0, copy=True)
madstd = 1.4826 * n.ma.median(n.abs(datamid - n.ma.median(datamid)))/n.sqrt(d['npol']*d['nbl']*d['nchan'])
std = datamid.std()/n.sqrt(d['npol']*d['nbl']*d['nchan'])
logger.debug('Noise per vis in central int: madstd {}, std {}'.format(madstd, std))
dt = 1 # pulse width in integrations
if isinstance(d['mock'], int):
for i in n.random.randint(d['datadelay'][-1], d['readints'], d['mock']): # add nmock transients at random ints
(loff, moff, A, DM) = make_transient(madstd, max(d['dmarr']), Amin=1.2*d['sigma_image1'])
candid = (int(segment), int(i), DM, int(dt), int(0))
falsecands[candid] = [A/madstd, A, loff, moff]
elif isinstance(d['mock'], list):
for mock in d['mock']:
try:
(i, DM, loff, moff, SNR) = mock
candid = (int(segment), int(i), DM, int(dt), int(0))
falsecands[candid] = [SNR, SNR*madstd, loff, moff]
except:
logger.warn('Could not parse mock parameters: {}'.format(mock))
else:
logger.warn('Not a recognized type for mock.')
for candid in falsecands:
(segment, i, DM, dt, beamnum) = candid
(SNR, A, loff, moff) = falsecands[candid]
logger.info('Adding mock transient at int %d, DM %.1f, (l, m) = (%f, %f) at est SNR %.1f' % (i, DM, loff, moff, SNR))
add_transient(d, data_read, u_read, v_read, w_read, loff, moff, i, A, DM, dt)
if d['savecands']:
savecands(d, falsecands, domock=True)
with data_mem.get_lock():
data[:] = data_read[:]
u[:] = u_read[:]; v[:] = v_read[:]; w[:] = w_read[:]
logger.debug('All data unlocked for segment %d' % segment)
# d now has segment keyword defined
return d
def pipeline_reproduce(d, candloc=[], segment=None, lm=None, product='data'):
""" Reproduce data and/or candidates with given candloc or lm coordinate.
d and segment can be given, if only reading data.
candloc is length 5 or 6 with ([scan], segment, candint, dmind, dtind, beamnum).
product can be 'data', 'dataph', 'imdata', 'datacorr'.
lm is tuple of (l,m) coordinates in radians.
"""
# set up shared arrays to fill
data_reproduce_mem = mps.Array(mps.ctypes.c_float, datasize(d)*2)
data_read_mem = mps.Array(mps.ctypes.c_float, datasize(d)*2)
data_mem = mps.Array(mps.ctypes.c_float, datasize(d)*2)
u_read_mem = mps.Array(mps.ctypes.c_float, d['nbl'])
u_mem = mps.Array(mps.ctypes.c_float, d['nbl'])
v_read_mem = mps.Array(mps.ctypes.c_float, d['nbl'])
v_mem = mps.Array(mps.ctypes.c_float, d['nbl'])
w_read_mem = mps.Array(mps.ctypes.c_float, d['nbl'])
w_mem = mps.Array(mps.ctypes.c_float, d['nbl'])
# get numpy views of memory spaces
data = numpyview(data_mem, 'complex64', datashape(d)) # optional
data_read = numpyview(data_read_mem, 'complex64', datashape(d)) # optional
u = numpyview(u_mem, 'float32', d['nbl'], raw=False)
v = numpyview(v_mem, 'float32', d['nbl'], raw=False)
w = numpyview(w_mem, 'float32', d['nbl'], raw=False)
# set up state dict for merge pkl
if len(candloc) == 6:
scan, segment, candint, dmind, dtind, beamnum = candloc
# this is now defined by call to rtpipe.set_pipeline in parsecands.plot_cand
# d['scan'] = scan
# d['starttime_mjd'] = d['starttime_mjddict'][scan]
# d['nsegments'] = len(d['segmenttimesdict'][scan])
# d['segmenttimes'] = d['segmenttimesdict'][scan]
elif len(candloc) == 5: # if not a merge pkl, then d['scan'] is correct
segment, candint, dmind, dtind, beamnum = candloc
elif isinstance(segment, int):
assert product == 'data', 'If only providing segment, then only data product can be produced.'
else:
logger.error('candloc must be length 5 or 6 or segment provided.')
return
with closing(mp.Pool(1, initializer=initread, initargs=(data_read_mem, u_read_mem, v_read_mem, w_read_mem, data_mem, u_mem, v_mem, w_mem))) as readpool:
readpool.apply(pipeline_dataprep, (d, segment))
if product == 'data':
logger.info('Returning prepared data...')
return data
elif product == 'dataph':
logger.info('Reproducing data and phasing...')
assert lm, 'lm must be tuple with (l, m) coords in radians.'
data = runreproduce(d, data_mem, data_reproduce_mem, u, v, w, dmind, dtind, lm=lm)
return data
elif product == 'datacorr':
logger.info('Reproducing data...')
data = runreproduce(d, data_mem, data_reproduce_mem, u, v, w, dmind, dtind)
return data
elif product == 'imdata':
logger.info('Reproducing candidate...')
im, data = runreproduce(d, data_mem, data_reproduce_mem, u, v, w, dmind, dtind, candint=candint)
return im, data
else:
logger.error('product must be data, dataph, or imdata.')
def meantsubpool(d, data_read):
""" Wrapper for mean visibility subtraction in time.
Doesn't work when called from pipeline using multiprocessing pool.
"""
logger.info('Subtracting mean visibility in time...')
data_read = numpyview(data_read_mem, 'complex64', datashape(d))
tsubpart = partial(rtlib.meantsub, data_read)
blranges = [(d['nbl'] * t/d['nthread'], d['nbl']*(t+1)/d['nthread']) for t in range(d['nthread'])]
with closing(mp.Pool(1, initializer=initreadonly, initargs=(data_read_mem,))) as tsubpool:
tsubpool.map(tsubpart, blranges)
def dataflag(d, data_read):
""" Flagging data in single process
"""
for flag in d['flaglist']:
mode, sig, conv = flag
# resultlist = []
# with closing(mp.Pool(4, initializer=initreadonly, initargs=(data_read_mem,))) as flagpool:
for ss in d['spw']:
chans = n.arange(d['spw_chanr_select'][ss][0], d['spw_chanr_select'][ss][1])
for pol in range(d['npol']):
status = rtlib.dataflag(data_read, chans, pol, d, sig, mode, conv)
logger.info(status)
# hack to get rid of bad spw/pol combos whacked by rfi
if 'badspwpol' in d:
logger.info('Comparing overall power between spw/pol. Removing those with %d times typical value' % d['badspwpol'])
spwpol = {}
for spw in d['spw']:
chans = n.arange(d['spw_chanr_select'][spw][0], d['spw_chanr_select'][spw][1])
for pol in range(d['npol']):
spwpol[(spw, pol)] = n.abs(data_read[:,:,chans,pol]).std()
meanstd = n.mean(spwpol.values())
for (spw,pol) in spwpol:
if spwpol[(spw, pol)] > d['badspwpol']*meanstd:
logger.info('Flagging all of (spw %d, pol %d) for excess noise.' % (spw, pol))
chans = n.arange(d['spw_chanr_select'][spw][0], d['spw_chanr_select'][spw][1])
data_read[:,:,chans,pol] = 0j
def dataflagatom(chans, pol, d, sig, mode, conv):
""" Wrapper function to get shared memory as numpy array into pool
Assumes data_mem is global mps.Array
"""
data = numpyview(data_mem, 'complex64', datashape(d))
# data = n.ma.masked_array(data, data==0j) # this causes massive overflagging on 14sep03 data
return rtlib.dataflag(data, chans, pol, d, sig, mode, conv)
def search(d, data_mem, u_mem, v_mem, w_mem):
""" Search function.
Queues all trials with multiprocessing.
Assumes shared memory system with single uvw grid for all images.
"""
data = numpyview(data_mem, 'complex64', datashape(d))
u = numpyview(u_mem, 'float32', d['nbl'])
v = numpyview(v_mem, 'float32', d['nbl'])
w = numpyview(w_mem, 'float32', d['nbl'])
data_resamp_mem = mps.Array(mps.ctypes.c_float, datasize(d)*2)
data_resamp = numpyview(data_resamp_mem, 'complex64', datashape(d))
logger.debug('Search of segment %d' % d['segment'])
beamnum = 0 # not yet implemented
cands = {}
candsfile = getcandsfile(d)
if d['savecands'] and os.path.exists(candsfile):
logger.warn('candsfile %s already exists' % candsfile)
return cands
# make wterm kernels
if d['searchtype'] == 'image2w':
wres = 100
npix = max(d['npixx_full'], d['npixy_full'])
bls, uvkers = rtlib.genuvkernels(w, wres, npix, d['uvres'], thresh=0.05)
# SUBMITTING THE LOOPS
if n.any(data):
logger.debug('Searching in %d chunks with %d threads' % (d['nchunk'], d['nthread']))
logger.info('Dedispering to max (DM, dt) of (%d, %d) ...' % (d['dmarr'][-1], d['dtarr'][-1]) )
# open pool
with closing(mp.Pool(d['nthread'], initializer=initresamp, initargs=(data_mem, data_resamp_mem))) as resamppool:
blranges = [(d['nbl'] * t/d['nthread'], d['nbl']*(t+1)/d['nthread']) for t in range(d['nthread'])]
for dmind in xrange(len(d['dmarr'])):
dm = d['dmarr'][dmind]
logger.debug('Dedispersing for %d' % dm,)
dedisppart = partial(correct_dm, d, dm) # moves in fresh data
dedispresults = resamppool.map(dedisppart, blranges)
dtlast = 1
for dtind in xrange(len(d['dtarr'])):
dt = d['dtarr'][dtind]
if dt > 1:
# dedispersion in shared memory, mapped over baselines
# set partial functions for pool.map
logger.debug('Resampling for %d' % dt,)
resample = dt/dtlast
resamppart = partial(correct_dt, d, resample) # corrects in place
resampresults = resamppool.map(resamppart, blranges)
dtlast = dt
# set dm- and dt-dependent int ranges for segment
nskip_dm = ((d['datadelay'][-1] - d['datadelay'][dmind]) / dt) * (d['segment'] != 0) # nskip=0 for first segment
searchints = (d['readints'] - d['datadelay'][dmind]) / dt - nskip_dm
logger.debug('Imaging %d ints from %d for (%d,%d)' % (searchints, nskip_dm, dm, dt),)
# imaging in shared memory, mapped over ints
image1part = partial(image1, d, u, v, w, dmind, dtind, beamnum)
nchunkdt = min(searchints, max(d['nthread'], d['nchunk']/dt)) # parallelize in range bounded by (searchints, nthread)
irange = [(nskip_dm + searchints*chunk/nchunkdt, nskip_dm + searchints*(chunk+1)/nchunkdt) for chunk in range(nchunkdt)]
imageresults = resamppool.map(image1part, irange)
# COLLECTING THE RESULTS per dm/dt. Clears the way for overwriting data_resamp
for imageresult in imageresults:
for kk in imageresult.keys():
cands[kk] = imageresult[kk]
if 'sigma_plot' in d:
from rtpipe.reproduce import make_cand_plot as makecp
if 'snr2' in d['features']:
snrcol = d['features'].index('snr2')
elif 'snr1' in d['features']:
snrcol = d['features'].index('snr1')
snrs = n.array([value[snrcol] for value in cands.itervalues()])
maxsnr = max([0] + [value[snrcol] for value in cands.itervalues()]) # be sure max includes at least one value
if maxsnr > d['sigma_plot']:
segment, candint, dmind, dtind, beamnum = [key for key, value in cands.iteritems() if value[snrcol] == maxsnr][0]
logger.info('Making cand plot for scan %d, segment %d, candint %d, dmind %d, dtint %d with SNR %.1f.' % (d['scan'], segment, candint, dmind, dtind, maxsnr))
im, data = runreproduce(d, data_mem, data_resamp_mem, u, v, w, dmind, dtind, candint)
loclabel = [d['scan'], segment, candint, dmind, dtind, beamnum]
makecp(d, im, data, loclabel, version=2, snrs=snrs)
else:
logger.info('No candidate in segment %d above sigma_plot %.1f' % (d['segment'], d['sigma_plot']))
else:
logger.warn('Data for processing is zeros. Moving on...')
logger.info('Found %d cands in scan %d segment %d of %s. ' % (len(cands), d['scan'], d['segment'], d['filename']))
return cands
def runreproduce(d, data_mem, data_resamp_mem, u, v, w, dmind, dtind, candint=-1, lm=None, twindow=30):
""" Reproduce function, much like search.
Returns image and rephased data for given candint.
If no candint is given, it returns resampled data by default. Optionally rephases to lm=(l, m) coordinates.
"""
data_resamp = numpyview(data_resamp_mem, 'complex64', datashape(d))
with closing(mp.Pool(1, initializer=initresamp, initargs=(data_mem, data_resamp_mem))) as repropool:
# dedisperse
logger.info('Dedispersing with DM=%.1f, dt=%d...' % (d['dmarr'][dmind], d['dtarr'][dtind]))
repropool.apply(correct_dmdt, [d, dmind, dtind, (0,d['nbl'])])
# set up image
if 'image1' in d['searchtype']:
npixx = d['npixx']
npixy = d['npixy']
elif 'image2' in d['searchtype']:
npixx = d['npixx_full']
npixy = d['npixy_full']
if candint > -1:
if lm:
logger.warn('Using candint image to get l,m. Not using provided l,m.')
# image
logger.info('Imaging int %d with %d %d pixels...' % (candint, npixx, npixy))
im = repropool.apply(image1wrap, [d, u, v, w, npixx, npixy, candint/d['dtarr'][dtind]])
snrmin = im.min()/im.std()
snrmax = im.max()/im.std()
logger.info('Made image with SNR min, max: %.1f, %.1f' % (snrmin, snrmax))
if snrmax > -1*snrmin:
l1, m1 = calc_lm(d, im, minmax='max')
else:
l1, m1 = calc_lm(d, im, minmax='min')
# rephase and trim interesting ints out
repropool.apply(move_phasecenter, [d, l1, m1, u, v])
minint = max(candint/d['dtarr'][dtind]-twindow/2, 0)
maxint = min(candint/d['dtarr'][dtind]+twindow/2, len(data_resamp)/d['dtarr'][dtind])
return(im, data_resamp[minint:maxint].mean(axis=1))
else:
if lm:
l1, m1 = lm
repropool.apply(move_phasecenter, [d, l1, m1, u, v])
return data_resamp
def add_transient(d, data, u, v, w, l1, m1, i, s, dm=0, dt=1):
""" Add a transient to data.
l1, m1 are relative direction cosines (location) of transient
added at integration i (at highest freq) with brightness s (per int/chan/bl/pol in data units)
dm/dt are dispersion (in pc/cm3) and pulse width (in s).
"""
ang = lambda ch: l1 * u * d['freq'][ch]/d['freq_orig'][0] + m1 * v * d['freq'][ch]/d['freq_orig'][0]
delay = lambda ch: n.round(4.1488e-3 * dm * (d['freq'][ch]**(-2) - d['freq'][-1]**(-2))/d['inttime'], 0).astype(int)
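# dispersive delay of channel ch in integrations, relative to the highest-frequency channel; 4.1488e-3 s is the dispersion constant for DM in pc/cm3 and frequencies in GHz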
#snr_ideal = s/(data[i].real.std()/n.sqrt(d['npol']*d['nbl']*d['nchan']))
#logger.info('SNR of source with system brightness %.1f = %d (idealized; ok at low SNR)' % (s, int(snr_ideal)))
for ch in range(d['nchan']):
data[i+delay(ch):i+delay(ch)+dt, :, ch] += s * n.exp(2j*n.pi*ang(ch)[None,:,None])
def make_transient(std, DMmax, Amin=6., Amax=20., rmax=20., rmin=0., DMmin=0.):
""" Produce a mock transient pulse source for the purposes of characterizing the
detection success of the current pipeline.
Assumes
- Code to inject the transients does so by inserting at an array index
- Noise level at the center of the data array is characteristic of the
noise level throughout
Input
std - noise level in visibilities(?) at mid-point of segment
DMmax - maximum DM at which mock transient can be inserted [pc/cm^3]
Amin/Amax is amplitude in units of the std (calculated below)
rmax/rmin is radius range in arcmin
DMmin is min DM
Returns
loff - direction cosine offset of mock transient from phase center [radians]
moff - direction cosine offset of mock transient from phase center [radians]
A - amplitude of transient [std units]
DM - dispersion measure of mock transient [pc/cm^3]
"""
rad_arcmin = math.pi/(180*60)
phimin = 0.0
phimax = 2*math.pi
# Amplitude of transient, done in units of the std
# std is calculated assuming that noise level in the middle of the data,
# at index d['readints']/2, is characteristic of that throughout the data
A = random.uniform(Amin, Amax) * std
# Position of transient, in direction cosines
r = random.uniform(rmin, rmax)
phi = random.uniform(phimin, phimax)
loff = r*math.cos(phi) * rad_arcmin
moff = r*math.sin(phi) * rad_arcmin
# Dispersion measure
DM = random.uniform(DMmin, DMmax)
return loff, moff, A, DM
def pipeline_refine(d0, candloc, scaledm=2.1, scalepix=2, scaleuv=1.0, chans=[], returndata=False):
"""
Reproduces candidate and potentially improves sensitivity through better DM and imaging parameters.
scale* parameters enhance sensitivity by making refining dmgrid and images.
Other options include:
d0['selectpol'] = ['RR']
d0['flaglist'] = [('blstd', 2.5, 0.05)]
"""
import rtpipe.parseparams as pp
assert len(candloc) == 6, 'candloc should be (scan, segment, candint, dmind, dtind, beamnum).'
scan, segment, candint, dmind, dtind, beamnum = candloc
d1 = d0.copy() # dont mess with original (mutable!)
segmenttimes = d1['segmenttimesdict'][scan]
# if file not at stated full path, assume it is local
if not os.path.exists(d1['filename']):
workdir = os.getcwd()
filename = os.path.join(workdir, os.path.basename(d1['filename']))
else:
filename = d1['filename']
# clean up d1 of superfluous keys
params = pp.Params() # will be used as input to rt.set_pipeline
for key in d1.keys():
if not hasattr(params, key):
_ = d1.pop(key)
d1['npix'] = 0; d1['uvres'] = 0
d1['savecands'] = False
d1['savenoise'] = False
d1['logfile'] = False
# redefine d. many parameters modified after this to keep from messing up time boundaries/cand location
d = set_pipeline(filename, scan, **d1)
if chans:
d['chans'] = chans
d['segmenttimes'] = segmenttimes
d['nsegments'] = len(segmenttimes)
data_mem = mps.Array(mps.ctypes.c_float, datasize(d)*2)
u_mem = mps.Array(mps.ctypes.c_float, d['nbl'])
v_mem = mps.Array(mps.ctypes.c_float, d['nbl'])
w_mem = mps.Array(mps.ctypes.c_float, d['nbl'])
data = numpyview(data_mem, 'complex64', datashape(d))
u = numpyview(u_mem, 'float32', d['nbl'])
v = numpyview(v_mem, 'float32', d['nbl'])
w = numpyview(w_mem, 'float32', d['nbl'])
# fill data, uvw
data[:] = pipeline_reproduce(d, segment=segment, product='data')
d['segment'] = segment
u[:], v[:], w[:] = ps.get_uvw_segment(d, segment)
# refine parameters
dmcand = d['dmarr'][dmind]
if scaledm > 1.:
try:
dmdelta = d['dmarr'][dmind+1] - d['dmarr'][dmind]
except IndexError:
try:
dmdelta = d['dmarr'][dmind] - d['dmarr'][dmind-1]
except IndexError:
dmdelta = 0.1*dmcand
d['dmarr'] = list(n.arange(dmcand-dmdelta, dmcand+dmdelta, dmdelta/scaledm))
elif scaledm == 1.:
d['dmarr'] = [dmcand]
d['datadelay'] = [rtlib.calc_delay(d['freq'], d['inttime'],dm).max() for dm in d['dmarr']] + [d['datadelay'][-1]]
d['dtarr'] = [d['dtarr'][dtind]]
d['npixx'] = scalepix*d['npixx']
d['npixy'] = scalepix*d['npixy']
d['uvres'] = scaleuv*d['uvres']
# search
logger.info('Refining DM grid to %s and expanding images to (%d, %d) pix with uvres %d' % (str(d['dmarr']), d['npixx'], d['npixy'], d['uvres']))
cands = search(d, data_mem, u_mem, v_mem, w_mem)
cands = {tuple([scan]+list(loc)):list(prop) for (loc, prop) in cands.iteritems()}
d['featureind'].insert(0, 'scan')
# making cand plot from this
# need to keep from confusing old and new indices
# im, data = rt.pipeline_reproduce(d, loc[candnum], product='imdata')
# scan, segment, candint, dmind, dtind, beamnum = loc
# loclabel = scan, segment, candint, dmind, dtind, beamnum
# make_cand_plot(d, im, data, loclabel, outname=outname)
# return info to reproduce/visualize refined cands
if returndata:
return data
else:
return d, cands
def pipeline_lightcurve(d, l1=0, m1=0, segments=[], scan=-1):
""" Makes lightcurve at given (l1, m1)
l1, m1 define phase center. if not set, then image max is used.
"""
if scan == -1: scan = d['scan']
if segments == []: segments = range(d['nsegments'])
d = set_pipeline(d['filename'], scan, fileroot=d['fileroot'], dmarr=[0], dtarr=[1], savenoise=False, timesub='', logfile=False, nsegments=d['nsegments'])
# define memory and numpy arrays
data_mem = mps.Array(mps.ctypes.c_float, datasize(d)*2)
data_read_mem = mps.Array(mps.ctypes.c_float, datasize(d)*2)
data_resamp_mem = mps.Array(mps.ctypes.c_float, datasize(d)*2)
u_read_mem = mps.Array(mps.ctypes.c_float, d['nbl'])
u_mem = mps.Array(mps.ctypes.c_float, d['nbl'])
v_read_mem = mps.Array(mps.ctypes.c_float, d['nbl'])
v_mem = mps.Array(mps.ctypes.c_float, d['nbl'])
w_read_mem = mps.Array(mps.ctypes.c_float, d['nbl'])
w_mem = mps.Array(mps.ctypes.c_float, d['nbl'])
data_read = numpyview(data_read_mem, 'complex64', datashape(d)) # optional
u_read = numpyview(u_read_mem, 'float32', d['nbl'], raw=False)
v_read = numpyview(v_read_mem, 'float32', d['nbl'], raw=False)
w_read = numpyview(w_read_mem, 'float32', d['nbl'], raw=False)
lightcurve = n.zeros(shape=(d['nints'], d['nchan'], d['npol']), dtype='complex64')
phasecenters = []
with closing(mp.Pool(1, initializer=initread, initargs=(data_read_mem, u_read_mem, v_read_mem, w_read_mem, data_mem, u_mem, v_mem, w_mem))) as readpool:
for segment in segments:
logger.info('Reading data...')
readpool.apply(pipeline_dataprep, (d, segment))
# get image peak for rephasing
if not any([l1, m1]):
im = sample_image(d, data_read, u_read, v_read, w_read, i=-1, verbose=1, imager='xy')
l2, m2 = calc_lm(d, im)
else:
l2 = l1
m2 = m1
logger.info('Rephasing data to (l, m)=(%.4f, %.4f).' % (l2, m2))
rtlib.phaseshift_threaded(data_read, d, l2, m2, u_read, v_read)
phasecenters.append( (l2,m2) )
            nskip = (24*3600*(d['segmenttimes'][segment,0] - d['starttime_mjd'])/d['inttime']).astype(int) # ensure the lightcurve is filled at exactly the integrations that were read
lightcurve[nskip: nskip+d['readints']] = data_read.mean(axis=1)
return phasecenters, lightcurve
def set_pipeline(filename, scan, fileroot='', paramfile='', **kwargs):
""" Function defines pipeline state for search. Takes data/scan as input.
fileroot is base name for associated products (cal files, noise, cands). if blank, it is set to filename.
paramfile is name of file that defines all pipeline parameters (python-like syntax).
    kwargs are used to override paramfile definitions.
Many parameters take 0 as default, which auto-defines ideal parameters.
This definition does not yet consider memory/cpu/time limitations.
nsegments defines how to break jobs in time. nchunk defines how many jobs are sent to nthreads.
"""
workdir = os.path.dirname(os.path.abspath(filename))
filename = filename.rstrip('/')
assert os.path.exists(filename)
# then get all metadata
if os.path.exists(os.path.join(filename, 'Main.xml')):
d = ps.get_metadata(filename, scan, paramfile=paramfile, **kwargs) # can take file name or Params instance
d['dataformat'] = 'sdm'
else:
d = pm.get_metadata(filename, scan, paramfile=paramfile, **kwargs)
d['dataformat'] = 'ms'
# set version
d['rtpipe_version'] = __version__
# define rootname for in/out cal/products
if fileroot:
d['fileroot'] = fileroot
else:
d['fileroot'] = os.path.basename(os.path.abspath(filename))
# autodetect calibration products locally
if not d['gainfile'] or not os.path.exists(d['gainfile']):
# first try to get CASA gain file
gainfilelist = glob.glob(os.path.join(d['workdir'], d['fileroot'] + '.g?'))
bpfilelist = glob.glob(os.path.join(d['workdir'], d['fileroot'] + '.b?'))
# if not in workdir, look locally
if not gainfilelist or not bpfilelist:
gainfilelist = glob.glob(d['fileroot'] + '.g?')
bpfilelist = glob.glob(d['fileroot'] + '.b?')
if gainfilelist and bpfilelist:
gainfilelist.sort()
d['gainfile'] = gainfilelist[-1]
logger.info('Autodetected CASA gainfile %s' % d['gainfile'])
bpfilelist.sort()
d['bpfile'] = bpfilelist[-1]
logger.info('Autodetected CASA bpfile %s' % d['bpfile'])
# if that fails, look for telcal file
filelist = glob.glob(os.path.join(d['workdir'], filename + '.GN'))
if not filelist:
filelist = glob.glob(filename + '.GN')
if filelist:
d['gainfile'] = filelist[0]
logger.info('Autodetected telcal file %s' % d['gainfile'])
if not os.path.exists(d['gainfile']):
logger.warn('Calibration file autodetection failed for gainfile {0}'.format(d['gainfile']))
# define features
d['featureind'] = ['segment', 'int', 'dmind', 'dtind', 'beamnum'] # feature index. should be stable.
if 'features' not in d:
if d['searchtype'] == 'image1':
d['features'] = ['snr1', 'immax1', 'l1', 'm1'] # features returned by image1
elif d['searchtype'] == 'image1snip':
d['features'] = ['snr1', 'immax1', 'l1', 'm1', 'im40', 'spec20']
elif d['searchtype'] == 'image1stats':
d['features'] = ['snr1', 'immax1', 'l1', 'm1', 'specstd', 'specskew', 'speckurtosis', 'imskew', 'imkurtosis'] # note: spec statistics are all or nothing.
elif 'image2' in d['searchtype']:
            d['features'] = ['snr1', 'immax1', 'l1', 'm1', 'snr2', 'immax2', 'l2', 'm2'] # features returned by image2
# set imaging parameters to use
if d['uvres'] == 0:
d['uvres'] = d['uvres_full']
else:
urange = d['urange'][scan]*(d['freq'].max()/d['freq_orig'][0]) # uvw from get_uvw already in lambda at ch0
vrange = d['vrange'][scan]*(d['freq'].max()/d['freq_orig'][0])
powers = n.fromfunction(lambda i,j: 2**i*3**j, (14,10), dtype='int') # power array for 2**i * 3**j
rangex = n.round(d['uvoversample']*urange).astype('int')
rangey = n.round(d['uvoversample']*vrange).astype('int')
largerx = n.where(powers-rangex/d['uvres'] > 0, powers, powers[-1,-1])
p2x, p3x = n.where(largerx == largerx.min())
largery = n.where(powers-rangey/d['uvres'] > 0, powers, powers[-1,-1])
p2y, p3y = n.where(largery == largery.min())
d['npixx_full'] = (2**p2x * 3**p3x)[0]
d['npixy_full'] = (2**p2y * 3**p3y)[0]
# set number of pixels to image
d['npixx'] = d['npixx_full']
d['npixy'] = d['npixy_full']
if 'npix_max' in d:
if d['npix_max']:
d['npixx'] = min(d['npix_max'], d['npixx_full'])
d['npixy'] = min(d['npix_max'], d['npixy_full'])
if d['npix']:
d['npixx'] = d['npix']
d['npixy'] = d['npix']
else:
d['npix'] = max(d['npixx'], d['npixy']) # this used to define fringe time
# define dmarr, if not already
if len(d['dmarr']) == 0:
if d.has_key('dm_maxloss') and d.has_key('maxdm') and d.has_key('dm_pulsewidth'):
d['dmarr'] = calc_dmgrid(d, maxloss=d['dm_maxloss'], maxdm=d['maxdm'], dt=d['dm_pulsewidth'])
if d['maxdm'] > 0:
logger.info('Calculated %d dms for max sensitivity loss %.2f, maxdm %d pc/cm3, and pulse width %d ms' % (len(d['dmarr']), d['dm_maxloss'], d['maxdm'], d['dm_pulsewidth']/1000))
else:
d['dmarr'] = [0]
logger.info('Can\'t calculate dm grid without dm_maxloss, maxdm, and dm_pulsewidth defined. Setting to [0].')
# define times for data to read
d['t_overlap'] = rtlib.calc_delay(d['freq'], d['inttime'], max(d['dmarr'])).max()*d['inttime'] # time of overlap for total dm coverage at segment boundaries
d['datadelay'] = [rtlib.calc_delay(d['freq'], d['inttime'],dm).max() for dm in d['dmarr']]
d['nints'] = d['nints'] - d['nskip']
# pols
if d.has_key('selectpol'):
d['pols'] = [pol for pol in d['pols_orig'] if pol in d['selectpol']]
else:
d['pols'] = d['pols_orig']
d['npol'] = len(d['pols'])
# split imaging into chunks. ideally one per thread, but can modify to fit available memory
if d['nchunk'] == 0:
d['nchunk'] = d['nthread']
# if nsegments is 0, then auto-define within memory limit
if not d['nsegments']:
fringetime = calc_fringetime(d)
d['nsegments'] = max(1, min(d['nints'], int(d['scale_nsegments']*d['inttime']*d['nints']/(fringetime-d['t_overlap'])))) # at least 1, at most nints
calc_segment_times(d)
# if auto nsegment definition makes segment too large, try to scale it down to fit in memory_limit (if provided)
# limit defined for dm sweep time and max nchunk/nthread ratio
if d.has_key('memory_limit'):
(vismem0, immem0) = calc_memory_footprint(d, limit=True)
assert vismem0+immem0 < d['memory_limit'], 'memory_limit of {0} is smaller than best solution of {1}. Try forcing nsegments/nchunk larger than {2}/{3} or reducing maxdm/npix'.format(d['memory_limit'], vismem0+immem0, d['nsegments'], max(d['dtarr'])/min(d['dtarr']))
(vismem, immem) = calc_memory_footprint(d)
if vismem+immem > d['memory_limit']:
logger.info('Over memory limit of {4} when reading {0} segments with {1} chunks ({2}/{3} GB for visibilities/imaging). Searching for solution down to {5}/{6} GB...'.format(d['nsegments'], d['nchunk'], vismem, immem, d['memory_limit'], vismem0, immem0))
while vismem+immem > d['memory_limit']:
(vismem, immem) = calc_memory_footprint(d)
                logger.debug('Using {0} segments with {1} chunks ({2}/{3} GB for visibilities/imaging). Searching for better solution...'.format(d['nsegments'], d['nchunk'], vismem, immem))
d['scale_nsegments'] = d['scale_nsegments'] * (vismem+immem)/float(d['memory_limit'])
d['nsegments'] = max(1, min(d['nints'], int(d['scale_nsegments']*d['inttime']*d['nints']/(fringetime-d['t_overlap'])))) # at least 1, at most nints
calc_segment_times(d)
(vismem, immem) = calc_memory_footprint(d)
while vismem+immem > d['memory_limit']:
logger.debug('Doubling nchunk from %d to fit in %d GB memory limit.' % (d['nchunk'], d['memory_limit']))
d['nchunk'] = 2*d['nchunk']
(vismem, immem) = calc_memory_footprint(d)
if d['nchunk'] >= max(d['dtarr'])/min(d['dtarr'])*d['nthread']: # limit nchunk/nthread to at most the range in dt
d['nchunk'] = d['nthread']
break
(vismem, immem) = calc_memory_footprint(d)
# final set up of memory
calc_segment_times(d)
(vismem, immem) = calc_memory_footprint(d)
# scaling of number of integrations beyond dt=1
assert all(d['dtarr']) and (d['dtarr'] == sorted(d['dtarr'])), 'dtarr must be larger than 0 and in increasing order'
# calculate number of thermal noise candidates per segment
nfalse = calc_nfalse(d)
logger.info('')
logger.info('Pipeline summary:')
if '.GN' in d['gainfile']:
logger.info('\t Products saved with %s. telcal calibration with %s' % (d['fileroot'], os.path.basename(d['gainfile'])))
else:
logger.info('\t Products saved with %s. CASA calibration files (%s, %s)' % (d['fileroot'], os.path.basename(d['gainfile']), os.path.basename(d['bpfile'])))
logger.info('\t Using %d segment%s of %d ints (%.1f s) with overlap of %.1f s' % (d['nsegments'], "s"[not d['nsegments']-1:], d['readints'], d['t_segment'], d['t_overlap']))
if d['t_overlap'] > d['t_segment']/3.:
logger.info('\t\t Lots of segments needed, since Max DM sweep (%.1f s) close to segment size (%.2f s)' % (d['t_overlap'], d['t_segment']))
logger.info('\t Downsampling in time/freq by %d/%d and skipping %d ints from start of scan.' % (d['read_tdownsample'], d['read_fdownsample'], d['nskip']))
logger.info('\t Excluding ants %s' % (d['excludeants']))
logger.info('\t Using pols %s' % (d['pols']))
logger.info('')
logger.info('\t Search with %s and threshold %.1f.' % (d['searchtype'], d['sigma_image1']))
logger.info('\t Using %d DMs from %.1f to %.1f and dts %s.' % (len(d['dmarr']), min(d['dmarr']), max(d['dmarr']), d['dtarr']))
logger.info('\t Using uvgrid npix=(%d,%d) and res=%d.' % (d['npixx'], d['npixy'], d['uvres']))
logger.info('\t Expect %d thermal false positives per segment.' % nfalse)
logger.info('')
logger.info('\t Visibility memory usage is %.1f GB/segment' % vismem)
    logger.info('\t Imaging in %d chunk%s using max of %.1f GB/segment' % (d['nchunk'], "s"[not d['nchunk']-1:], immem))
logger.info('\t Grand total memory usage: %.1f GB/segment' % (vismem + immem))
return d
def getcandsfile(d, segment=-1, domock=False):
""" Return name of candsfile for a given dictionary. Must have d['segment'] defined.
domock is option to save simulated cands.
"""
if domock:
prefix = 'candsmock_'
else:
        prefix = 'cands_'
if d.has_key('segment'):
return os.path.join(d['workdir'], prefix + d['fileroot'] + '_sc' + str(d['scan']) + 'seg' + str(d['segment']) + '.pkl')
elif segment >= 0:
return os.path.join(d['workdir'], prefix + d['fileroot'] + '_sc' + str(d['scan']) + 'seg' + str(segment) + '.pkl')
else:
return ''
def getnoisefile(d, segment=-1):
""" Return name of noisefile for a given dictionary. Must have d['segment'] defined.
"""
if d.has_key('segment'):
return os.path.join(d['workdir'], 'noise_' + d['fileroot'] + '_sc' + str(d['scan']) + 'seg' + str(d['segment']) + '.pkl')
elif segment >= 0:
return os.path.join(d['workdir'], 'noise_' + d['fileroot'] + '_sc' + str(d['scan']) + 'seg' + str(segment) + '.pkl')
else:
return ''
def calc_nfalse(d):
""" Calculate the number of thermal-noise false positives per segment.
"""
dtfactor = n.sum([1./i for i in d['dtarr']]) # assumes dedisperse-all algorithm
ntrials = d['readints'] * dtfactor * len(d['dmarr']) * d['npixx'] * d['npixy']
qfrac = 1 - (erf(d['sigma_image1']/n.sqrt(2)) + 1)/2.
nfalse = int(qfrac*ntrials)
return nfalse
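# Hedged worked example (illustrative numbers, not taken from any real
# observation): 1000 integrations, dtarr=[1, 2, 4, 8], 100 DM trials and a
# 512x512 image give ntrials ~ 4.9e10; at a 6.5 sigma threshold the one-sided
# Gaussian tail is ~4e-11, so roughly 2 thermal false positives per segment.
def _example_nfalse(readints=1000, dtarr=(1, 2, 4, 8), ndm=100, npix=512, sigma=6.5):
    import numpy
    from scipy.special import erf
    dtfactor = sum(1./dt for dt in dtarr)
    ntrials = readints * dtfactor * ndm * npix * npix
    qfrac = 1 - (erf(sigma/numpy.sqrt(2)) + 1)/2.
    return int(qfrac*ntrials)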
def calc_segment_times(d):
""" Helper function for set_pipeline to define segmenttimes list, given nsegments definition
"""
# this casts to int (flooring) to avoid 0.5 int rounding issue.
stopdts = n.linspace(d['nskip']+d['t_overlap']/d['inttime'], d['nints'], d['nsegments']+1)[1:] # nseg+1 assures that at least one seg made
startdts = n.concatenate( ([d['nskip']], stopdts[:-1]-d['t_overlap']/d['inttime']) )
segmenttimes = []
for (startdt, stopdt) in zip(d['inttime']*startdts, d['inttime']*stopdts):
starttime = qa.getvalue(qa.convert(qa.time(qa.quantity(d['starttime_mjd']+startdt/(24*3600),'d'),form=['ymd'], prec=9)[0], 's'))[0]/(24*3600)
stoptime = qa.getvalue(qa.convert(qa.time(qa.quantity(d['starttime_mjd']+stopdt/(24*3600), 'd'), form=['ymd'], prec=9)[0], 's'))[0]/(24*3600)
segmenttimes.append((starttime, stoptime))
d['segmenttimes'] = n.array(segmenttimes)
totaltimeread = 24*3600*(d['segmenttimes'][:, 1] - d['segmenttimes'][:, 0]).sum() # not guaranteed to be the same for each segment
d['readints'] = n.round(totaltimeread / (d['inttime']*d['nsegments'])).astype(int)
d['t_segment'] = totaltimeread/d['nsegments']
def calc_memory_footprint(d, headroom=4., visonly=False, limit=False):
""" Given pipeline state dict, this function calculates the memory required
to store visibilities and make images.
headroom scales visibility memory size from single data object to all copies (and potential file read needs)
    limit=True returns the minimum memory configuration
Returns tuple of (vismem, immem) in units of GB.
"""
toGB = 8/1024.**3 # number of complex64s to GB
d0 = d.copy()
# limit defined for dm sweep time and max nchunk/nthread ratio
if limit:
d0['readints'] = d['t_overlap']/d['inttime']
d0['nchunk'] = max(d['dtarr'])/min(d['dtarr']) * d['nthread']
vismem = headroom * datasize(d0) * toGB
if visonly:
return vismem
else:
immem = d0['nthread'] * (d0['readints']/d0['nchunk'] * d0['npixx'] * d0['npixy']) * toGB
return (vismem, immem)
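# Hedged worked example (hypothetical data shape): 1000 integrations, 351
# baselines, 256 channels and 2 polarizations hold ~1.8e8 complex64 samples,
# i.e. ~1.3 GB, or ~5.4 GB after the headroom factor of 4 used above.
def _example_vismem(readints=1000, nbl=351, nchan=256, npol=2, headroom=4.):
    toGB = 8/1024.**3 # GB per complex64 sample
    nsamples = readints * nbl * nchan * npol
    return headroom * nsamples * toGB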
def calc_fringetime(d):
""" Estimate largest time span of a "segment".
    A segment is the maximal time span that can have a single background fringe subtracted and a single uv grid definition.
Max fringe window estimated for 5% amp loss at first null averaged over all baselines. Assumes dec=+90, which is conservative.
Returns time in seconds that defines good window.
"""
maxbl = d['uvres']*d['npix']/2 # fringe time for imaged data only
fringetime = 0.5*(24*3600)/(2*n.pi*maxbl/25.) # max fringe window in seconds
return fringetime
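# Hedged worked example (hypothetical imaging parameters): with uvres=100
# lambda and npix=1024, the longest imaged baseline is 51200 lambda and the
# 5%-loss fringe window above evaluates to roughly 3.4 seconds.
def _example_fringetime(uvres=100, npix=1024):
    import numpy
    maxbl = uvres*npix/2
    return 0.5*(24*3600)/(2*numpy.pi*maxbl/25.)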
def correct_dmdt(d, dmind, dtind, blrange):
""" Dedisperses and resamples data *in place*.
Drops edges, since it assumes that data is read with overlapping chunks in time.
"""
data = numpyview(data_mem, 'complex64', datashape(d))
data_resamp = numpyview(data_resamp_mem, 'complex64', datashape(d))
bl0,bl1 = blrange
data_resamp[:, bl0:bl1] = data[:, bl0:bl1]
rtlib.dedisperse_resample(data_resamp, d['freq'], d['inttime'], d['dmarr'][dmind], d['dtarr'][dtind], blrange, verbose=0) # dedisperses data.
def correct_dm(d, dm, blrange):
""" Dedisperses data into data_resamp
Drops edges, since it assumes that data is read with overlapping chunks in time.
"""
data = numpyview(data_mem, 'complex64', datashape(d))
data_resamp = numpyview(data_resamp_mem, 'complex64', datashape(d))
bl0,bl1 = blrange
data_resamp[:, bl0:bl1] = data[:, bl0:bl1]
rtlib.dedisperse_par(data_resamp, d['freq'], d['inttime'], dm, blrange, verbose=0) # dedisperses data.
def correct_dt(d, dt, blrange):
""" Resamples data_resamp
Drops edges, since it assumes that data is read with overlapping chunks in time.
"""
data = numpyview(data_mem, 'complex64', datashape(d))
data_resamp = numpyview(data_resamp_mem, 'complex64', datashape(d))
bl0,bl1 = blrange
    rtlib.resample_par(data_resamp, d['freq'], d['inttime'], dt, blrange, verbose=0) # resamples data in time.
def calc_lm(d, im=[], pix=(), minmax='max'):
""" Helper function to calculate location of image pixel in (l,m) coords.
Assumes peak pixel, but input can be provided in pixel units.
minmax defines whether to look for image maximum or minimum.
"""
if len(pix) == 0: # default is to get pixel from image
if minmax == 'max':
peakl, peakm = n.where(im == im.max())
elif minmax == 'min':
peakl, peakm = n.where(im == im.min())
peakl = peakl[0]; peakm = peakm[0]
elif len(pix) == 2: # can also specify
peakl, peakm = pix
if len(im):
npixx, npixy = im.shape
else:
npixx = d['npixx']
npixy = d['npixy']
l1 = (npixx/2. - peakl)/(npixx*d['uvres'])
m1 = (npixy/2. - peakm)/(npixy*d['uvres'])
return l1, m1
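# Hedged illustration (hypothetical pixel and grid values): an image peak at
# pixel (300, 200) of a square 512-pixel image with uvres=100 corresponds to
# offsets of roughly (-8.6e-4, +1.1e-3) radians from the phase center.
def _example_pix_to_lm(peakl=300, peakm=200, npix=512, uvres=100):
    l1 = (npix/2. - peakl)/(npix*uvres)
    m1 = (npix/2. - peakm)/(npix*uvres)
    return l1, m1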
def move_phasecenter(d, l1, m1, u, v):
""" Handler function for phaseshift_threaded
"""
logger.info('Rephasing data to (l, m)=(%.4f, %.4f).' % (l1, m1))
data_resamp = numpyview(data_resamp_mem, 'complex64', datashape(d))
rtlib.phaseshift_threaded(data_resamp, d, l1, m1, u, v)
def calc_dmgrid(d, maxloss=0.05, dt=3000., mindm=0., maxdm=0.):
""" Function to calculate the DM values for a given maximum sensitivity loss.
maxloss is sensitivity loss tolerated by dm bin width. dt is assumed pulse width in microsec.
"""
# parameters
    tsamp = d['inttime']*1e6 # integration time in microsec
    k = 8.3 # dispersion smearing constant (microsec per pc/cm3, with channel width in MHz and frequency in GHz)
    freq = d['freq'].mean() # central (mean) frequency in GHz
    bw = 1e3*(d['freq'][-1] - d['freq'][0]) # total bandwidth in MHz
    ch = 1e3*(d['freq'][1] - d['freq'][0]) # channel width in MHz
# width functions and loss factor
dt0 = lambda dm: n.sqrt(dt**2 + tsamp**2 + ((k*dm*ch)/(freq**3))**2)
dt1 = lambda dm, ddm: n.sqrt(dt**2 + tsamp**2 + ((k*dm*ch)/(freq**3))**2 + ((k*ddm*bw)/(freq**3.))**2)
loss = lambda dm, ddm: 1 - n.sqrt(dt0(dm)/dt1(dm,ddm))
loss_cordes = lambda ddm, dfreq, dt, freq: 1 - (n.sqrt(n.pi) / (2 * 6.91e-3 * ddm * dfreq / (dt*freq**3))) * erf(6.91e-3 * ddm * dfreq / (dt*freq**3)) # not quite right for underresolved pulses
if maxdm == 0:
return [0]
else:
# iterate over dmgrid to find optimal dm values. go higher than maxdm to be sure final list includes full range.
dmgrid = n.arange(mindm, maxdm, 0.05)
dmgrid_final = [dmgrid[0]]
for i in range(len(dmgrid)):
ddm = (dmgrid[i] - dmgrid_final[-1])/2.
ll = loss(dmgrid[i],ddm)
if ll > maxloss:
dmgrid_final.append(dmgrid[i])
return dmgrid_final
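# Hedged usage sketch (hypothetical observing setup; assumes the module-level
# numpy alias 'n' and the calc_dmgrid function above): 5 ms integrations and
# 256 channels across 1.2-1.5 GHz, gridded up to maxdm=100 pc/cm3 with at most
# 5% sensitivity loss per DM step for an assumed 3 ms (3000 microsec) pulse.
def _example_dmgrid():
    d = {'inttime': 0.005, 'freq': n.linspace(1.2, 1.5, 256)}
    return calc_dmgrid(d, maxloss=0.05, dt=3000., mindm=0., maxdm=100.)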
def image1(d, u, v, w, dmind, dtind, beamnum, irange):
""" Parallelizable function for imaging a chunk of data for a single dm.
Assumes data is dedispersed and resampled, so this just images each integration.
Simple one-stage imaging that returns dict of params.
returns dictionary with keys of cand location and values as tuple of features
"""
i0, i1 = irange
data_resamp = numpyview(data_resamp_mem, 'complex64', datashape(d))
# logger.info('i0 {0}, i1 {1}, dm {2}, dt {3}, len {4}'.format(i0, i1, dmind, dtind, len(data_resamp)))
ims,snr,candints = rtlib.imgallfullfilterxyflux(n.outer(u, d['freq']/d['freq_orig'][0]), n.outer(v, d['freq']/d['freq_orig'][0]), data_resamp[i0:i1], d['npixx'], d['npixy'], d['uvres'], d['sigma_image1'])
# logger.info('finished imaging candints {0}'.format(candints))
feat = {}
for i in xrange(len(candints)):
if snr[i] > 0:
l1, m1 = calc_lm(d, ims[i], minmax='max')
else:
l1, m1 = calc_lm(d, ims[i], minmax='min')
logger.info('Got one! Int=%d, DM=%d, dt=%d: SNR_im=%.1f @ (%.2e,%.2e).' % ((i0+candints[i])*d['dtarr'][dtind], d['dmarr'][dmind], d['dtarr'][dtind], snr[i], l1, m1))
candid = (d['segment'], (i0+candints[i])*d['dtarr'][dtind], dmind, dtind, beamnum)
# logger.info(candid)
# assemble feature in requested order
ff = []
for feature in d['features']:
if feature == 'snr1':
ff.append(snr[i])
elif feature == 'immax1':
if snr[i] > 0:
ff.append(ims[i].max())
else:
ff.append(ims[i].min())
elif feature == 'l1':
ff.append(l1)
elif feature == 'm1':
ff.append(m1)
elif feature == 'im40': # 40 pixel image peak cutout
peakx, peaky = n.where(ims[i] == ims[i].max())
sizex, sizey = ims[i].shape
# set image window with min=0 and max=size
xmin = max(0, peakx - 20); xmax = min(peakx + 20, sizex)
ymin = max(0, peaky - 20); ymax = min(peaky + 20, sizey)
ff.append(ims[i][xmin:xmax,ymin:ymax])
elif feature == 'spec20': # 20 int spectrum cutout
# set int window with min 0 and max len()
imin = max(0, (i0+candints[i])*d['dtarr'][dtind] - 10)
imax = min( (i0+candints[i])*d['dtarr'][dtind] + 10, len(data_resamp))
data_cut = data_resamp[imin:imax].copy()
rtlib.phaseshift_threaded(data_cut, d, l1, m1, u, v)
ff.append(data_cut.mean(axis=1))
elif feature in ['specstd', 'specskew', 'speckurtosis']: # this is standard set and must all appear together
if feature == 'specstd': # first this one, then others will use same data
seli = (i0+candints[i])*d['dtarr'][dtind]
datasel = data_resamp[seli:seli+1].copy()
rtlib.phaseshift_threaded(datasel, d, l1, m1, u, v)
data = n.ma.masked_equal(datasel, 0j)
spec = data.mean(axis=3).mean(axis=1).mean(axis=0).real
std = spec.std(axis=0)
ff.append(std)
elif feature == 'specskew':
skew = float(mstats.skew(spec))
ff.append(skew)
elif feature == 'speckurtosis':
kurtosis = float(mstats.kurtosis(spec))
ff.append(kurtosis)
elif feature == 'imskew':
skew = float(mstats.skew(ims[i].flatten()))
ff.append(skew)
elif feature == 'imkurtosis':
kurtosis = float(mstats.kurtosis(ims[i].flatten()))
ff.append(kurtosis)
feat[candid] = list(ff)
return feat
def image2(d, i0, i1, u, v, w, dmind, dtind, beamnum):
""" Parallelizable function for imaging a chunk of data for a single dm.
Assumes data is dedispersed and resampled, so this just images each integration.
Two-stage imaging uses ideal uv coverage in second image.
returns dictionary with keys of cand location and values as tuple of features
"""
data_resamp = numpyview(data_resamp_mem, 'complex64', datashape(d))
ims,snr,candints = rtlib.imgallfullfilterxy(n.outer(u, d['freq']/d['freq_orig'][0]), n.outer(v, d['freq']/d['freq_orig'][0]), data_resamp[i0:i1], d['npixx'], d['npixy'], d['uvres'], d['sigma_image1'])
feat = {}
for i in xrange(len(candints)):
# reimage
im2 = rtlib.imgonefullxy(n.outer(u, d['freq']/d['freq_orig'][0]), | n.outer(v, d['freq']/d['freq_orig'][0]) | numpy.outer |
import os
import pickle
import os.path
import numpy as np
from PIL import Image
import bf3s.utils as utils
import torch.utils.data as data
import torchvision.datasets as datasets
import torchvision.transforms as transforms
_CIFAR_STD_PIXEL = [x / 255.0 for x in [63.0, 62.1, 66.7]]
_CIFAR_MEAN_PIXEL = [x / 255.0 for x in [125.3, 123.0, 113.9]]
class CIFAR100FewShot(data.Dataset):
def __init__(self, phase="train", do_not_use_random_transf=False,
data_dir="./datasets/CIFAR", data_split_dir="./data/cifar-fs_splits"):
assert phase in ("train", "val", "test")
self.phase = phase
self.name = "CIFAR100FewShot_" + phase
normalize = transforms.Normalize(mean=_CIFAR_MEAN_PIXEL, std=_CIFAR_STD_PIXEL)
if (self.phase == "test" or self.phase == "val") or do_not_use_random_transf:
self.transform = transforms.Compose([lambda x: np.asarray(x), transforms.ToTensor(), normalize])
else:
self.transform = transforms.Compose([transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(), lambda x: np.asarray(x),
transforms.ToTensor(), normalize])
pass
cifar100_metadata_path = os.path.join(data_dir, "cifar-100-python", "meta")
all_category_names = pickle.load(open(cifar100_metadata_path, "rb"))["fine_label_names"]
def read_categories(filename):
with open(filename) as f:
categories = f.readlines()
categories = [x.strip() for x in categories]
return categories
def get_label_ids(category_names):
label_ids = [all_category_names.index(cname) for cname in category_names]
return label_ids
train_category_names = read_categories(os.path.join(data_split_dir, "train.txt"))
val_category_names = read_categories(os.path.join(data_split_dir, "val.txt"))
test_category_names = read_categories(os.path.join(data_split_dir, "test.txt"))
train_category_ids = get_label_ids(train_category_names)
val_category_ids = get_label_ids(val_category_names)
test_category_ids = get_label_ids(test_category_names)
print(f"Loading CIFAR-100 FewShot dataset - phase {phase}")
if self.phase == "train":
self.data_train = datasets.__dict__["CIFAR100"](data_dir, train=True,
download=True, transform=self.transform)
self.labels = self.data_train.targets
self.images = self.data_train.data
self.label2ind = utils.build_label_index(self.labels)
self.labelIds = sorted(self.label2ind.keys())
self.num_cats = len(self.labelIds)
self.labelIds_base = train_category_ids
self.num_cats_base = len(self.labelIds_base)
elif self.phase == "val" or self.phase == "test":
self.data_train = datasets.__dict__["CIFAR100"](data_dir, train=True,
download=True, transform=self.transform)
labels_train = self.data_train.targets
images_train = self.data_train.data
label2ind_train = utils.build_label_index(labels_train)
self.labelIds_novel = (val_category_ids if (self.phase == "val") else test_category_ids)
labels_novel = []
images_novel = []
for label_id in self.labelIds_novel:
indices = label2ind_train[label_id]
images_novel.append(images_train[indices])
labels_novel += [labels_train[index] for index in indices]
pass
images_novel = np.concatenate(images_novel, axis=0)
assert images_novel.shape[0] == len(labels_novel)
self.data_test = datasets.__dict__["CIFAR100"](data_dir, train=False,
download=True, transform=self.transform)
labels_test = self.data_test.targets
images_test = self.data_test.data
label2ind_test = utils.build_label_index(labels_test)
self.labelIds_base = train_category_ids
labels_base = []
images_base = []
for label_id in self.labelIds_base:
indices = label2ind_test[label_id]
images_base.append(images_test[indices])
labels_base += [labels_test[index] for index in indices]
pass
images_base = | np.concatenate(images_base, axis=0) | numpy.concatenate |
# -*- coding: utf-8 -*-
# !TODO handle decimation in all transformations, template : T6_to_T3
import numpy as np
from numba import njit, prange
from typing import Union, Sequence
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
from numpy import ndarray, newaxis
from concurrent.futures import ThreadPoolExecutor
from ..tri.triutils import edges_tri
from ..utils import cells_coords
from .topodata import edgeIds_TET4, edgeIds_H8
from .topodata import edges_Q4, edges_H8, faces_H8
from .topo import unique_topo_data
__cache = True
__all__ = [
'transform_topo',
'T3_to_T6', 'T6_to_T3',
'Q4_to_Q8',
'Q4_to_Q9', 'Q9_to_Q4',
'Q9_to_T6',
'Q4_to_T3',
'H8_to_H27',
'H8_to_TET4'
]
DataLike = Union[ndarray, Sequence[ndarray]]
def transform_topo(topo: ndarray, path: ndarray, data: ndarray = None,
*args, MT=True, max_workers=4, **kwargs):
nD = len(path.shape)
if nD == 1:
path = path.reshape(1, len(path))
assert nD <= 2, "Path must be 1 or 2 dimensional."
if data is None:
return _transform_topo_(topo, path)
else:
if isinstance(data, ndarray):
data = transform_topo_data(topo, data, path)
return _transform_topo_(topo, path), data
elif isinstance(data, Iterable):
def foo(d): return transform_topo_data(topo, d, path)
if MT:
with ThreadPoolExecutor(max_workers=max_workers) as executor:
dmap = executor.map(foo, data)
else:
dmap = map(foo, data)
return _transform_topo_(topo, path), list(dmap)
def transform_topo_data(topo: ndarray, data: ndarray, path: ndarray):
if data.shape[:2] == topo.shape[:2]:
# it is assumed that values are provided for each node of each
# cell
res = repeat_cell_nodal_data(data, path)
elif data.shape[0] == topo.shape[0]:
# assume that data is constant over the elements
res = np.repeat(data, path.shape[0], axis=0)
else:
raise NotImplementedError("Invalid data shape {}".format(data.shape))
return res
@njit(nogil=True, parallel=True, cache=__cache)
def _transform_topo_(topo: ndarray, path: ndarray):
nE = len(topo)
nSub, nSubN = path.shape
res = np.zeros((nSub * nE, nSubN), dtype=topo.dtype)
for iE in prange(nE):
c = iE * nSub
for jE in prange(nSubN):
for kE in prange(nSub):
res[c + kE, jE] = topo[iE, path[kE, jE]]
return res
@njit(nogil=True, parallel=True, cache=__cache)
def repeat_cell_nodal_data(edata: ndarray, path: ndarray):
nSub, nSubN = path.shape
nE = edata.shape[0]
res = np.zeros((nSub*nE, nSubN) + edata.shape[2:], dtype=edata.dtype)
for i in prange(nE):
ii = nSub*i
for j in prange(nSub):
jj = ii+j
for k in prange(nSubN):
res[jj, k] = edata[i, path[j, k]]
return res
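# Hedged illustration (hypothetical node numbering): splitting one 6-noded
# triangle into four 3-noded subtriangles with the default T6 -> T3 path used
# below. The result rows are [[10, 13, 15], [13, 11, 14], [15, 14, 12], [15, 13, 14]].
def _example_transform_topo():
    topo = np.array([[10, 11, 12, 13, 14, 15]], dtype=np.int64)
    path = np.array([[0, 3, 5], [3, 1, 4], [5, 4, 2], [5, 3, 4]], dtype=np.int64)
    return transform_topo(topo, path)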
def T6_to_T3(coords: ndarray, topo: ndarray, data: DataLike = None,
*args, path: ndarray = None, decimate=True, **kwargs):
if isinstance(path, ndarray):
assert path.shape[1] == 3
else:
if path is None:
if decimate:
path = np.array([[0, 3, 5], [3, 1, 4],
[5, 4, 2], [5, 3, 4]], dtype=topo.dtype)
else:
path = | np.array([[0, 1, 2]], dtype=topo.dtype) | numpy.array |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for utils for evaluations using keras_util."""
import tempfile
import unittest
from absl.testing import parameterized
import apache_beam as beam
from apache_beam.testing import util
import numpy as np
import tensorflow as tf
from tensorflow_model_analysis.eval_saved_model import testutil
from tensorflow_model_analysis.evaluators import keras_util
from tensorflow_model_analysis.metrics import metric_types
_TF_MAJOR_VERSION = int(tf.version.VERSION.split('.')[0])
class KerasSavedModelUtilTest(testutil.TensorflowModelAnalysisTest,
parameterized.TestCase):
def _createBinaryClassificationMetrics(self):
return [
tf.keras.metrics.AUC(name='auc'),
tf.keras.metrics.AUC(name='auc_pr', curve='PR'),
tf.keras.metrics.Precision(name='precision'),
tf.keras.metrics.Recall(name='recall'),
tf.keras.metrics.TruePositives(name='true_positives'),
tf.keras.metrics.FalsePositives(name='false_positives'),
tf.keras.metrics.TrueNegatives(name='true_negatives'),
tf.keras.metrics.FalseNegatives(name='false_negatives'),
tf.keras.metrics.SpecificityAtSensitivity(
0.5, name='specificity_at_sensitivity'),
tf.keras.metrics.SensitivityAtSpecificity(
0.5, name='sensitivity_at_specificity')
]
def _createBinaryClassificationLosses(self):
return [tf.keras.losses.BinaryCrossentropy()]
def _createBinaryClassificationModel(self,
sequential=True,
output_names=None,
add_custom_metrics=False):
if not output_names:
layer = tf.keras.layers.Input(shape=(1,), name='output')
if sequential:
model = tf.keras.models.Sequential([layer, layer])
else:
model = tf.keras.models.Model(layer, layer)
if add_custom_metrics:
model.add_metric(tf.reduce_sum(layer), name='custom')
model.compile(
loss=self._createBinaryClassificationLosses(),
metrics=self._createBinaryClassificationMetrics(),
weighted_metrics=self._createBinaryClassificationMetrics())
model.fit(np.array([[1]]), np.array([[1]]))
else:
layers_per_output = {}
metrics_per_output = {}
weighted_metrics_per_output = {}
losses_per_output = {}
for output_name in output_names:
layers_per_output[output_name] = tf.keras.layers.Input(
shape=(1,), name=output_name)
metrics_per_output[output_name] = (
self._createBinaryClassificationMetrics())
weighted_metrics_per_output[output_name] = (
self._createBinaryClassificationMetrics())
losses_per_output[output_name] = (
self._createBinaryClassificationLosses())
if sequential:
raise ValueError('Sequential not supported with multi-output models')
else:
model = tf.keras.models.Model(layers_per_output, layers_per_output)
if add_custom_metrics:
for output_name in output_names:
model.add_metric(
tf.reduce_sum(layers_per_output[output_name]),
name=f'custom_{output_name}',
)
model.compile(
loss=losses_per_output,
metrics=metrics_per_output,
weighted_metrics=weighted_metrics_per_output)
model.fit({n: np.array([[1]]) for n in output_names},
{n: np.array([[1]]) for n in output_names})
export_path = tempfile.mkdtemp()
model.save(export_path, save_format='tf')
return export_path
def _createMultiClassClassificationMetrics(self):
return [
tf.keras.metrics.Precision(name='precision@2', top_k=2),
tf.keras.metrics.Precision(name='precision@3', top_k=3),
tf.keras.metrics.Recall(name='recall@2', top_k=2),
tf.keras.metrics.Recall(name='recall@3', top_k=3)
]
def _createMultiClassClassificationLosses(self):
    # Note cannot use SparseCategoricalCrossentropy since we are using Precision
# and Recall for the metrics which require dense labels.
return [tf.keras.losses.CategoricalCrossentropy()]
def _createMultiClassClassificationModel(self,
sequential=True,
output_names=None,
add_custom_metrics=False):
if not output_names:
layer = tf.keras.layers.Input(shape=(5,), name='output')
if sequential:
model = tf.keras.models.Sequential([layer, layer])
else:
model = tf.keras.models.Model(layer, layer)
if add_custom_metrics:
model.add_metric(tf.reduce_sum(layer), name='custom')
model.compile(
loss=self._createMultiClassClassificationLosses(),
metrics=self._createMultiClassClassificationMetrics(),
weighted_metrics=self._createMultiClassClassificationMetrics())
model.fit(np.array([[1, 0, 0, 0, 0]]), np.array([[1, 0, 0, 0, 0]]))
else:
layers_per_output = {}
metrics_per_output = {}
weighted_metrics_per_output = {}
losses_per_output = {}
for output_name in output_names:
layers_per_output[output_name] = tf.keras.layers.Input(
shape=(5,), name=output_name)
metrics_per_output[output_name] = (
self._createMultiClassClassificationMetrics())
weighted_metrics_per_output[output_name] = (
self._createMultiClassClassificationMetrics())
losses_per_output[output_name] = (
self._createMultiClassClassificationLosses())
if sequential:
raise ValueError('Sequential not supported with multi-output models')
else:
model = tf.keras.models.Model(layers_per_output, layers_per_output)
if add_custom_metrics:
for output_name in output_names:
model.add_metric(
tf.reduce_sum(layers_per_output[output_name]),
name=f'custom_{output_name}',
)
model.compile(
loss=losses_per_output,
metrics=metrics_per_output,
weighted_metrics=weighted_metrics_per_output)
model.fit({n: np.array([[1, 0, 0, 0, 0]]) for n in output_names},
{n: np.array([[1, 0, 0, 0, 0]]) for n in output_names})
export_path = tempfile.mkdtemp()
model.save(export_path, save_format='tf')
return export_path
@parameterized.named_parameters(
('compiled_metrics_sequential_model', True, False),
('compiled_metrics_functional_model', False, False),
('evaluate', False, True),
)
@unittest.skipIf(_TF_MAJOR_VERSION < 2, 'not all options supported in TFv1')
def testWithBinaryClassification(self, sequential_model, add_custom_metrics):
# If custom metrics are used, then model.evaluate is called.
export_dir = self._createBinaryClassificationModel(
sequential=sequential_model, add_custom_metrics=add_custom_metrics)
eval_shared_model = self.createTestEvalSharedModel(
eval_saved_model_path=export_dir)
computation = keras_util.metric_computations_using_keras_saved_model(
'', eval_shared_model.model_loader, None)[0]
inputs = [
metric_types.StandardMetricInputs(
labels=np.array([0.0]),
predictions=np.array([1.0]),
example_weights=np.array([0.5]),
input=self._makeExample(output=1.0).SerializeToString()),
metric_types.StandardMetricInputs(
labels=np.array([1.0]),
predictions=np.array([0.7]),
example_weights=np.array([0.7]),
input=self._makeExample(output=0.7).SerializeToString()),
metric_types.StandardMetricInputs(
labels=np.array([0.0]),
predictions=np.array([0.5]),
example_weights=np.array([0.9]),
input=self._makeExample(output=0.5).SerializeToString())
]
expected_values = {
'auc': 0.5,
'auc_pr': 0.30685,
'precision': 0.5,
'recall': 1.0,
'true_positives': 1.0,
'false_positives': 1.0,
'true_negatives': 1.0,
'false_negatives': 0.0,
'specificity_at_sensitivity': 0.5,
'sensitivity_at_specificity': 1.0,
'weighted_auc': 0.64286,
'weighted_auc_pr': 0.37467,
'weighted_precision': 0.5833333,
'weighted_recall': 1.0,
'weighted_true_positives': 0.7,
'weighted_false_positives': 0.5,
'weighted_true_negatives': 0.9,
'weighted_false_negatives': 0.0,
'weighted_specificity_at_sensitivity': 0.642857,
'weighted_sensitivity_at_specificity': 1.0,
'loss': 2.861993
}
if add_custom_metrics:
# Loss is different due to rounding errors from tf.Example conversion.
expected_values['loss'] = 2.8327076
expected_values['custom'] = 1.0 + 0.7 + 0.5
with beam.Pipeline() as pipeline:
# pylint: disable=no-value-for-parameter
result = (
pipeline
| 'Create' >> beam.Create(inputs)
| 'AddSlice' >> beam.Map(lambda x: ((), x))
| 'ComputeMetric' >> beam.CombinePerKey(computation.combiner))
# pylint: enable=no-value-for-parameter
def check_result(got):
try:
self.assertLen(got, 1)
got_slice_key, got_metrics = got[0]
self.assertEqual(got_slice_key, ())
expected = {
metric_types.MetricKey(name=name, example_weighted=None): value
for name, value in expected_values.items()
}
self.assertDictElementsAlmostEqual(got_metrics, expected)
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(result, check_result, label='result')
@parameterized.named_parameters(
('compiled_metrics', False),
('evaluate', True),
)
@unittest.skipIf(_TF_MAJOR_VERSION < 2, 'not all options supported in TFv1')
def testWithBinaryClassificationMultiOutput(self, add_custom_metrics):
# If custom metrics are used, then model.evaluate is called.
export_dir = self._createBinaryClassificationModel(
sequential=False,
output_names=('output_1', 'output_2'),
add_custom_metrics=add_custom_metrics)
eval_shared_model = self.createTestEvalSharedModel(
eval_saved_model_path=export_dir)
computation = keras_util.metric_computations_using_keras_saved_model(
'', eval_shared_model.model_loader, None)[0]
inputs = [
metric_types.StandardMetricInputs(
labels={
'output_1': | np.array([0.0]) | numpy.array |
import numpy as np
import math
from scipy.interpolate import RegularGridInterpolator, NearestNDInterpolator, LinearNDInterpolator
class vdf():
def __init__(self, v_max, resolution, coord_sys):
self.grid_cart = None
self.grid_spher = None
self.dvvv = None
self.vdf_interp = np.zeros((resolution,resolution,resolution))
        self.grid_cart, self.grid_spher, self.grid_cyl, self.dvvv = \
            init_grid(v_max, resolution, coord_sys)
self.grid_cart_t = self.grid_cart.copy()
self.grid_spher_t = self.grid_spher.copy()
self.nb_counts = np.zeros((resolution,resolution,resolution))
def interpolate_cart_vdf(self, grid, vdf0, interpolate='near'):
if interpolate == 'near':
method_str = 'nearest'
elif interpolate == 'lin':
method_str = 'linear'
if interpolate in ['near', 'lin']:
if vdf0.ndim==2:
interpFunc = RegularGridInterpolator( (grid[0,:,0], grid[1,0,:]), vdf0,
bounds_error=False, method=method_str,
fill_value=np.nan)
d = interpFunc(self.grid_cart[[0,2],:,:,0].reshape(2,-1).T) ## Ugly AF.
d = d.reshape((self.vdf_interp.shape[0],self.vdf_interp.shape[0]))
self.vdf_interp = d[:,:,None]
elif vdf0.ndim==3:
interpFunc = RegularGridInterpolator( (grid[0,:,0,0], grid[1,0,:,0], grid[2,0,0,:]), vdf0,
bounds_error=False, method=method_str,
fill_value=np.nan)
d = interpFunc(self.grid_cart.reshape(3,-1).T)
self.vdf_interp = d.T.reshape(self.vdf_interp.shape) ## (res,res,res)
# #
# elif interpolate=='cub':
# d = np.zeros_like(gridCart[0]).flatten()
# ip = tricubic.tricubic(list(distribPeriod), [distribPeriod.shape[0],distribPeriod.shape[1],distribPeriod.shape[2]])
# deltaSpeed = (np.log10(pSet.speed[iS,1]/normTh)-np.log10(pSet.speed[iS,0]/normTh))
# ds = pSet.speed[iS,1:]/normTh-pSet.speed[iS,:-1]/normTh
# deltaTheta = pSet.theta[1]-pSet.theta[0]
# deltaPhi = pSet.phi[iS,1]-pSet.phi[iS,0]
# vMinSpeed = np.log10(pSet.speed[iS,0]/normTh)
# vMinTheta = 0.
# vMinPhi = 0.
#
# # gridS[0] = np.log10(gridS[0]) ## gridS here becomes an array of bin index, to which the coordinate belongs.
# # gridS[0] = (gridS[0]-vMinSpeed)/deltaSpeed #+ 1.5
# bin = np.digitize(gridS[0], pSet.speed[iS]/normTh)-1
# gridS[0] = bin + (gridS[0]-pSet.speed[iS,bin]/normTh)/ds[bin]
# gridS[1] = (gridS[1]-vMinTheta)/deltaTheta + .5
# gridS[2] = (gridS[2]-vMinPhi)/deltaPhi + .5
# for i, node in enumerate(gridS.reshape((3,-1)).T):
# d[i] = ip.ip(list(node))
# # itkTmp = (d<0)
# d = d.reshape((resFinal,resFinal,resFinal))
# d[gridS[0]<0] = np.nan ## "fill_value". Should also be done for values larger than, and not only smaller than.
# # d[itkTmp] = 0
# # sys.exit()
#
#_________________
def interpolate_spher_vdf(self, grid, vdf0, interpolate='near', psp=False):
speed = grid[0,:,0,0][::-1].copy()
theta = grid[1,0,:,0].copy()
phi = grid[2,0,0,:].copy()
vdf0 = np.flip(vdf0, axis=(0))
if interpolate == 'near':
interp_method = 'nearest'
elif interpolate == 'lin':
interp_method = 'linear'
# itk = self.grid_spher[2]>np.pi
# self.grid_spher[2,itk] -= 2.*np.pi
if psp:
phi -= 60.*np.pi/180.
phi %= 2.*np.pi
self.grid_spher_t[2] -= 60.*np.pi/180
self.grid_spher_t[2] %= 2.*np.pi
# phiPeriod = np.zeros(18)
# phiPeriod[1:-1] = phi
# phiPeriod[0] = phi[-1]-2*np.pi
# phiPeriod[-1] = phi[0]+2*np.pi
# thetaPeriod = np.zeros(10)
# thetaPeriod[1:-1] = theta
# thetaPeriod[0] = theta[-1]-np.pi
# thetaPeriod[-1] = theta[0]+np.pi
# distribPeriod = np.zeros((32,10,18))
# distribPeriod[:,1:-1,1:-1] = vdf0
# distribPeriod[:,1:-1,0] = vdf0[:,:,-1]
# distribPeriod[:,1:-1,-1] = vdf0[:,:,0]
# distribPeriod[:,0] = np.nanmean(distribPeriod[:,1], axis=1)[:,None]
# distribPeriod[:,9] = np.nanmean(distribPeriod[:,8], axis=1)[:,None]
# itkR = ~np.isnan(speed)
# interpFunc = RegularGridInterpolator( (speed, thetaPeriod, phiPeriod),
# distribPeriod,
# bounds_error=False, method=interp_method,
# fill_value=np.nan)
interpFunc = RegularGridInterpolator( (speed, theta, phi),
vdf0,
bounds_error=False, method=interp_method,
fill_value=np.nan)
d = interpFunc(self.grid_spher_t.reshape(3,-1).T)
d = d.T.reshape(self.vdf_interp.shape) ## (res,res,res)
        self.nb_counts += (~np.isnan(d))
        d[np.isnan(d)] = 0.
self.vdf_interp += d
def transform_grid(self, R=None, v=None, s=None):
if R is not None:
gc = self.grid_cart.copy()
self.grid_cart_t = np.dot(R, gc.reshape(3,-1)).reshape(self.grid_cart.shape)
if v is not None:
self.grid_cart_t -= v[:,None,None,None]
        self.grid_spher_t = cart2spher(self.grid_cart_t)
# if interpolate=='near':
# interpFunc = RegularGridInterpolator( (speed, thetaPeriod, phiPeriod),
# (distribPeriod),
# bounds_error=False, method='nearest',
# fill_value=np.nan)
# d = interpFunc(self.grid_spher_t.reshape(3,-1).T)
# d = d.T.reshape(self.vdf_interp.shape) ## (res,res,res)
# d[np.isnan(d)] = 0.
# self.vdf_interp += d
# print(np.nanmin(d), np.nanmax(d))
#
#
#
# elif interpolate=='lin':
# interpFunc = RegularGridInterpolator( (speed, thetaPeriod, phiPeriod),
# (distribPeriod),
# bounds_error=False, method='linear',
# fill_value=np.nan)
# # interpFunc = RegularGridInterpolator( (speed, theta, phi),
# # vdf0,
# # bounds_error=False, method='linear',
# # fill_value=np.nan)
# d = interpFunc(self.grid_spher_t.reshape(3,-1).T)
# d = d.T.reshape(self.vdf_interp.shape) ## (res,res,res)
# d[np.isnan(d)] = 0.
# self.vdf_interp += d
#
# elif interpolate=='cub':
# d = np.zeros_like(gridCart[0]).flatten()
# ip = tricubic.tricubic(list(distribPeriod), [distribPeriod.shape[0],distribPeriod.shape[1],distribPeriod.shape[2]])
# deltaSpeed = (np.log10(pSet.speed[iS,1]/normTh)-np.log10(pSet.speed[iS,0]/normTh))
# ds = pSet.speed[iS,1:]/normTh-pSet.speed[iS,:-1]/normTh
# deltaTheta = pSet.theta[1]-pSet.theta[0]
# deltaPhi = pSet.phi[iS,1]-pSet.phi[iS,0]
# vMinSpeed = np.log10(pSet.speed[iS,0]/normTh)
# vMinTheta = 0.
# vMinPhi = 0.
#
# # gridS[0] = np.log10(gridS[0]) ## gridS here becomes an array of bin index, to which the coordinate belongs.
# # gridS[0] = (gridS[0]-vMinSpeed)/deltaSpeed #+ 1.5
# bin = np.digitize(gridS[0], pSet.speed[iS]/normTh)-1
# gridS[0] = bin + (gridS[0]-pSet.speed[iS,bin]/normTh)/ds[bin]
# gridS[1] = (gridS[1]-vMinTheta)/deltaTheta + .5
# gridS[2] = (gridS[2]-vMinPhi)/deltaPhi + .5
# for i, node in enumerate(gridS.reshape((3,-1)).T):
# d[i] = ip.ip(list(node))
# # itkTmp = (d<0)
# d = d.reshape((resFinal,resFinal,resFinal))
# d[gridS[0]<0] = np.nan ## "fill_value". Should also be done for values larger than, and not only smaller than.
# # d[itkTmp] = 0
# # sys.exit()
# if psp:
# self.grid_spher_t[2] += 60.*np.pi/180
# self.grid_spher_t[2] %= 2.*np.pi
#
#_________________
def init_grid(v_max, resolution, grid_geom):
"""Here we define the bin edges and centers, depending on the chosen
coordinate system."""
if grid_geom == 'cart':
edgesX = np.linspace(-v_max, v_max, resolution + 1,
dtype=np.float32)
centersX = (edgesX[:-1] + edgesX[1:]) * .5
        # grid_cart has shape 3 x res x res x res
grid_cart = np.mgrid[-v_max:v_max:resolution*1j,
-v_max:v_max:resolution*1j,
-v_max:v_max:resolution*1j]
grid_cart = grid_cart.astype(np.float32)
grid_spher = cart2spher(grid_cart)
grid_cyl = cart2cyl(grid_cart)
dv = centersX[1]-centersX[0]
dvvv = np.ones((resolution, resolution, resolution)) * dv ** 3
elif grid_geom == 'spher':
edges_rho = np.linspace(0, v_max, resolution + 1, dtype=np.float32)
edges_theta = np.linspace(0, np.pi, resolution + 1,
dtype=np.float32)
edges_phi = np.linspace(0, 2*np.pi, resolution + 1,
dtype=np.float32)
centers_rho = (edges_rho[:-1] + edges_rho[1:]) * .5
centers_theta = (edges_theta[:-1] + edges_theta[1:]) * .5
centers_phi = (edges_phi[:-1] + edges_phi[1:]) * .5
grid_spher = np.mgrid[centers_rho[0]:centers_rho[-1]:centers_rho.size*1j,
centers_theta[0]:centers_theta[-1]:centers_theta.size*1j,
centers_phi[0]:centers_phi[-1]:centers_phi.size*1j]
grid_spher = grid_spher.astype(np.float32)
grid_cart = spher2cart(grid_spher)
grid_cyl = cart2cyl(grid_cart)
d_rho = centers_rho[1]-centers_rho[0]
d_theta = centers_theta[1]-centers_theta[0]
d_phi = centers_phi[1]-centers_phi[0]
dv = centers_rho[1]-centers_rho[0]
dvvv = np.ones((resolution, resolution, resolution)) \
* centers_rho[:, None, None] * d_rho * d_theta * d_phi
elif grid_geom == 'cyl':
edges_rho = np.linspace(0, v_max, resolution+1, dtype=np.float32)
edges_phi = np.linspace(0, 2*np.pi, resolution+1, dtype=np.float32)
edges_z = np.linspace(-v_max, v_max, resolution+1, dtype=np.float32)
centers_rho = (edges_rho[:-1]+edges_rho[1:])*.5
centers_phi = (edges_phi[:-1]+edges_phi[1:])*.5
centers_z = (edges_z[:-1]+edges_z[1:])*.5
grid_cyl = np.mgrid[centers_rho[0]:centers_rho[-1]:centers_rho.size*1j,
centers_phi[0]:centers_phi[-1]:centers_phi.size*1j,
centers_z[0]:centers_z[-1]:centers_z.size*1j]
grid_cyl = grid_cyl.astype(np.float32)
grid_cart = cyl2cart(grid_cyl)
grid_spher = cart2spher(grid_cart)
dRho = centers_rho[1]-centers_rho[0]
dPhi = centers_phi[1]-centers_phi[0]
dZ = centers_z[1]-centers_z[0]
dvvv = np.ones((resolution, resolution, resolution)) \
* centers_rho[:, None, None]*dRho*dPhi*dZ
return grid_cart, grid_spher, grid_cyl, dvvv
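# Hedged usage sketch (assumes init_grid is the module-level helper defined
# above; velocity units follow the data): a grid with 64 bins per axis in
# spherical geometry returns arrays of shape (3, 64, 64, 64) for each
# coordinate grid and (64, 64, 64) for dvvv, the volume element of every bin.
def _example_init_grid(v_max=3000., resolution=64):
    grid_cart, grid_spher, grid_cyl, dvvv = init_grid(v_max, resolution, 'spher')
    return grid_cart.shape, dvvv.shape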
def spher2cart(v_spher):
"""Coordinate system conversion
"""
v_cart = np.zeros_like(v_spher)
v_cart[0] = v_spher[0] * np.sin(v_spher[1]) * np.cos(v_spher[2])
v_cart[1] = v_spher[0] * np.sin(v_spher[1]) * np.sin(v_spher[2])
v_cart[2] = v_spher[0] * np.cos(v_spher[1])
return v_cart
def cart2spher(v_cart):
"""Coordinate system conversion
"""
v_spher = np.zeros_like(v_cart)
v_spher[0] = np.sqrt(np.sum(v_cart ** 2, axis=0))
v_spher[1] = np.arccos(v_cart[2] / v_spher[0])
v_spher[2] = np.arctan2(v_cart[1], v_cart[0])
itm = (v_spher[2] < 0.)
v_spher[2][itm] += 2*np.pi
return v_spher
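# Hedged consistency check (illustrative values): a Cartesian velocity vector
# should survive a cart2spher -> spher2cart round trip up to floating point error.
def _example_round_trip():
    v_cart = np.array([[300.], [-150.], [80.]])
    return np.allclose(v_cart, spher2cart(cart2spher(v_cart)))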
def cyl2cart(v_cyl):
"""Coordinate system conversion
"""
v_cart = np.zeros_like(v_cyl)
v_cart[0] = v_cyl[0]*np.cos(v_cyl[1])
v_cart[1] = v_cyl[0]*np.sin(v_cyl[1])
v_cart[2] = v_cyl[2].copy()
return v_cart
def cart2cyl(v_cart):
"""Coordinate system conversion
"""
v_cyl = | np.zeros_like(v_cart) | numpy.zeros_like |
"""
Standalone routines to analyze and manipulate visibilities.
"""
# 20200226: introduced os.mkdir(outfile+'.touch') os.rmdir(outfile+'.touch')
# 20200226: to make sure we can handle sudden system break.
import os
import shutil
import inspect
import glob
import logging
import numpy as np
from scipy.ndimage import label
# Analysis utilities
import analysisUtils as au
# Pipeline versioning
from .pipelineVersion import version as pipeVer
# CASA stuff
from . import casaStuff
# Spectral lines
from . import utilsLines as lines
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# Physical constants
sol_kms = 2.99792458e5
##########################################
# Split, copy, combine measurement sets. #
##########################################
def copy_ms(infile=None, outfile=None, use_symlink=True, overwrite=False):
"""
Copy a measurement set, optionally using symlink instead of
actually copying.
"""
# Check inputs
if infile is None:
logging.error("Please specify infile.")
raise Exception("Please specify infile.")
if outfile is None:
logging.error("Please specify outfile.")
raise Exception("Please specify outfile.")
if not os.path.isdir(infile):
logger.error(
'Error! The input uv data measurement set "'+infile +
'"does not exist!')
raise Exception(
'Error! The input uv data measurement set "'+infile +
'"does not exist!')
# Check for presence of existing outfile and abort if it is found
# without overwrite permission.
if os.path.isdir(outfile) and not os.path.isdir(outfile+'.touch'):
if not overwrite:
logger.warning(
'Found existing data "'+outfile+'", will not overwrite.')
return()
# Delete existing output data.
for suffix in ['', '.flagversions', '.touch']:
if os.path.islink(outfile+suffix):
os.unlink(outfile+suffix)
logger.debug('os.unlink "'+outfile+'"')
if os.path.isdir(outfile+suffix):
shutil.rmtree(outfile+suffix)
if use_symlink:
# Make links
if os.path.isdir(infile):
os.symlink(infile, outfile)
logger.debug(
'os.symlink "'+infile+'", "'+outfile+'"')
if os.path.isdir(infile+'.flagversions'):
os.symlink(infile+'.flagversions', outfile+'.flagversions')
logger.debug(
'os.symlink "'+infile+'.flagversions'+'", "' +
outfile+'.flagversions"')
# Check
if not os.path.islink(outfile):
logger.error(
'Failed to link the uv data to '+os.path.abspath(outfile)+'!')
logger.error(
'Please check your file system writing permission or '
'system breaks.')
raise Exception(
'Failed to link the uv data to the imaging directory.')
return()
else:
# Check existing output data
has_existing_outfile = False
if os.path.isdir(outfile) and not os.path.isdir(outfile+'.touch'):
if not overwrite:
has_existing_outfile = True
# delete existing copied data if not overwriting
if not has_existing_outfile:
for suffix in ['', '.flagversions', '.touch']:
if os.path.isdir(outfile+suffix):
shutil.rmtree(outfile+suffix)
logger.debug('shutil.rmtree "'+outfile+suffix+'"')
        # copy the data (.touch directory is a temporary marker)
if not os.path.isdir(outfile+'.touch'):
os.mkdir(outfile+'.touch')
if os.path.isdir(infile):
shutil.copytree(infile, outfile)
logger.debug(
'shutil.copytree "'+infile+'", "'+outfile+'"')
if os.path.isdir(infile+'.flagversions'):
shutil.copytree(infile+'.flagversions', outfile+'.flagversions')
logger.debug(
'shutil.copytree "'+infile+'.flagversions'+'", "' +
outfile+'.flagversions'+'"')
if os.path.isdir(outfile+'.touch'):
os.rmdir(outfile+'.touch')
# check copied_file, make sure copying was done
if not os.path.isdir(outfile) or \
os.path.isdir(outfile+'.touch'):
logger.error(
'Failed to copy the uv data to '+os.path.abspath(outfile)+'!')
logger.error(
'Please check your file system writing permission or '
'system breaks.')
raise Exception(
'Failed to copy the uv data to the imaging directory.')
return()
return()
def split_science_targets(
infile=None, outfile=None, field='', intent='OBSERVE_TARGET*',
spw='', timebin='0s', do_statwt=False, overwrite=False):
"""
Split science targets from the input ALMA measurement set to form
a new, science-only measurement set. Optionally reweight the data
using statwt.
Relatively thin wrapper to split that smooths out some things like
handling of flagversions and which data tables to use.
Args:
infile (str): The input measurement set data.
outfile (str): The output measurement set data.
field, spw, intent (str): The field, spw, intent used for selection.
timebin: The time bin applied.
overwrite (bool): Set to True to overwrite existing output
data. The default is False, not overwriting anything.
Inputs:
infile: ALMA measurement set data folder.
Outputs:
outfile: ALMA measurement set data folder.
"""
# Check inputs
if infile is None:
logging.error("Please specify infile.")
raise Exception("Please specify infile.")
if outfile is None:
logging.error("Please specify outfile.")
raise Exception("Please specify outfile.")
if not os.path.isdir(infile):
logger.error(
'Error! The input uv data measurement set "'+infile +
'"does not exist!')
raise Exception(
'Error! The input uv data measurement set "'+infile +
'"does not exist!')
# Check for presence of existing outfile and abort if it is found
# without overwrite permission.
if os.path.isdir(outfile) and not os.path.isdir(outfile+'.touch'):
if not overwrite:
logger.warning(
'Found existing data "'+outfile+'", will not overwrite.')
return()
# Delete existing output data.
for suffix in ['', '.flagversions', '.touch']:
if os.path.islink(outfile+suffix):
os.unlink(outfile+suffix)
logger.debug('os.unlink "'+outfile+'"')
if os.path.isdir(outfile+suffix):
shutil.rmtree(outfile+suffix)
logger.info("")
logger.info("&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%")
logger.info("I will split out the data.")
logger.info("&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%")
logger.info("")
logger.info('Splitting from '+infile+' to '+outfile)
# Verify the column to use. If present, we use the corrected
# column. If not, then we use the data column.
mytb = au.createCasaTool(casaStuff.tbtool)
mytb.open(infile, nomodify = True)
colnames = mytb.colnames()
if 'CORRECTED_DATA' in colnames:
logger.info("Data has a CORRECTED column. Will use that.")
use_column = 'CORRECTED'
else:
logger.info("Data lacks a CORRECTED column. Will use DATA column.")
use_column = 'DATA'
mytb.close()
logger.info('... intent: '+intent)
logger.info('... field: '+field)
logger.info('... spw: '+spw)
if not os.path.isdir(outfile+'.touch'):
# mark the beginning of our processing
os.mkdir(outfile+'.touch')
split_params = {
'vis': infile, 'intent': intent, 'field': field, 'spw': spw,
'datacolumn': use_column, 'outputvis': outfile,
'keepflags': False, 'timebin': timebin}
logger.info(
"... running CASA "+'split(' +
', '.join("{!s}={!r}".format(
k, split_params[k]) for k in split_params.keys()) +
')')
# an MS can have a SPW label for data that is no longer contained in the MS
# (e.g., it was fully flagged, and keepflags=False was used in a previous split)
# This try/except should work for CASA 6 and newer versions with CASA's improved
# exception handling.
try:
casaStuff.split(**split_params)
flag_split_success = True
except RuntimeError as exc:
logger.error("Splitting failed with exception: {}".format(exc))
flag_split_success = False
# Re-weight the data if desired.
# Only continue if the split was successful
if do_statwt and flag_split_success:
logger.info("Using statwt to re-weight the data.")
statwt_params = {'vis': outfile, 'datacolumn': 'DATA'}
logger.info(
"... running CASA "+'statwt(' +
', '.join("{!s}={!r}".format(
k, statwt_params[k]) for k in statwt_params.keys())+')')
casaStuff.statwt(**statwt_params)
if os.path.isdir(outfile+'.touch'):
# mark the end of our processing
os.rmdir(outfile+'.touch')
return()
def concat_ms(
infile_list=None, outfile=None, freqtol='', dirtol='',
copypointing=True, overwrite=False):
"""
    Concatenate a list of measurement sets into one measurement set. A
    thin wrapper to concat. Might build out in the future.
Args:
infile_list (list or str): The input list of measurement sets.
outfile (str): The output measurement set data with suffix ".ms".
Inputs:
infile: ALMA measurement set data folder.
Outputs:
outfile: ALMA measurement set data folder.
"""
# Check inputs
if infile_list is None:
logging.error("Please specify infile_list.")
raise Exception("Please specify infile_list.")
if outfile is None:
logging.error("Please specify outfile.")
raise Exception("Please specify outfile.")
# make sure the input infile_list is a list
if np.isscalar(infile_list):
infile_list = [infile_list]
# check file existence
for this_infile in infile_list:
if not os.path.isdir(this_infile):
logger.error(
'Error! The input measurement set "'+this_infile +
'" not found')
raise Exception(
'Error! The input measurement set "'+this_infile +
'" not found')
# Quit if output data are present and overwrite is off.
if os.path.isdir(outfile) and not os.path.isdir(outfile+'.touch'):
if not overwrite:
logger.warning(
'Found existing output data "'+outfile +
'", will not overwrite it.')
return()
# if overwrite or no file present, then delete existing output data.
for suffix in ['', '.flagversions', '.touch']:
if os.path.isdir(outfile+suffix):
shutil.rmtree(outfile+suffix)
# Concatenate all of the relevant files
concat_params = {
'vis': infile_list, 'concatvis': outfile, 'copypointing': copypointing}
if freqtol is not None and freqtol != '':
concat_params['freqtol'] = freqtol
if dirtol is not None and dirtol != '':
concat_params['dirtol'] = dirtol
logger.info(
"... running CASA "+'concat(' +
', '.join("{!s}={!r}".format(
k, concat_params[k]) for k in concat_params.keys()) +
')')
if not os.path.isdir(outfile+'.touch'):
os.mkdir(outfile+'.touch') # mark the beginning of our processing
casaStuff.concat(**concat_params)
if os.path.isdir(outfile+'.touch'):
os.rmdir(outfile+'.touch') # mark the end of our processing
return()
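# Illustrative usage sketch (kept as a comment so nothing runs on import).
# The file names and tolerances below are hypothetical placeholders, chosen
# only to show the calling convention of concat_ms():
#
#   concat_ms(
#       infile_list=['eb1_co21.ms', 'eb2_co21.ms'],
#       outfile='concat_co21.ms',
#       freqtol='10MHz', dirtol='1arcsec',
#       copypointing=False, overwrite=True)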
##########################
# Continuum subtraction. #
##########################
def contsub(
infile=None, outfile=None, ranges_to_exclude=[], solint='int',
fitorder=0, combine='', overwrite=False):
"""
    Carry out uv continuum subtraction on a measurement set. First
    figures out the channels corresponding to the frequency ranges to
    exclude (e.g., bright spectral lines), then fits and subtracts the
    continuum from the remaining channels. The output is written to
    infile+'.contsub'.
"""
# Error and file existence checking
if infile is None:
logging.error("Please specify infile.")
raise Exception("Please specify infile.")
if outfile is None:
outfile = infile+'.contsub'
if not os.path.isdir(infile):
logger.error(
            'The input uv data measurement set "'+infile+'" does not exist.')
return()
# check existing output data in the imaging directory
if (os.path.isdir(infile+'.contsub') and
not os.path.isdir(infile+'.contsub'+'.touch')):
if not overwrite:
logger.warning(
'Found existing output data "'+infile+'.contsub' +
'", will not overwrite it.')
return
if os.path.isdir(infile+'.contsub'):
shutil.rmtree(infile+'.contsub')
if os.path.isdir(infile+'.contsub'+'.touch'):
shutil.rmtree(infile+'.contsub'+'.touch')
# Figure out which channels to exclude from the fit.
# find_spw_channels_for_lines
spw_flagging_string = spw_string_for_freq_ranges(
infile=infile, freq_ranges_ghz=ranges_to_exclude,
)
# uvcontsub, this outputs infile+'.contsub'
uvcontsub_params = {
'vis': infile,
'fitspw': spw_flagging_string,
'excludechans': True,
'combine': combine,
'fitorder': fitorder,
'solint': solint,
'want_cont': False}
logger.info(
"... running CASA "+'uvcontsub(' +
', '.join("{!s}={!r}".format(
k, uvcontsub_params[k]) for k in uvcontsub_params.keys()) +
')')
if not os.path.isdir(infile+'.contsub'+'.touch'):
# mark the beginning of our processing
os.mkdir(infile+'.contsub'+'.touch')
casaStuff.uvcontsub(**uvcontsub_params)
if os.path.isdir(infile+'.contsub'+'.touch'):
# mark the end of our processing
os.rmdir(infile+'.contsub'+'.touch')
# Could manipulate outfile names here.
return()
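# Illustrative usage sketch (comment only). The measurement set name and the
# frequency range to exclude are hypothetical placeholders; ranges_to_exclude
# takes a list of [low_GHz, high_GHz] pairs, as parsed by
# spw_string_for_freq_ranges() below.
#
#   contsub(
#       infile='target_line.ms',
#       ranges_to_exclude=[[230.2, 230.8]],
#       fitorder=0, solint='int', overwrite=True)
#   # output written to 'target_line.ms.contsub'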
##########################################################
# Interface between spectral lines and spectral windows. #
##########################################################
def find_spws_for_line(
infile=None, line=None, restfreq_ghz=None,
vsys_kms=None, vwidth_kms=None, vlow_kms=None, vhigh_kms=None,
max_chanwidth_kms=None,
require_data=False, require_full_line_coverage=False,
exit_on_error=True, as_list=False):
"""
    List the spectral windows in the input ms data that contain the
input line, given the line velocity (vsys_kms) and line width
(vwidth_kms), which are in units of km/s. Defaults to rest frequency
(with vsys_kms = 0.0 and vwidth_kms = 0.0).
"""
# Check inputs
if infile is None:
logging.error("Please specify infile.")
raise Exception("Please specify infile.")
# Verify file existence
if not os.path.isdir(infile):
        logger.error(
            'Error! The input uv data measurement set "'+infile +
            '" does not exist!')
        raise Exception(
            'Error! The input uv data measurement set "'+infile +
            '" does not exist!')
# Get the line name and rest-frame frequency in the line_list
# module for the input line
if restfreq_ghz is None:
if line is None:
logging.error(
"Specify a line name or provide a rest frequency in GHz.")
raise Exception("No rest frequency specified.")
restfreq_ghz = (
lines.get_line_name_and_frequency(line, exit_on_error=True))[1]
    # Work out the frequencies at the line edges.
line_low_ghz, line_high_ghz = lines.get_ghz_range_for_line(
restfreq_ghz=restfreq_ghz,
vsys_kms=vsys_kms, vwidth_kms=vwidth_kms,
vlow_kms=vlow_kms, vhigh_kms=vhigh_kms)
logger.debug(
"... line: %s, line freq: %.6f - %.6f, rest-freq: %.6f" %
(line, line_low_ghz, line_high_ghz, restfreq_ghz))
# If channel width restrictions are in place, calculate the
# implied channel width requirement in GHz.
if max_chanwidth_kms is not None:
# line_freq_ghz = (line_low_ghz+line_high_ghz)*0.5
# Using RADIO convention for velocities:
max_chanwidth_ghz = restfreq_ghz*max_chanwidth_kms/sol_kms
# using the high-z convention used to be this (delete eventually)
# max_chanwidth_ghz = line_freq_ghz*max_chanwidth_kms/sol_kms
logger.debug(
"... max_chanwidth_kms: %.3f, max_chanwidth_ghz: %.6f" %
(max_chanwidth_kms, max_chanwidth_ghz))
else:
max_chanwidth_ghz = None
# Work out which spectral windows contain the line by looping over
# SPWs one at a time.
spw_list = []
spw_lowest_ghz = None
spw_highest_ghz = None
logger.debug("... vm = au.ValueMapping(infile) ...")
vm = au.ValueMapping(infile)
logger.debug("... vm = au.ValueMapping(infile) done")
for this_spw in vm.spwInfo.keys():
spw_high_ghz = np.max(vm.spwInfo[this_spw]['edgeChannels'])/1e9
spw_low_ghz = np.min(vm.spwInfo[this_spw]['edgeChannels'])/1e9
logger.debug(
"... spw: %s, freq: %.6f - %.6f GHz" %
(this_spw, spw_low_ghz, spw_high_ghz))
if spw_high_ghz < line_low_ghz:
continue
if spw_low_ghz > line_high_ghz:
continue
if max_chanwidth_ghz is not None:
spw_chanwidth_ghz = abs(vm.spwInfo[this_spw]['chanWidth'])/1e9
if spw_chanwidth_ghz > max_chanwidth_ghz:
continue
if require_data:
if len(vm.scansForSpw[this_spw]) == 0:
continue
if require_full_line_coverage and not (
spw_high_ghz > line_high_ghz and spw_low_ghz < line_low_ghz):
continue
spw_list.append(this_spw)
if spw_lowest_ghz is None:
spw_lowest_ghz = spw_low_ghz
else:
spw_lowest_ghz = min(spw_lowest_ghz, spw_low_ghz)
if spw_highest_ghz is None:
spw_highest_ghz = spw_high_ghz
else:
spw_highest_ghz = max(spw_highest_ghz, spw_high_ghz)
# If we don't find the line in this data set, issue a warning and
# return.
if len(spw_list) == 0:
logger.warning('No spectral windows contain the input line.')
spw_list = []
spw_list_string = None # can't be '', that selects all
if as_list:
return (spw_list)
else:
return (spw_list_string)
else:
# sort and remove duplicates
spw_list = sorted(list(set(spw_list)))
# make spw_list_string appropriate for use in selection
spw_list_string = ','.join(np.array(spw_list).astype(str))
# return
if as_list:
return(spw_list)
else:
return(spw_list_string)
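# Illustrative usage sketch (comment only). The measurement set name,
# systemic velocity, and line width are hypothetical placeholders, and
# 'co21' is assumed to be a line name known to the lines module.
#
#   spw_string = find_spws_for_line(
#       infile='target.ms', line='co21',
#       vsys_kms=650.0, vwidth_kms=500.0, require_data=True)
#   # returns a comma-separated SPW string (e.g. '25,27') or None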
def find_spws_for_science(
infile=None, require_data=False, exit_on_error=True, as_list=False):
"""
List all spectral windows that we judge likely to be used for
science. Mostly wraps analysisUtils rather than reinventing the
wheel.
"""
# Check inputs
if infile is None:
logging.error("Please specify infile.")
raise Exception("Please specify infile.")
# Verify file existence
if not os.path.isdir(infile):
        logger.error(
            'Error! The input uv data measurement set "'+infile +
            '" does not exist!')
        raise Exception(
            'Error! The input uv data measurement set "'+infile +
            '" does not exist!')
# Call the analysisUtil version.
spw_string = au.getScienceSpws(
infile, intent='OBSERVE_TARGET*')
if spw_string is None or len(spw_string) == 0:
spw_string = au.getScienceSpws(
infile, intent='OBSERVE_TARGET#ON_SOURCE')
spw_list = []
for this_spw_string in spw_string.split(','):
spw_list.append(int(this_spw_string))
# Shouldn't get here, I think, because of the analysisUtils logic
if require_data:
vm = au.ValueMapping(infile)
        # iterate over a copy so that removing elements does not skip entries
        for spw in list(spw_list):
            if len(vm.scansForSpw[spw]) == 0:
                spw_list.remove(spw)
# Return
if len(spw_list) == 0:
logger.warning('No science spectral windows found.')
spw_list_string = None # can't be '', that selects all
else:
# sort and remove duplicates
spw_list = sorted(list(set(spw_list)))
# make spw_list_string appropriate for use in selection
spw_list_string = ','.join(np.array(spw_list).astype(str))
if as_list:
return(spw_list)
else:
return(spw_list_string)
def spw_string_for_freq_ranges(
infile=None, freq_ranges_ghz=[], just_spw=[],
complement=False, fail_on_empty=False):
"""
    Given an input measurement set and a list of frequency ranges in
    GHz, return a CASA 'spw:chanlow~chanhigh' selection string covering
    the channels that fall inside those ranges. Set complement=True to
    select the channels outside the ranges instead (useful for
    continuum fitting).
"""
# Check file existence
if infile is None:
logging.error("Please specify an input file.")
raise Exception("Please specify an input file.")
if not os.path.isdir(infile):
        logger.error(
            'The input measurement set "'+infile+'" does not exist.')
        raise Exception(
            'The input measurement set "'+infile+'" does not exist.')
# Make sure that we have a list
if not isinstance(freq_ranges_ghz, list):
freq_ranges_ghz = [freq_ranges_ghz]
if not isinstance(just_spw, list):
just_spw = [just_spw]
vm = au.ValueMapping(infile)
# Loop over spectral windows
spw_flagging_string = ''
first_string = True
for this_spw in vm.spwInfo:
if len(just_spw) > 0:
if this_spw not in just_spw:
continue
freq_axis = vm.spwInfo[this_spw]['chanFreqs']
half_chan = abs(freq_axis[1]-freq_axis[0])*0.5
chan_axis = np.arange(len(freq_axis))
mask_axis = np.zeros_like(chan_axis, dtype='bool')
for this_freq_range in freq_ranges_ghz:
low_freq_hz = this_freq_range[0]*1e9
high_freq_hz = this_freq_range[1]*1e9
ind = (
((freq_axis-half_chan) >= low_freq_hz) *
((freq_axis+half_chan) <= high_freq_hz))
mask_axis[ind] = True
if complement:
mask_axis = np.invert(mask_axis)
if fail_on_empty:
if np.sum(np.invert(mask_axis)) == 0:
return(None)
regions = (label(mask_axis))[0]
max_reg = np.max(regions)
for ii in range(1, max_reg+1):
this_mask = (regions == ii)
low_chan = np.min(chan_axis[this_mask])
high_chan = np.max(chan_axis[this_mask])
this_spw_string = (
str(this_spw)+':'+str(low_chan)+'~'+str(high_chan))
if first_string:
spw_flagging_string += this_spw_string
first_string = False
else:
spw_flagging_string += ','+this_spw_string
logger.info("... returning SPW selection string:")
logger.info(spw_flagging_string)
return(spw_flagging_string)
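# Illustrative usage sketch (comment only). File name and ranges are
# hypothetical placeholders; with complement=True the selection covers the
# channels outside the listed ranges, e.g. for continuum fitting.
#
#   sel = spw_string_for_freq_ranges(
#       infile='target.ms',
#       freq_ranges_ghz=[[230.2, 230.8], [231.0, 231.3]],
#       complement=True)
#   # returns a CASA-style 'spw:chanlow~chanhigh,...' selection string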
def compute_common_chanwidth(
infile_list=None, line=None,
vsys_kms=None, vwidth_kms=None, vlow_kms=None, vhigh_kms=None,
require_full_line_coverage=False):
"""
    Calculates the coarsest channel width among all spectral windows
    in the input measurement sets that contain the input line.
    Args:
        infile_list (list or str): Input measurement set(s).
        line (str): Line name known to the lines module.
        vsys_kms, vwidth_kms (or vlow_kms, vhigh_kms): Line velocity
            range in km/s.
    Returns:
        The coarsest channel width in km/s, or None if no spectral
        window covers the line.
"""
if infile_list is None:
logging.error(
"Please specify one or more input files via infile_list.")
        raise Exception(
            "Please specify one or more input files via infile_list.")
if np.isscalar(infile_list):
infile_list = [infile_list]
# Get the line name and line center rest-frame frequency
# in the line_list module for the input line
line_name, restfreq_ghz = lines.get_line_name_and_frequency(
line, exit_on_error=True)
    # Work out the frequencies at the line edges and central frequency
line_low_ghz, line_high_ghz = lines.get_ghz_range_for_line(
line=line_name, vsys_kms=vsys_kms, vwidth_kms=vwidth_kms,
vlow_kms=vlow_kms, vhigh_kms=vhigh_kms)
line_freq_ghz = (line_high_ghz+line_low_ghz)/2.0
coarsest_channel = None
for this_infile in infile_list:
# Find spws for line
spw_list_string = find_spws_for_line(
this_infile, line, vsys_kms=vsys_kms, vwidth_kms=vwidth_kms,
require_full_line_coverage=require_full_line_coverage)
chan_widths_hz = au.getChanWidths(this_infile, spw_list_string)
# Convert to km/s and return
for this_chan_width_hz in chan_widths_hz:
# Using RADIO convention for velocities:
chan_width_kms = abs(
this_chan_width_hz / (line_freq_ghz*1e9)*sol_kms)
if coarsest_channel is None:
coarsest_channel = chan_width_kms
else:
if chan_width_kms > coarsest_channel:
coarsest_channel = chan_width_kms
return(coarsest_channel)
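# Illustrative usage sketch (comment only); file names and velocities are
# hypothetical placeholders.
#
#   coarsest_kms = compute_common_chanwidth(
#       infile_list=['eb1.ms', 'eb2.ms'], line='co21',
#       vsys_kms=650.0, vwidth_kms=500.0)
#   # coarsest native channel width (km/s) among SPWs covering the line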
#######################################################
# Extract a single-line, common grid measurement set. #
#######################################################
def batch_extract_line(
infile_list=[], outfile=None,
target_chan_kms=None, restfreq_ghz=None, line=None,
vsys_kms=None, vwidth_kms=None, vlow_kms=None, vhigh_kms=None,
method='regrid_then_rebin', exact=False, freqtol='',
clear_pointing=True, require_full_line_coverage=False,
overwrite=False):
"""
Run a batch line extraction.
"""
# Check that we have an output file defined.
if outfile is None:
logging.error("Please specify an output file.")
raise Exception("Please specify an output file.")
# Check existence of output data and abort if found and overwrite is off
if os.path.isdir(outfile) and not os.path.isdir(outfile+'.touch'):
if not overwrite:
logger.warning(
'... found existing output data "'+outfile +
'", will not overwrite it.')
return()
# Else, clear all previous files and temporary files
for suffix in ['', '.flagversions', '.touch', '.temp*']:
for temp_outfile in glob.glob(outfile+suffix):
if os.path.isdir(temp_outfile):
logger.debug('... shutil.rmtree(%r)' % (temp_outfile))
shutil.rmtree(temp_outfile)
# Feed directly to generate an extraction scheme. This does a lot
# of the error checking.
schemes = suggest_extraction_scheme(
infile_list=infile_list, target_chan_kms=target_chan_kms,
method=method, exact=exact, restfreq_ghz=restfreq_ghz, line=line,
vsys_kms=vsys_kms, vwidth_kms=vwidth_kms,
vlow_kms=vlow_kms, vhigh_kms=vhigh_kms,
require_full_line_coverage=require_full_line_coverage)
for this_infile in schemes.keys():
logger.info(
"For this line ({}), I will extract SPWs {} "
"from infile {}".format(
line, schemes[this_infile].keys(), this_infile))
# Execute the extraction scheme
split_file_list = []
for this_infile in schemes.keys():
for this_spw in schemes[this_infile].keys():
this_scheme = schemes[this_infile][this_spw]
# Specify output file and check for existence
this_outfile = this_infile+'.temp_spw'+str(this_spw).strip()
this_scheme['outfile'] = this_outfile
this_scheme['overwrite'] = overwrite
this_scheme['require_full_line_coverage'] = \
require_full_line_coverage
split_file_list.append(this_outfile)
# Execute line extraction
del this_scheme['chan_width_kms']
del this_scheme['chan_width_ghz']
extract_line(**this_scheme)
# Deal with pointing table - testing shows it to be a
# duplicate for each SPW here, so we remove all rows for
# all SPWs except the first one.
if clear_pointing:
# This didn't work:
# os.system('rm -rf '+this_outfile+'/POINTING')
# This zaps the whole table:
if os.path.exists(this_outfile+os.sep+'POINTING'):
au.clearPointingTable(this_outfile)
else:
copy_pointing = False
#logger.debug('Warning! Failed to run au.clearPointingTable(%r)'%(this_outfile))
# Concatenate and combine the output data sets
concat_ms(
infile_list=split_file_list, outfile=outfile, freqtol=freqtol,
overwrite=overwrite, copypointing=(not clear_pointing))
# Clean up, deleting intermediate files
for this_file in split_file_list:
shutil.rmtree(this_file)
return()
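# Illustrative usage sketch (comment only) for the full extraction: all
# names and numbers below are hypothetical placeholders.
#
#   batch_extract_line(
#       infile_list=['eb1.ms', 'eb2.ms'],
#       outfile='target_co21_extracted.ms',
#       line='co21', vsys_kms=650.0, vwidth_kms=500.0,
#       target_chan_kms=2.5, method='regrid_then_rebin', overwrite=True)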
def choose_common_res(
vals=[], epsilon=1e-4):
"""
Choose a common resolution given a list and an inflation
    parameter epsilon. Returns max(abs(vals))*(1+epsilon).
"""
if len(vals) == 0:
return(None)
ra = np.array(np.abs(vals))
common_res = np.max(ra)*(1.+epsilon)
return(common_res)
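# Quick illustration of the convention above: with the default epsilon the
# returned value is just slightly larger than the coarsest input, e.g.
# choose_common_res([2.3, 2.5, 2.4]) -> 2.5*(1+1e-4) = 2.50025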
def suggest_extraction_scheme(
infile_list=[], target_chan_kms=None, restfreq_ghz=None, line=None,
vsys_kms=None, vwidth_kms=None, vlow_kms=None, vhigh_kms=None,
method='regrid_then_rebin', exact=False,
require_full_line_coverage=False):
"""
Recommend extraction parameters given an input list of files, a
desired target channel width, and a preferred algorithm. Returns a
dictionary suitable for putting into the extraction routine.
"""
# Check inputs
if infile_list is None:
logging.error("Please specify a list of infiles.")
raise Exception("Please specify a list of infiles.")
# make sure the input infile_list is a list
if np.isscalar(infile_list):
infile_list = [infile_list]
# Require a valid method choice
valid_methods = [
'regrid_then_rebin', 'rebin_then_regrid', 'just_regrid', 'just_rebin']
if method.lower().strip() not in valid_methods:
logger.error("Not a valid line extraction method - "+str(method))
raise Exception("Please specify a valid line extraction method.")
# Get the line name and rest-frame frequency in the line_list
# module for the input line
if restfreq_ghz is None:
if line is None:
logging.error(
"Specify a line name or provide a rest frequency in GHz.")
raise Exception("No rest frequency specified.")
restfreq_ghz = (
lines.get_line_name_and_frequency(line, exit_on_error=True))[1]
# # Work out the frequencies at the line edes.
# line_low_ghz, line_high_ghz = lines.get_ghz_range_for_line(
# restfreq_ghz=restfreq_ghz, vsys_kms=vsys_kms, vwidth_kms=vwidth_kms,
# vlow_kms=vlow_kms, vhigh_kms=vhigh_kms)
# line_freq_ghz = 0.5*(line_low_ghz+line_high_ghz)
# ----------------------------------------------------------------
# Loop over infiles and spectral windows and record information
# ----------------------------------------------------------------
scheme = {}
chan_width_list = []
binfactor_list = []
for this_infile in infile_list:
vm = au.ValueMapping(this_infile)
spw_list = find_spws_for_line(
this_infile, restfreq_ghz=restfreq_ghz,
vsys_kms=vsys_kms, vwidth_kms=vwidth_kms,
vlow_kms=vlow_kms, vhigh_kms=vhigh_kms,
require_data=True, as_list=True,
require_full_line_coverage=require_full_line_coverage)
scheme[this_infile] = {}
for this_spw in spw_list:
            chan_width_ghz = np.abs(vm.spwInfo[this_spw]['chanWidth'])
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The triangular lattice"""
from dataclasses import asdict
from itertools import product
from math import pi
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from retworkx import PyGraph
from .lattice import LatticeDrawStyle, Lattice
from .boundary_condition import BoundaryCondition
class TriangularLattice(Lattice):
"""Triangular lattice."""
def _coordinate_to_index(self, coord: np.ndarray) -> int:
"""Convert the coordinate of a lattice point to an integer for labeling.
When self.size=(l0, l1), then a coordinate (x0, x1) is converted as
x0 + x1*l0.
Args:
coord: Input coordinate to be converted.
Returns:
int: Return x0 + x1*l0 when coord=np.array([x0, x1]) and self.size=(l0, l1).
"""
dim = 2
size = self.size
base = np.array([np.prod(size[:i]) for i in range(dim)], dtype=int)
return np.dot(coord, base).item()
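    # Worked example of the labeling convention documented above: with
    # self.size = (3, 2), the coordinate (x0, x1) = (1, 1) maps to
    # 1 + 1*3 = 4, i.e. np.dot([1, 1], [1, 3]) = 4.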
def _self_loops(self) -> List[Tuple[int, int, complex]]:
"""Return a list consisting of the self-loops on all the nodes.
Returns:
List[Tuple[int, int, complex]] : List of the self-loops.
"""
size = self.size
onsite_parameter = self.onsite_parameter
num_nodes = np.prod(size)
return [(node_a, node_a, onsite_parameter) for node_a in range(num_nodes)]
def _bulk_edges(self) -> List[Tuple[int, int, complex]]:
"""Return a list consisting of the edges in th bulk, which don't cross the boundaries.
Returns:
List[Tuple[int, int, complex]] : List of weighted edges that don't cross the boundaries.
"""
size = self.size
edge_parameter = self.edge_parameter
list_of_edges = []
rows, cols = size
coordinates = list(product(*map(range, size)))
for x, y in coordinates:
node_a = self._coordinate_to_index(np.array([x, y]))
for i in range(3):
# x direction
if i == 0 and x != rows - 1:
node_b = self._coordinate_to_index(np.array([x, y]) + np.array([1, 0]))
# y direction
elif i == 1 and y != cols - 1:
node_b = self._coordinate_to_index(np.array([x, y]) + np.array([0, 1]))
# diagonal direction
elif i == 2 and x != rows - 1 and y != cols - 1:
                    node_b = self._coordinate_to_index(np.array([x, y]) + np.array([1, 1]))
"""a module that houses TOV solvers in the "standard" formulation
"""
__author__ = "<NAME> (<EMAIL>)"
#-------------------------------------------------
import numpy as np
from scipy.integrate import odeint
from scipy.special import hyp2f1
from universality.utils.units import (G, c2, Msun)
#-------------------------------------------------
#DEFAULT_MAX_DR = 1e5 ### maximum step size allowed within the integrator (in standard units, which should be in cm)
DEFAULT_MAX_DR = 1e6
DEFAULT_MIN_DR = 1.0 ### the smallest step size we allow (in standard units, which should be cm)
DEFAULT_GUESS_FRAC = 0.1 ### how much of the way to the vanishing pressure we guess via Newton's method
DEFAULT_INITIAL_FRAC = 1e-3 ### the initial change in pressure we allow when setting the intial conditions
DEFAULT_RTOL = 1e-4
DEFAULT_MXSTEP = 10000
#------------------------
TWOPI = 2*np.pi
FOURPI = 2*TWOPI
Gc2 = G/c2
#-------------------------------------------------
### Standard formulation of the TOV equations
#-------------------------------------------------
### basic evolutionary equations
def dmdr(r, epsc2):
return FOURPI * r**2 * epsc2
def dmbdr(r, rho, m):
return dmdr(r, rho) * (1 - 2*Gc2*m/r)**-0.5
def dpc2dr(r, pc2, m, epsc2):
return - Gc2 * (epsc2 + pc2)*(m + FOURPI * r**3 * pc2)/(r * (r - 2*Gc2*m))
def detadr(r, pc2, m, eta, epsc2, cs2c2):
invf = (1. - 2.*Gc2*m/r)**-1
A = 2. * invf * (1. - 3.*Gc2*m/r - TWOPI*Gc2*r**2 * (epsc2 + 3.*pc2))
B = invf * (6. - FOURPI*Gc2*r**2 * (epsc2 + pc2)*(3. + 1./cs2c2))
return -1.*(eta*(eta - 1.) + A*eta - B)/r
def domegadr(r, pc2, m, omega, epsc2):
P = FOURPI * Gc2 * r**3 * (epsc2 + pc2)/ (r - 2.*Gc2*m)
return (P*(omega + 4.) - omega*(omega + 3.))/r
#-------------------------------------------------
# functions for values at the stellar surface
#-------------------------------------------------
def eta2lambda(r, m, eta): ### dimensionless tidal deformability
C = Gc2*m/r # compactness
fR = 1.-2.*C
F = hyp2f1(3., 5., 6., 2.*C) # a hypergeometric function
z = 2.*C
dFdz = (5./(2.*z**6.)) * (z*(z*(z*(3.*z*(5. + z) - 110.) + 150.) - 60.) / (z - 1.)**3 + 60.*np.log(1. - z))
RdFdr = -2.*C*dFdz # log derivative of hypergeometric function
k2el = 0.5*(eta - 2. - 4.*C/fR) / (RdFdr -F*(eta + 3. - 4.*C/fR)) # gravitoelectric quadrupole Love number
return (2./3.)*(k2el/C**5)
def omega2i(r, omega): ### moment of inertia
return (omega/(3. + omega)) * r**3/(2.*Gc2)
#-------------------------------------------------
# initial conditions
#-------------------------------------------------
def initial_pc2(pc2i, frac):
return (1. - frac)*pc2i ### assume a constant slope over a small change in the pressure
def initial_r(pc2i, ec2i, frac):
return (frac*pc2i / ( G * (ec2i + pc2i) * (ec2i/3. + pc2i) * TWOPI ) )**0.5 ### solve for the radius that corresponds to that small change
def initial_m(r, ec2i):
return FOURPI * r**3 * ec2i / 3. # gravitational mass
def initial_mb(r, rhoi):
    return FOURPI * r**3 * rhoi / 3. # baryonic mass
def initial_eta(r, pc2i, ec2i, cs2c2i):
    return 2. + FOURPI * Gc2 * r**2 * (9.*pc2i + 13.*ec2i + 3.*(pc2i+ec2i)/cs2c2i)/21. # initial perturbation for dimensionless tidal deformability
def initial_omega(r, pc2i, ec2i):
    return 16.*np.pi * Gc2 * r**2 * (pc2i + ec2i)/5. # initial frame-dragging function
#-------------------------------------------------
# central loop that solves the TOV equations given a set of coupled ODEs
#-------------------------------------------------
def engine(
r,
vec,
eos,
dvecdr_func,
min_dr=DEFAULT_MIN_DR,
max_dr=DEFAULT_MAX_DR,
guess_frac=DEFAULT_GUESS_FRAC,
initial_frac=DEFAULT_INITIAL_FRAC,
rtol=DEFAULT_RTOL,
        mxstep=DEFAULT_MXSTEP,
):
"""integrate the TOV equations with central pressure "pc" and equation of state described by energy density "eps" and pressure "p"
expects eos = (pressure, energy_density)
"""
vec = np.array(vec, dtype=float)
while vec[0] > 0: ### continue until pressure vanishes
vec0 = vec[:] # store the current location as the old location
r0 = r
### estimate the radius at which this p will vanish via Newton's method
r = r0 + max(min_dr, min(max_dr, guess_frac * abs(vec[0]/dvecdr_func(vec, r, eos)[0])))
### integrate out until we hit that estimate
vec[:] = odeint(dvecdr_func, vec0, (r0, r), args=(eos,), rtol=rtol, hmax=max_dr, mxstep=mxstep)[-1,:] ### retain only the last point
### return to client, who will then interpolate to find the surface
### interpolate to find stellar surface
p = [vec0[0], vec[0]]
# radius
r = np.interp(0, p, [r0, r])
# the rest of the macro properties
vals = [np.interp(0, p, [vec0[i], vec[i]]) for i in range(1, len(vec))]
return r, vals
#-------------------------------------------------
### the solver that yields all known macroscopic quantites
MACRO_COLS = ['M', 'R', 'Lambda', 'I', 'Mb'] ### the column names for what we compute
def dvecdr(vec, r, eos):
pc2, m, eta, omega, mb = vec
epsc2 = np.interp(pc2, eos[0], eos[1])
rho = np.interp(pc2, eos[0], eos[2])
cs2c2 = np.interp(pc2, eos[0], eos[3])
return \
dpc2dr(r, pc2, m, epsc2), \
dmdr(r, epsc2), \
detadr(r, pc2, m, eta, epsc2, cs2c2), \
domegadr(r, pc2, m, omega, epsc2), \
dmbdr(r, rho, m)
def initial_condition(pc2i, eos, frac=DEFAULT_INITIAL_FRAC):
"""determines the initial conditions for a stellar model with central pressure pc
this is done by analytically integrating the TOV equations over very small radii to avoid the divergence as r->0
"""
ec2i = np.interp(pc2i, eos[0], eos[1])
rhoi = np.interp(pc2i, eos[0], eos[2])
cs2c2i = np.interp(pc2i, eos[0], eos[3])
pc2 = initial_pc2(pc2i, frac)
r = initial_r(pc2i, ec2i, frac)
m = initial_m(r, ec2i)
mb = initial_mb(r, rhoi)
eta = initial_eta(r, pc2i, ec2i, cs2c2i)
omega = initial_omega(r, pc2i, ec2i)
return r, (pc2, m, eta, omega, mb)
def integrate(
pc2i,
eos,
min_dr=DEFAULT_MIN_DR,
max_dr=DEFAULT_MAX_DR,
guess_frac=DEFAULT_GUESS_FRAC,
initial_frac=DEFAULT_INITIAL_FRAC,
rtol=DEFAULT_RTOL,
):
"""integrate the TOV equations with central pressure "pc" and equation of state described by energy density "eps" and pressure "p"
expects eos = (pressure, energy_density, baryon_density, cs2c2)
"""
r, vec = initial_condition(pc2i, eos, frac=initial_frac)
if vec[0] < 0: ### guarantee that we enter the loop
raise RuntimeError('bad initial condition!')
r, (m, eta, omega, mb) = engine(
r,
vec,
eos,
dvecdr,
min_dr=min_dr,
max_dr=max_dr,
guess_frac=guess_frac,
rtol=rtol,
)
# compute tidal deformability
l = eta2lambda(r, m, eta)
# compute moment of inertia
i = omega2i(r, omega)
# convert to "standard" units
m /= Msun ### reported in units of solar masses, not grams
mb /= Msun
r *= 1e-5 ### convert from cm to km
i /= 1e45 ### normalize this to a common value but still in CGS
return m, r, l, i, mb
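# Illustrative usage sketch (comment only): integrate a single stellar model
# from a tabulated EOS. The names below are placeholders; eos must be the
# tuple (pressurec2, energy_densityc2, baryon_density, cs2c2) with columns
# sorted by increasing pressure so that np.interp behaves.
#
#   pc2_central = ...   # central pressure / c^2, same units as eos[0]
#   m, r, l, i, mb = integrate(pc2_central, eos)
#   # m, mb in Msun; r in km; l dimensionless; i in units of 1e45 g cm^2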
#-------------------------------------------------
### light-weight solver that only includes M and R
MACRO_COLS_MR = ['M', 'R']
def dvecdr_MR(vec, r, eos):
'''returns d(p, m)/dr
expects: pressurec2, energy_densityc2 = eos
'''
pc2, m = vec
    epsc2 = np.interp(pc2, eos[0], eos[1])
# MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2021
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import pytest
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch import optim
from torch.autograd import Variable
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from scipy.stats import pearsonr
from art.attacks.evasion import LowProFool
from art.estimators.classification.scikitlearn import ScikitlearnLogisticRegression
from art.estimators.classification import PyTorchClassifier
from art.estimators.classification.scikitlearn import ScikitlearnSVC
from tests.utils import ARTTestException
logger = logging.getLogger(__name__)
@pytest.fixture
def splitter():
return StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=0)
@pytest.fixture
def iris_dataset(splitter):
iris = datasets.load_iris()
design_matrix = pd.DataFrame(data=iris["data"], columns=iris["feature_names"])
labels = pd.Series(data=iris["target"])
scaler = StandardScaler().fit(design_matrix)
design_matrix = pd.DataFrame(data=scaler.transform(design_matrix), columns=design_matrix.columns)
clip_values = (design_matrix.min(), design_matrix.max())
[[train_idx, valid_idx]] = list(splitter.split(design_matrix, labels))
x_train = design_matrix.iloc[train_idx].copy()
x_valid = design_matrix.iloc[valid_idx].copy()
y_train = labels.iloc[train_idx].copy()
y_valid = labels.iloc[valid_idx].copy()
return (x_train, y_train, x_valid, y_valid), scaler, clip_values
@pytest.fixture
def breast_cancer_dataset(splitter):
cancer = datasets.load_breast_cancer()
design_matrix = pd.DataFrame(data=cancer["data"], columns=cancer["feature_names"])
labels = pd.Series(data=cancer["target"])
scaler = StandardScaler().fit(design_matrix)
design_matrix = pd.DataFrame(data=scaler.transform(design_matrix), columns=design_matrix.columns)
clip_values = (design_matrix.min(), design_matrix.max())
[[train_idx, valid_idx]] = list(splitter.split(design_matrix, labels))
x_train = design_matrix.iloc[train_idx].copy()
x_valid = design_matrix.iloc[valid_idx].copy()
y_train = labels.iloc[train_idx].copy()
y_valid = labels.iloc[valid_idx].copy()
return (x_train, y_train, x_valid, y_valid), scaler, clip_values
@pytest.fixture
def wine_dataset(splitter):
wine = datasets.load_wine()
design_matrix = pd.DataFrame(data=wine["data"], columns=wine["feature_names"])
labels = pd.Series(data=wine["target"])
scaler = StandardScaler().fit(design_matrix)
design_matrix = pd.DataFrame(data=scaler.transform(design_matrix), columns=design_matrix.columns)
clip_values = (design_matrix.min(), design_matrix.max())
[[train_idx, valid_idx]] = list(splitter.split(design_matrix, labels))
x_train = design_matrix.iloc[train_idx].copy()
x_valid = design_matrix.iloc[valid_idx].copy()
y_train = labels.iloc[train_idx].copy()
y_valid = labels.iloc[valid_idx].copy()
return (x_train, y_train, x_valid, y_valid), scaler, clip_values
class NeuralNetwork:
def __init__(self):
self.loss_fn = torch.nn.MSELoss(reduction="sum")
@staticmethod
def get_nn_model(input_dimensions, output_dimensions, hidden_neurons):
return torch.nn.Sequential(
nn.Linear(input_dimensions, hidden_neurons),
nn.ReLU(),
nn.Linear(hidden_neurons, output_dimensions),
nn.Softmax(dim=1),
)
def train_nn(self, nn_model, x, y, learning_rate, epochs):
optimizer = optim.SGD(nn_model.parameters(), lr=learning_rate)
for _ in range(epochs):
y_pred = nn_model.forward(x)
loss = self.loss_fn(y_pred, y)
nn_model.zero_grad()
loss.backward()
optimizer.step()
def test_general_iris_lr(iris_dataset):
"""
Check whether the produced adversaries are correct,
given Logistic Regression model and iris flower dataset.
"""
(x_train, y_train, x_valid, y_valid), _, clip_values = iris_dataset
# Setup classifier.
lr_clf = LogisticRegression(penalty="none")
lr_clf.fit(x_train, y_train)
clf_slr = ScikitlearnLogisticRegression(model=lr_clf, clip_values=clip_values)
lpf_slr = LowProFool(classifier=clf_slr, n_steps=25, eta=0.02, lambd=1.5)
lpf_slr.fit_importances(x_train, y_train)
sample = x_valid
# Draw targets different from the original labels and then save as one-hot encoded.
target = np.eye(3)[np.array(y_valid.apply(lambda x: np.random.choice([i for i in [0, 1, 2] if i != x])))]
adversaries = lpf_slr.generate(x=sample, y=target)
expected = np.argmax(target, axis=1)
predicted = np.argmax(lr_clf.predict_proba(adversaries), axis=1)
correct = expected == predicted
success_rate = np.sum(correct) / correct.shape[0]
expected = 0.75
logger.info(
"[Irises, Scikit-learn Logistic Regression] success rate of adversarial attack (expected >{:.2f}): "
"{:.2f}%".format(expected * 100, success_rate * 100)
)
assert success_rate > expected
def test_general_wines_lr(wine_dataset):
"""
Check whether the produced adversaries are correct,
given Logistic Regression classifier and sklearn wines dataset.
"""
(x_train, y_train, x_valid, y_valid), _, clip_values = wine_dataset
# Setup classifier
lr_clf = LogisticRegression(penalty="none")
lr_clf.fit(x_train, y_train)
clf_slr = ScikitlearnLogisticRegression(model=lr_clf, clip_values=clip_values)
lpf_slr = LowProFool(classifier=clf_slr, n_steps=80, eta=0.1, lambd=1.25)
lpf_slr.fit_importances(x_train, y_train)
sample = x_valid
# Draw targets different from original labels and then save as one-hot encoded.
    target = np.eye(3)[np.array(y_valid.apply(lambda x: np.random.choice([i for i in [0, 1, 2] if i != x])))]
import numpy as np
import pandas as pd
import torch
from pathlib import Path
from PIL import Image
#from wilds.common.metrics.all_metrics import MultiTaskAccuracy
from wilds.datasets.wilds_dataset import WILDSDataset
class GWHDDataset(WILDSDataset):
"""
The GWHD-wilds wheat head localization dataset.
This is a modified version of the original Global Wheat Head Dataset.
This dataset is not part of the official WILDS benchmark.
We provide it for convenience and to reproduce observations discussed in the WILDS paper.
Supported `split_scheme`:
'official' for WILDS related tasks.
To reproduce the baseline, several splits are needed:
- to train a model on train domains and test against a all test split: 'train_in-dist'
- to train a model on a portion of a specific val or test domain and test it against the remaining portion:
"{domain}_in-dist" where domain is the id of a domain (usask_1, uq_1, utokyo_1, utokyo_2, nau_1)
no validation datasets are accessible for the baseline splits
Input (x):
1024x1024 RGB images of wheat field canopy between flowering and ripening.
Output (y):
y is a nx4-dimensional vector where each line represents a box coordinate (top-x,top-y,height,width)
Metadata:
Each image is annotated with the ID of the domain it came from (integer from 0 to 10).
Website:
http://www.global-wheat.com/
Original publication:
@article{david_global_2020,
title = {Global {Wheat} {Head} {Detection} ({GWHD}) {Dataset}: {A} {Large} and {Diverse} {Dataset} of {High}-{Resolution} {RGB}-{Labelled} {Images} to {Develop} and {Benchmark} {Wheat} {Head} {Detection} {Methods}},
volume = {2020},
url = {https://doi.org/10.34133/2020/3521852},
doi = {10.34133/2020/3521852},
journal = {Plant Phenomics},
author = {<NAME>, Pouria and <NAME> and <NAME> and <NAME> and Kirchgessner, Norbert and <NAME> and <NAME> and <NAME>. and <NAME> and <NAME>, Benoit and Hund, Andreas and Chapman, <NAME>. and <NAME> and <NAME> and <NAME>},
month = aug,
year = {2020},
note = {Publisher: AAAS},
pages = {3521852},
}
License:
This dataset is distributed under the MIT license.
https://github.com/snap-stanford/ogb/blob/master/LICENSE
"""
def __init__(self, root_dir='data', download=False, split_scheme='official'):
self._dataset_name = 'gwhd'
self._version = '1.0'
self._original_resolution = (1024, 1024)
self._download_url = 'https://worksheets.codalab.org/rest/bundles/0x42fa9775eacc453489a428abd59a437d/contents/blob/'
self._data_dir = self.initialize_data_dir(root_dir, download)
self.root = Path(self.data_dir)
self._split_scheme = split_scheme
# Get filenames
self._split_scheme = split_scheme
if split_scheme =="official":
train_data_df = pd.read_csv(self.root / f'{split_scheme}_train.csv')
val_data_df = pd.read_csv(self.root / f'{split_scheme}_val.csv')
test_data_df = pd.read_csv(self.root / f'{split_scheme}_test.csv')
elif split_scheme == "train_in-dist":
train_data_df = pd.read_csv(self.root / f'official_train.csv')
test_data_df = pd.read_csv(self.root / f'{split_scheme}_test.csv')
val_data_df = pd.DataFrame(columns=["image","labels","group"])
elif split_scheme in [f"{domain}_in-dist" for domain in ["nau_1", "utokyo_1", "utokyo_2", "usask_1" , "uq_1"]]:
train_data_df = pd.read_csv(self.root / f'{split_scheme}_train.csv')
test_data_df = pd.read_csv(self.root / f'{split_scheme}_test.csv')
val_data_df = pd.DataFrame(columns=["image","labels","group"])
elif split_scheme == "in-dist":
train_data_df = pd.read_csv(self.root / f'{split_scheme}_train.csv')
test_data_df = pd.read_csv(self.root / f'{split_scheme}_test.csv')
val_data_df = pd.DataFrame(columns=["image","labels","group"])
self._image_array = []
self._split_array, self._y_array, self._metadata_array = [], [], []
for i, df in enumerate([train_data_df, val_data_df, test_data_df]):
self._image_array.extend(list(df['image'].values))
labels = list(df['labels'].values)
self._split_array.extend([i] * len(labels))
labels = [{"boxes": torch.stack([ torch.tensor([int(i) for i in box.split(" ")]) for box in boxes.split(";")]) ,"labels": torch.tensor([1.]*len(list(boxes.split(";")))).long() } if type(boxes) != float else {"boxes":torch.empty(0,4),"labels":torch.empty(0,1,dtype=torch.long)} for boxes in labels]
self._y_array.extend(labels)
self._metadata_array.extend(list(df['group'].values))
self._y_size = 1
self._metadata_fields = ["domain"]
        self._split_array = np.array(self._split_array)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 11 14:45:29 2019
@author: txuslopez
"""
'''
This script is a RUN function that uses the cellular automaton defined in 'biosystem.py' to classify data from the popular Iris Flower dataset. The error between predicted results is then calculated and compared to other models.
'''
#import os
#os.system("%matplotlib inline")
from skmultiflow.trees.hoeffding_tree import HoeffdingTree
from skmultiflow.lazy.knn import KNN
from copy import deepcopy
from skmultiflow.drift_detection.adwin import ADWIN
#from collections import deque
from skmultiflow.drift_detection.eddm import EDDM
from skmultiflow.drift_detection.ddm import DDM
from skmultiflow.bayes import NaiveBayes
from skmultiflow.drift_detection.page_hinkley import PageHinkley
#from sklearn import preprocessing
from timeit import default_timer as timer
from sklearn.model_selection import RandomizedSearchCV,GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
#from skmultiflow.data import DataStream
#from pylatex import Document, LongTable, MultiColumn
from CA_VonNeumann_estimator import CA_VonNeumann_Classifier
from sklearn import preprocessing
#import matplotlib.animation as animat; animat.writers.list()
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import csv
import warnings
import pickle
import psutil
import sys
import traceback
import logging
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.rcParams.update({'font.size': 12})
#mpl.rcParams['lines.linewidth'] = 2.0
#style.use('seaborn-dark') #sets the size of the charts
#style.use('ggplot')
#==============================================================================
# CLASSES
#==============================================================================
#==============================================================================
# FUNCTIONS
#==============================================================================
def empty_mutant(b):
invB = np.flip(b, axis=0)
empty = [0]
for b in invB:
build = deepcopy(empty)
empty = []
for i in range(0,b):
empty.append(build)
return np.array(empty).tolist()
def empties(b):
invB = np.flip(b, axis=0)
empty = []
for b in invB:
build = deepcopy(empty)
empty = []
for i in range(0,b):
empty.append(build)
return np.array(empty).tolist()
def plot_CA_boundaries_allCAs(cellular_aut,ca_names,punto,num_automs,buch_X,buch_y,X_columns,y_columns,mutant_cs,mutants_t,mutants_d):#mutants_w
images=[]
for ca in range(num_automs):
dim=cellular_aut[ca].dimensions
# Create image arrays
img = deepcopy(empties(dim))
# Set variables to model results
cells = cellular_aut[ca].cells
for i in range(0, len(cells)):
for j in range(0, len(cells)):
if cells[i][j]:
s = cells[i][j][0].species
if int(s)==0:
rgb = [255, 157, 137]#254,232,138-99,194,131
else:
rgb = [255, 82, 115]#196,121,0-99,100,139
img[i][j] = rgb
else:
img[i][j] = [255,255,255]
# Convert image arrays to appropriate data types
rotated_img= np.rot90(img, 1)
img = np.array(rotated_img, dtype='uint8')
images.append(img)
# Show the results
# fig, (ax1,ax2,ax3,ax4) = plt.subplots(1, 4, figsize=(14, 7))
fig = plt.figure(figsize=(30, 15))
# ax1 = fig.add_subplot(1,5,1, aspect=1.0)
buch_pd_X=pd.DataFrame(buch_X)
buch_pd_X.columns=X_columns
buch_pd_y=pd.DataFrame(buch_y)
buch_pd_y.columns=[y_columns]
todo=pd.concat([buch_pd_X,buch_pd_y],axis=1)
X1=todo[todo[y_columns]==0]
X2=todo[todo[y_columns]==1]
# X3=todo[todo['class']==2]
# Data Subplot
ax1 = fig.add_subplot(1,5,1,aspect=0.8)
# ax1.set_xlim([0.0,1.0])
# ax1.set_ylim([0.0,1.0])
ax1.set_xlabel('$x_1$',fontsize=22)
ax1.set_ylabel('$x_2$',fontsize=22)
ax1.title.set_text('Learned instances')
ax1.scatter(X1.iloc[:,0], X1.iloc[:,1], color='#ff9d89', marker='.',edgecolors='k',linewidths=0.0, s=200)#FEE88A-#63c283
ax1.scatter(X2.iloc[:,0], X2.iloc[:,1], color='#ff5273', marker='.',edgecolors='k',linewidths=0.0, s=200)#C47900-#63648b
if num_automs==1:
ax2_t = fig.add_subplot(1,5,2)
elif num_automs==2:
ax2_t = fig.add_subplot(1,5,2)
ax3_t = fig.add_subplot(1,5,3)
if num_automs==1:
ax2_t.set_xticks([], [])
ax2_t.set_yticks([], [])
ax2_t.title.set_text('CURIE 2x10')
ax2_t.imshow(images[0])
flipped_mutants_t=np.flip(mutants_t[0],0)
rotated_mutant_t= np.rot90(flipped_mutants_t, 2)
elif num_automs==2:
ax2_t.set_xticks([], [])
ax2_t.set_yticks([], [])
ax2_t.title.set_text('CURIE 2x10')
ax2_t.imshow(images[0])
flipped_mutants_t=np.flip(mutants_t[0],0)
rotated_mutant_t= np.rot90(flipped_mutants_t, 2)
for i in range(0, len(rotated_mutant_t)):
for j in range(0, len(rotated_mutant_t)):
ax2_t.text(i,j,rotated_mutant_t[i][j][0],ha='center',va='center')
ax3_t.tick_params(axis='both', which='both', bottom='off', top='off', labelbottom='off', right='off', left='off', labelleft='off')
ax3_t.title.set_text(ca_names[1]+': t last mut')
ax3_t.imshow(images[1])
flipped_mutants_t=np.flip(mutants_t[1],0)
rotated_mutant_t= np.rot90(flipped_mutants_t, 2)
for i in range(0, len(rotated_mutant_t)):
for j in range(0, len(rotated_mutant_t)):
ax3_t.text(i,j,rotated_mutant_t[i][j][0],ha='center',va='center')
fig.tight_layout()
plt.savefig('current_image_'+str(punto)+'.svg')
plt.show()
def prequential_acc(predicted_class,Y_tst,PREQ_ACCS,t,f):
#Prequential accuracy
pred=0
if predicted_class==Y_tst:
pred=1
else:
pred=0
if t==0:
preqAcc=1
else:
preqAcc=(PREQ_ACCS[-1]+float((pred-PREQ_ACCS[-1])/(t-f+1)))
return preqAcc
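#Worked example of the prequential update above (illustrative numbers): if
#the previous prequential accuracy is 0.8, the new prediction is correct
#(pred=1), t=10 and f=1, then the update gives 0.8 + (1-0.8)/(10-1+1) = 0.82.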
#def cellular_automatas_naming(cellular_automatas):
#
# ca_names=[str()]*len(cellular_automatas)
# for ca in range(len(cellular_automatas)):
# ca_names[ca]=r'\texttt{sCA}$'
#
# return ca_names
#def automatas_Texttable(cellular_automatas,automatas_results_mean,automatas_results_std,bd,ad,drift_position,measure_position_after_drift,t_automatas,title,names):
#
# bd_automatas_mean=[[]]*len(cellular_automatas)
# bd_automatas_std=[[]]*len(cellular_automatas)
# for h in range(len(cellular_automatas)):
# bd_automatas_mean[h]=np.round((automatas_results_mean[h][bd]),3)
# bd_automatas_std[h]=np.round((automatas_results_std[h][bd]),3)
#
# d_automatas_mean=[[]]*len(cellular_automatas)
# d_automatas_std=[[]]*len(cellular_automatas)
# for h in range(len(cellular_automatas)):
# d_automatas_mean[h]=np.round((automatas_results_mean[h][measure_position_after_drift]),3)
# d_automatas_std[h]=np.round((automatas_results_std[h][measure_position_after_drift]),3)
#
# ad_automatas_mean=[[]]*len(cellular_automatas)
# ad_automatas_std=[[]]*len(cellular_automatas)
# for h in range(len(cellular_automatas)):
# ad_automatas_mean[h]=np.round((automatas_results_mean[h][ad]),3)
# ad_automatas_std[h]=np.round((automatas_results_std[h][ad]),3)
#
# for h in range(len(cellular_automatas)):
# t_automatas.add_rows([['AUTOMATAS_'+title, 'Accuracy BD','Accuracy D','Accuracy AD'],[str(names[h]),str(bd_automatas_mean[h])+str('+-')+str(bd_automatas_std[h]),str(d_automatas_mean[h])+str('+-')+str(d_automatas_std[h]),str(ad_automatas_mean[h])+str('+-')+str(ad_automatas_std[h])]])
#
# print (t_automatas.draw())
def plot_automatas(size_X,size_Y,color,font_size,title,ca_name,no_scores,drift_pos,mean_scores):
fig, axes = plt.subplots(1,1,figsize=(size_X,size_Y))
plt.title(title,size=font_size)
axes.set_xlabel(r't',size=font_size)
axes.set_ylabel(r'Prequential accuracy',size=font_size)
plt.ylim(0.0,1.0)
axes.set_xlim(0,len(mean_scores))
axes.plot(mean_scores,color='b',label=ca_name,linestyle='-')
axes.axvspan(0, no_scores, alpha=0.5, color='#C47900')
for ps in range(len(drift_pos)):
axes.axvline(x=drift_pos[ps],color='k', linestyle='-')
plt.show()
#def plot_learners(size_X,size_Y,color,font_size,title,learner_name,no_scores,drift_pos,mean_scores,stds_scores):
#
# fig, axes = plt.subplots(1,1,figsize=(size_X,size_Y))
#
# plt.title(title,size=font_size)
# axes.set_xlabel(r't',size=font_size)
# axes.set_ylabel(r'Prequential accuracy',size=font_size)
# plt.ylim(0.0,1.0)
# axes.set_xlim(0,len(mean_scores))
#
# axes.plot(mean_scores,color='b',label=learner_name,linestyle='-')
# axes.fill_between(range(len(mean_scores)), mean_scores-stds_scores, mean_scores+stds_scores,facecolor='#C47900', alpha=0.1)
#
# axes.axvspan(0, no_scores, alpha=0.5, color='#C47900')
#
# for ps in range(len(drift_pos)):
# axes.axvline(x=drift_pos[ps],color='k', linestyle='-')
#
# plt.show()
def get_neighbourhood(matrix, coordinates, distance):
dimensions = len(coordinates)
neigh = []
app = neigh.append
def recc_von_neumann(arr, curr_dim=0, remaining_distance=distance, isCenter=True):
#the breaking statement of the recursion
if curr_dim == dimensions:
if not isCenter:
app(arr)
return
dimensions_coordinate = coordinates[curr_dim]
if not (0 <= dimensions_coordinate < len(arr)):
return
dimesion_span = range(dimensions_coordinate - remaining_distance,
dimensions_coordinate + remaining_distance + 1)
for c in dimesion_span:
if 0 <= c < len(arr):
recc_von_neumann(arr[c],
curr_dim + 1,
remaining_distance - abs(dimensions_coordinate - c),
isCenter and dimensions_coordinate == c)
return
recc_von_neumann(matrix)
return neigh
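#Illustrative sketch of the von Neumann neighbourhood helper above (toy
#values): on a 2-D grid with distance=1 it returns the 4 orthogonal
#neighbours of the given cell, e.g.
#
# grid = [[11, 12, 13], [21, 22, 23], [31, 32, 33]]
# get_neighbourhood(grid, (1, 1), 1) # -> [12, 21, 23, 32]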
#def prequential_mut_calc(m,alpha,t,prev_fading_sum,prev_fading_increment):
#
# f_sum=m+(alpha*prev_fading_sum)
# f_increment=1+(alpha*prev_fading_increment)
# preq_mut=f_sum/f_increment
#
# return preq_mut
def hyperparametertuning_classifiers(learn,X,y,knn_max_w_size):
cl_name=learn.__class__.__name__
# print (cl_name)
scor='balanced_accuracy'
cv=10
if cl_name=='KNN':
KNN_grid = {'n_neighbors': [3,5,7,10,15],
'leaf_size': [3,5,7,10,15],
'algorithm':['kd_tree']
}
grid_cv_KNN = GridSearchCV(estimator=KNeighborsClassifier(), cv=cv,scoring=scor,param_grid=KNN_grid)
# grid_cv_KNN = RandomizedSearchCV(estimator=KNeighborsClassifier(), cv=cv,scoring=scor,param_distributions=KNN_grid)
grid_cv_KNN.fit(X.as_matrix(),y.as_matrix().ravel())
# print('grid_cv_KNN.best_params_: ',grid_cv_KNN.best_params_)
n_neighbors=grid_cv_KNN.best_params_['n_neighbors']
leaf_size=grid_cv_KNN.best_params_['leaf_size']
tuned_params = {'n_neighbors': n_neighbors,'leaf_size': leaf_size,'max_window_size':knn_max_w_size}
tuned_learn=KNN()
tuned_learn.set_params(**tuned_params)
tuned_learn.fit(X.as_matrix(), y.as_matrix().ravel())
elif cl_name=='HoeffdingTree':
grace_period_range=np.array([25,75,150,300])
tie_threshold_range=np.linspace(0.001,1.0,5)
split_confidence_range=np.linspace(0.000000001,0.1,5)
split_criterion_range=['gini','info_gain', 'hellinger']
leaf_prediction_range=['mc','nb', 'nba']
HT_grid = {
'grace_period': grace_period_range,
'tie_threshold': tie_threshold_range,
'split_confidence': split_confidence_range,
'split_criterion':split_criterion_range,
'leaf_prediction':leaf_prediction_range
}
grid_cv_HT=GridSearchCV(estimator=learn,scoring=scor,cv=cv,param_grid=HT_grid)
# grid_cv_HT=RandomizedSearchCV(estimator=learn,scoring=scor,cv=cv,param_distributions=HT_grid)
grid_cv_HT.fit(X.as_matrix(), y.as_matrix().ravel())
# print('grid_cv_HT.best_params_: ',grid_cv_HT.best_params_)
tuned_params=grid_cv_HT.best_params_
tuned_learn=grid_cv_HT.best_estimator_
elif cl_name=='NaiveBayes':
tuned_params = {'nominal_attributes': None}
tuned_learn=NaiveBayes()
tuned_learn.set_params(**tuned_params)
tuned_learn.fit(X.as_matrix(), y.as_matrix().ravel())
# print('Final tuned algorithm: ',tuned_learn)
return tuned_learn,tuned_params
def progressbar(it, prefix="", size=60, file=sys.stdout):
count = len(it)
def show(j):
x = int(size*j/count)
file.write("%s[%s%s] %i/%i\r" % (prefix, "#"*x, "."*(size-x), j, count))
file.flush()
show(0)
for i, item in enumerate(it):
yield item
show(i+1)
file.write("\n")
file.flush()
#def genenerate_LatexTable():
# geometry_options = {
# "margin": "2.54cm",
# "includeheadfoot": True
# }
# doc = Document(page_numbers=True, geometry_options=geometry_options)
#
#
# # Generate data table
# with doc.create(LongTable("l l l")) as data_table:
#
# data_table.add_hline()
# data_table.add_row(["header 1", "header 2", "header 3"])
# data_table.add_hline()
# data_table.end_table_header()
# data_table.add_hline()
# data_table.add_row((MultiColumn(3, align='r',data='Continued on Next Page'),))
# data_table.add_hline()
# data_table.end_table_footer()
# data_table.add_hline()
# data_table.add_row((MultiColumn(3, align='r',data='Not Continued on Next Page'),))
# data_table.add_hline()
# data_table.end_table_last_footer()
#
# row = ["Content1", "9", "Longer String"]
# for i in range(3):
# data_table.add_row(row)
#
# doc.generate_pdf('ejemplo', clean_tex=False)
#
# doc.generate_pdf('synteticos', clean_tex=False)
#==============================================================================
# DATASETS REALES
#==============================================================================
#==============================================================================
# DATASETS SINTETICOS
#==============================================================================
#TXUS
#name_data='txus'
datasets=['sine','rt','mixed','sea','stagger']#['noaa']#['gmsc']#['poker']
tipos=['abrupto','gradual']#['real']
#noise=0.0
#==============================================================================
# VARIABLES
#==============================================================================
#CA
bins_margin=0.001
mutation_period=10#5,10,20,50
num_mutantneighs_fordetection=2#2-synthetics,4-real
preparatory_size=50#50. Para real=500
sliding_window_size=preparatory_size#50
radius=2#2
#row_ref=2
#column_ref=3
#==============================================================================
# MAIN
#==============================================================================
# Ignore warnings
warnings.simplefilter("ignore")
path_saving_results='//home//txuslopez//Insync//<EMAIL>//Google Drive//Dropbox//jlopezlobo//Publicaciones//ECML_2021//Results//F2//'
DAT_SCORES=[]
DAT_TIMES=[]
DAT_RAMS=[]
DAT_DETECTIONS=[]
for dats in datasets:
TIPO_SCORES=[]
TIPO_TIMES=[]
TIPO_RAMS=[]
TIPO_DETECTIONS=[]
for tipo in tipos:
if dats=='sine':
functions_order=[3,2,1,0]
functions_name_file=[3,2,1,0]
columns=['X1','X2','class']
file_name=str(dats)+'_'+str(functions_name_file[0])+str(functions_name_file[1])+str(functions_name_file[2])+str(functions_name_file[3])+'_'+str(tipo)
n_bins=20#divisiones por feature
elif dats=='rt':
functions_order=[2563,7896,9856,8873]
functions_name_file=[2563,7896,9856,8873]
columns=['X1','X2','class']
file_name=str(dats)+'_'+str(functions_name_file[0])+str(functions_name_file[1])+str(functions_name_file[2])+str(functions_name_file[3])+'_'+str(tipo)
            n_bins=20#divisions per feature
elif dats=='mixed':
functions_order=[1,0,1,0]
functions_name_file=[1,0,1,0]
columns=['X1','X2','X3','X4','class']
file_name=str(dats)+'_'+str(functions_name_file[0])+str(functions_name_file[1])+str(functions_name_file[2])+str(functions_name_file[3])+'_'+str(tipo)
            n_bins=10#divisions per feature
elif dats=='sea':
functions_order=[3,2,1,0]
functions_name_file=[3,2,1,0]
columns=['X1','X2','X3','class']
noise=0.2#0.0,0.2
file_name=str(dats)+'_'+str(functions_name_file[0])+str(functions_name_file[1])+str(functions_name_file[2])+str(functions_name_file[3])+'_'+str(tipo)+'_noise_'+str(noise)
            n_bins=10#divisions per feature
elif dats=='stagger':
functions_order=[2,1,0,2]
functions_name_file=[2,1,0,2]
columns=['X1','X2','X3','class']
            n_bins=10#divisions per feature
# elif dats=='noaa':
# columns=['X1','X2','X3','X4','X5','X6','X7','X8','class']
# n_bins=3#3#divisiones por feature
# elif dats=='gmsc':
# columns=['X1','X2','X3','X4','X5','X6','X7','X8','X9','X10','class']
# n_bins=3#3#divisiones por feature
# elif dats=='poker':
# columns=['X1','X2','X3','X4','X5','X6','X7','X8','X9','X10','class']
# n_bins=3#3#divisiones por feature
if tipo=='gradual':
drift_positions=[9500,20000,30500]
anch=1000
elif tipo=='abrupto':
drift_positions=[10000,20000,30000]
lengt_concept=9500
anch=1
# if dats=='noaa':
# path='/home/txuslopez/Insync/<EMAIL>/Google Drive/Dropbox/jlopezlobo/PY/multiflow_txus/scikit-multiflow-master/src/skmultiflow/data/datasets/weather.csv'
# raw_data= pd.read_csv(path, sep=',',header=0)
#
# x = raw_data.values
# min_max_scaler = preprocessing.MinMaxScaler()
# x_scaled = min_max_scaler.fit_transform(x)
# raw_data = pd.DataFrame(x_scaled)
#
# elif dats=='gmsc':
# path='/home/txuslopez/Insync/<EMAIL>/Google Drive/Dropbox/jlopezlobo/Data sets/Non stationary environments/GMSC/cs-training_Amazon_def.csv'
# raw_data = pd.read_csv(path, sep=',', header=0)
#
# raw_data = raw_data.drop('Unnamed: 0', 1)#Quitamos la primera columna
# raw_data=raw_data.dropna(how='any')#Se quitan las filas con Nan
# raw_data=raw_data[0:20000]#Limitar datos a 20k samples
# raw_data.columns=['RevolvingUtilizationOfUnsecuredLines', 'age',
# 'NumberOfTime30-59DaysPastDueNotWorse', 'DebtRatio', 'MonthlyIncome',
# 'NumberOfOpenCreditLinesAndLoans', 'NumberOfTimes90DaysLate',
# 'NumberRealEstateLoansOrLines', 'NumberOfTime60-89DaysPastDueNotWorse',
# 'NumberOfDependents', 'class']
#
#
# x = raw_data.values
# min_max_scaler = preprocessing.MinMaxScaler()
# x_scaled = min_max_scaler.fit_transform(x)
# raw_data = pd.DataFrame(x_scaled)
#
# elif dats=='poker':
# path='/home/txuslopez/Insync/<EMAIL>/Google Drive/Dropbox/jlopezlobo/Data sets/Non stationary environments/Poker_hand/norm.csv'
# raw_data = pd.read_csv(path, sep=',', header=None)
# raw_data=raw_data.iloc[np.random.permutation(len(raw_data))]
# raw_data=raw_data.iloc[:20000]
#
# else:
path='//home//txuslopez//Insync//<EMAIL>//Google Drive//Dropbox//jlopezlobo//Publicaciones//ECML_2021//Data//F2//'
raw_data= pd.read_csv(path +file_name+'.csv', sep=',',header=0)
#print(path +file_name+'.csv')
if dats=='sine' or dats=='rt':
caso=raw_data[raw_data.columns[0:3]]
XT=caso.iloc[:,0:2]
YT=caso.iloc[:,2]
elif dats=='mixed':
caso=raw_data[raw_data.columns[0:5]]
XT=caso.iloc[:,0:4]
YT=caso.iloc[:,4]
elif dats=='sea':
caso=raw_data[raw_data.columns[0:4]]
XT=caso.iloc[:,0:3]
YT=caso.iloc[:,3]
elif dats=='stagger':
caso=raw_data[raw_data.columns[0:4]]
XT=caso.iloc[:,0:3]
YT=caso.iloc[:,3]
elif dats=='noaa':
caso=raw_data[raw_data.columns[0:9]]
XT=caso.iloc[:,0:8]
YT=caso.iloc[:,8]
elif dats=='gmsc':
caso=raw_data[raw_data.columns[0:11]]
XT=caso.iloc[:,0:10]
YT=caso.iloc[:,10]
elif dats=='poker':
caso=raw_data[raw_data.columns[0:11]]
XT=caso.iloc[:,0:10]
YT=caso.iloc[:,10]
caso.columns=columns
columns=columns[:-1]# drop the 'class' column from here on
n_feats=len(columns)
#Data
features=pd.DataFrame(XT)
labels=pd.DataFrame(YT)
features.columns=columns
labels.columns=['class']
n_samples=XT.shape[0]-preparatory_size
######################## CURIE ###################
lst_dim=[n_bins]*n_feats
curie=CA_VonNeumann_Classifier(bins=[],bins_margin=bins_margin,dimensions=lst_dim, cells=empties(lst_dim))
limits_automata=list(np.zeros(1))
#ca_names=['CURIE']
mutants_time=empty_mutant(curie.dimensions)
######################## LEARNERS ###################
learners_ref=[HoeffdingTree(),KNN(),NaiveBayes()]
######################## DETECTORS ###################
detectores_ref=[DDM(),EDDM(),ADWIN(),PageHinkley(),curie]
n_pasos=len(datasets)*len(tipos)*len(learners_ref)*len(detectores_ref)
SCORES_LER=[]
TIMES_LER=[]
RAMS_LER=[]
DETECTIONS_LER=[]
for ler in range(len(learners_ref)):
learner=deepcopy(learners_ref[ler])
SCORES_DET=[]
TIMES_DET=[]
RAMS_DET=[]
DETECTIONS_DET=[]
for det in range(len(detectores_ref)):
scores_ler=[]
time_ler=0
ram_ler=0
f_ler=1
detections=[]
detector=deepcopy(detectores_ref[det])
for s in range(features.shape[0]):
sample=np.array(features.iloc[s,:]).reshape(1, -1)
lab=np.array(labels.iloc[s,:])
if s<preparatory_size-1:
scores_ler.append(np.nan)
# time_ler.append(np.nan)
# ram_ler.append(np.nan)
elif s==preparatory_size:
# print ('PREPARATORY PROCESS ...')
X_init=features.iloc[0:preparatory_size,:]
y_init=labels.iloc[0:preparatory_size,:]
#Hyperparameter tuning for learners
tuned_learner,tuned_params=hyperparametertuning_classifiers(learner,X_init,y_init,sliding_window_size)
learner=deepcopy(tuned_learner)
start_time = timer()#time.clock()
start_ram = psutil.virtual_memory().used#measured in bytes
learner.fit(X_init.as_matrix(), y_init.as_matrix().ravel())
#CURIE
if detector.__class__.__name__=='CA_VonNeumann_Classifier':
detector,lim_automat=detector.fit(X_init.as_matrix(), y_init.as_matrix().ravel())
process_time=timer()-start_time
process_ram=psutil.virtual_memory().used-start_ram
if process_ram<0:
process_ram=0
scores_ler.append(np.nan)
time_ler+=process_time
ram_ler+=process_ram
elif s>preparatory_size:
# print ('TEST-THEN-TRAIN PROCESS ...')
#Testing
start_time = timer()#time.clock()
start_ram = psutil.virtual_memory().used#measured in bytes
pred=learner.predict(sample)
process_time=timer()-start_time
process_ram=psutil.virtual_memory().used-start_ram
if process_ram<0:
process_ram=0
time_ler+=process_time
ram_ler+=process_ram
#Scoring
if str(scores_ler[-1])=='nan':
if pred==lab:
scores_ler.append(1.0)
else:
scores_ler.append(0.0)
else:
preqAcc=prequential_acc(pred,lab,scores_ler,s,f_ler)
scores_ler.append(preqAcc)
#Training
start_time = timer()#time.clock()
start_ram = psutil.virtual_memory().used#measured in bytes
learner.partial_fit(sample,lab)
process_time=timer()-start_time
process_ram=psutil.virtual_memory().used-start_ram
if process_ram<0:
process_ram=0
time_ler+=process_time
ram_ler+=process_ram
############
#DETECTION
############
change=False
start_time = timer()#time.clock()
start_ram = psutil.virtual_memory().used#measured in bytes
if detector.__class__.__name__=='CA_VonNeumann_Classifier':
#Train
detector,lim_automat,muta,indxs=detector.partial_fit(sample,lab,s,lim_automat)
if muta:
if dats=='sine' or dats=='rt':
mutants_time[indxs[0]][indxs[1]][0]=s
elif dats=='mixed':
mutants_time[indxs[0]][indxs[1]][indxs[2]][indxs[3]][0]=s
elif dats=='sea' or dats=='stagger':
mutants_time[indxs[0]][indxs[1]][indxs[2]][0]=s
#Look for a drift
vecinos_mutantes_drift=get_neighbourhood(mutants_time, indxs, radius)
num_mutantes_drift=0
ms=[]
for v in range(len(vecinos_mutantes_drift)):
if vecinos_mutantes_drift[v][0]>s-mutation_period and vecinos_mutantes_drift[v][0]<=s:
num_mutantes_drift+=1
ms.append(vecinos_mutantes_drift[v][0])
#If there is a drift:
if num_mutantes_drift>=num_mutantneighs_fordetection:
change=True
#Adaptation
mutants_time=empty_mutant(detector.dimensions)
X_init=features.iloc[s-preparatory_size:s,:]
y_init=labels.iloc[s-preparatory_size:s,:]
detector=deepcopy(curie)
detector,lim_automat=detector.fit(X_init.as_matrix(), y_init.as_matrix().ravel())
else:
if pred==lab:
detector.add_element(1)
else:
detector.add_element(0)
if detector.detected_change():
change=True
if change:
############
#ADAPTATION
############
f_ler=s
detections.append(s)
#Reset the detector
detector=deepcopy(detectores_ref[det])
X_init=features.iloc[s-preparatory_size:s,:]
y_init=labels.iloc[s-preparatory_size:s,:]
learner=deepcopy(learners_ref[ler])
learner.set_params(**tuned_params)
learner.fit(X_init.as_matrix(), y_init.as_matrix().ravel())
process_time=timer()-start_time
process_ram=psutil.virtual_memory().used-start_ram
if process_ram<0:
process_ram=0
time_ler+=process_time
ram_ler+=process_ram
SCORES_DET.append(scores_ler)
TIMES_DET.append(time_ler)
RAMS_DET.append(ram_ler)
DETECTIONS_DET.append(detections)
SCORES_LER.append(SCORES_DET)
TIMES_LER.append(TIMES_DET)
RAMS_LER.append(RAMS_DET)
DETECTIONS_LER.append(DETECTIONS_DET)
TIPO_SCORES.append(SCORES_LER)
TIPO_TIMES.append(TIMES_LER)
TIPO_RAMS.append(RAMS_LER)
TIPO_DETECTIONS.append(DETECTIONS_LER)
DAT_SCORES.append(TIPO_SCORES)
DAT_TIMES.append(TIPO_TIMES)
DAT_RAMS.append(TIPO_RAMS)
DAT_DETECTIONS.append(TIPO_DETECTIONS)
######################## SAVING ########################
output = open(path_saving_results+'DAT_SCORES_'+dats+'.pkl', 'wb')
pickle.dump(DAT_SCORES, output)
output.close()
output = open(path_saving_results+'DAT_TIMES_'+dats+'.pkl', 'wb')
pickle.dump(DAT_TIMES, output)
output.close()
output = open(path_saving_results+'DAT_RAMS_'+dats+'.pkl', 'wb')
pickle.dump(DAT_RAMS, output)
output.close()
output = open(path_saving_results+'DAT_DETECTIONS_'+dats+'.pkl', 'wb')
pickle.dump(DAT_DETECTIONS, output)
output.close()
######################## SUMMARY ########################
for ds in range(len(datasets)):
print('######## DATASET: ',datasets[ds])
######################## LOADING RESULTS AND METRICS ########################
fil = open(path_saving_results+'DAT_SCORES_'+dats+'.pkl','rb')
DAT_SCORES = pickle.load(fil)
fil.close()
fil = open(path_saving_results+'DAT_TIMES_'+dats+'.pkl','rb')
DAT_TIMES = pickle.load(fil)
fil.close()
fil = open(path_saving_results+'DAT_RAMS_'+dats+'.pkl','rb')
DAT_RAMS = pickle.load(fil)
fil.close()
fil = open(path_saving_results+'DAT_DETECTIONS_'+dats+'.pkl','rb')
DAT_DETECTIONS = pickle.load(fil)
fil.close()
dat_score=DAT_SCORES[ds]
dat_time=DAT_TIMES[ds]
dat_ram=DAT_RAMS[ds]
dat_detections=DAT_DETECTIONS[ds]
for tip in range(len(tipos)):
print('###### TIPO: ',tipos[tip])
tipo_score=dat_score[tip]
tipo_times=dat_time[tip]
tipo_rams=dat_ram[tip]
tipo_detections=dat_detections[tip]
for l in range(len(learners_ref)):
print('#### LEARNER: ',learners_ref[l].__class__.__name__)
scores_ler=tipo_score[l]
times_ler=tipo_times[l]
rams_ler=tipo_rams[l]
detections_ler=tipo_detections[l]
for d in range(len(detectores_ref)):
print('## DETECTOR: ',detectores_ref[d].__class__.__name__)
scores_det=scores_ler[d]
times_det=times_ler[d]
rams_det=rams_ler[d]
detections_det=detections_ler[d]
print('')
print('-MEAN PREQ.ACC: ',np.nanmean(scores_det))
print('-TIME: ',np.nanmean(times_det))
print('-RAM: ',np.nanmean(rams_det))
print('-RAM-Hour: ',(np.nanmean(rams_det)/1073741824)*(np.nanmean(times_det)/360))#bytes/secs to gigas/hours)
print('-DETECTIONS: ',detections_det)
print('')
with open(path_saving_results+'temp.csv', mode='w') as results_synths:
results_synths_writer = csv.writer(results_synths, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
results_synths_writer.writerow(['Dataset','Type','Learner','Detector','pACC','RAM-Hours','TP','FP','TN','FN','UD','Precision','Recall','MCC'])
# try:
friedman_DDM_preqacc=[]
friedman_DDM_ramhours=[]
friedman_DDM_ud=[]
friedman_DDM_mcc=[]
friedman_EDDM_preqacc=[]
friedman_EDDM_ramhours=[]
friedman_EDDM_ud=[]
friedman_EDDM_mcc=[]
friedman_ADWIN_preqacc=[]
friedman_ADWIN_ramhours=[]
friedman_ADWIN_ud=[]
friedman_ADWIN_mcc=[]
friedman_PH_preqacc=[]
friedman_PH_ramhours=[]
friedman_PH_ud=[]
friedman_PH_mcc=[]
friedman_CURIE_preqacc=[]
friedman_CURIE_ramhours=[]
friedman_CURIE_ud=[]
friedman_CURIE_mcc=[]
for ds in range(len(datasets)):
dat_score=DAT_SCORES[ds]
dat_time=DAT_TIMES[ds]
dat_ram=DAT_RAMS[ds]
dat_detections=DAT_DETECTIONS[ds]
for tip in range(len(tipos)):
tipo_score=dat_score[tip]
tipo_times=dat_time[tip]
tipo_rams=dat_ram[tip]
tipo_detections=dat_detections[tip]
for l in range(len(learners_ref)):
scores_ler=tipo_score[l]
times_ler=tipo_times[l]
rams_ler=tipo_rams[l]
detections_ler=tipo_detections[l]
for det in range(len(detectores_ref)):
scores_det=scores_ler[det]
times_det=times_ler[det]
rams_det=rams_ler[det]
detections_det=detections_ler[det]
if tipos[tip]=='abrupto' or tipos[tip]=='gradual':
if tipos[tip]=='abrupto':
detection_margin=0.02
elif tipos[tip]=='gradual':
detection_margin=0.1
lear_tp=0
lear_fp=0
lear_tn=0
lear_fn=0
lear_mcc=0
lear_udd=0
cont_udd=0
for d in detections_det:
#Checking BEFORE drift 1
if d<drift_positions[0]:
lear_fp+=1
#Checking drift 1
elif d>drift_positions[0] and d<drift_positions[1] and d-drift_positions[0]<=detection_margin*lengt_concept:
lear_tp+=1
lear_udd+=(d-drift_positions[0])
cont_udd+=1
elif d>drift_positions[0] and d<drift_positions[1] and d-drift_positions[0]>detection_margin*lengt_concept:
lear_fp+=1
#Checking drift 2
elif d>drift_positions[1] and d<drift_positions[2] and d-drift_positions[1]<=detection_margin*lengt_concept:
lear_tp+=1
lear_udd+=(d-drift_positions[1])
cont_udd+=1
elif d>drift_positions[1] and d<drift_positions[2] and d-drift_positions[1]>detection_margin*lengt_concept:
lear_fp+=1
#Checking drift 3
elif d>drift_positions[2] and d-drift_positions[2]<=detection_margin*lengt_concept:
lear_tp+=1
lear_udd+=(d-drift_positions[2])
cont_udd+=1
elif d>drift_positions[2] and d-drift_positions[2]>detection_margin*lengt_concept:
lear_fp+=1
lear_tn=n_samples-len(detections_det)
lear_fn=len(drift_positions)-lear_tp
if lear_fn<0:
lear_fn=0
if cont_udd>0:
lear_udd=np.round(lear_udd/cont_udd,2)
else:
lear_udd=np.inf
if (lear_tp+lear_fp)==0:
lear_precision=0.0
else:
lear_precision=lear_tp/(lear_tp+lear_fp)
if (lear_tp+lear_fn)==0:
lear_recall=0.0
else:
lear_recall=lear_tp/(lear_tp+lear_fn)
if np.sqrt((lear_tp+lear_fp)*(lear_tp+lear_fn)*(lear_tn+lear_fp)*(lear_tn+lear_fn))==0:
lear_mcc=0.0
else:
lear_mcc=((lear_tp*lear_tn)-(lear_fp*lear_fn))/np.sqrt((lear_tp+lear_fp)*(lear_tp+lear_fn)*(lear_tn+lear_fp)*(lear_tn+lear_fn))
lear_ram_hours=(rams_det/1073741824)*(times_det/360)#bytes/secs to gigas/hours
results_synths_writer.writerow([datasets[ds],tipos[tip],learners_ref[l].__class__.__name__,detectores_ref[det].__class__.__name__,np.round(np.nanmean(scores_det),2),np.round(lear_ram_hours,6),lear_tp,lear_fp,lear_tn,lear_fn,np.round(lear_udd,2),np.round(lear_precision,2),np.round(lear_recall,2),np.round(lear_mcc,2)])
###### FRIEDMAN
if detectores_ref[det].__class__.__name__=='DDM':
friedman_DDM_preqacc.append(np.round(np.nanmean(scores_det),2))
friedman_DDM_ramhours.append(np.round(lear_ram_hours,6))
friedman_DDM_ud.append(np.round(lear_udd,2))
friedman_DDM_mcc.append(np.round(lear_mcc,2))
elif detectores_ref[det].__class__.__name__=='EDDM':
friedman_EDDM_preqacc.append(np.round(np.nanmean(scores_det),2))
friedman_EDDM_ramhours.append(np.round(lear_ram_hours,6))
friedman_EDDM_ud.append(np.round(lear_udd,2))
friedman_EDDM_mcc.append(np.round(lear_mcc,2))
elif detectores_ref[det].__class__.__name__=='ADWIN':
friedman_ADWIN_preqacc.append(np.round(np.nanmean(scores_det),2))
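# --- Illustrative sketch (not part of the original experiment script) ---
# prequential_acc() is called above but not defined in this excerpt. A common
# definition restarts the running average at the last reset/drift point f:
# acc_s = acc_{s-1} + (correct_s - acc_{s-1}) / (s - f + 1).
# The helper below is an assumption about that behaviour, not the authors' code.
def prequential_acc_sketch(pred, lab, scores, s, f):
    correct = 1.0 if pred == lab else 0.0   # pred/lab assumed to be scalar labels here
    prev = scores[-1]                       # previous prequential accuracy
    return prev + (correct - prev) / (s - f + 1)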
import numpy as np
from skimage import draw
import gym
import pygame
class EnvWrapper(gym.Env):
def __init__(self, env, debug, args):
self.wrapped_env = env
self.metadata = env.metadata
self.made_screen = False
self.debug = debug
self.scaling = args.render_scaling
self.args = args
if self.args.episode_limit > 0:
try:
self.wrapped_env.unwrapped.update_limit(self.args.episode_limit)
except AttributeError:
print("Cant update the episode limit for this env")
def step(self, a):
return self.wrapped_env.step(a)
def reset(self):
return self.wrapped_env.reset()
def render(self, mode="human", close=False, debug_info={}):
return self.debug_render(mode=mode, close=close, debug_info=debug_info)
def visits_and_frontier_overlayed(self, log=False):
try:
return self.wrapped_env.unwrapped.visits_and_frontier_states()
except AttributeError:
if log:
print("No visits and frontier vis here")
def xp_and_frontier_overlayed(self, log=False):
try:
return self.wrapped_env.unwrapped.xp_and_frontier_states()
except AttributeError:
if log:
print("No xp and frontier vis here")
def bonus_xp_and_frontier_overlayed(self, log=False):
try:
return self.wrapped_env.unwrapped.bonus_xp_and_frontier_states()
except AttributeError:
if log:
print("No bonus_xp and frontier vis here")
def trained_on_states(self, states, log=False):
try:
return self.wrapped_env.unwrapped.trained_on_states(states, self.args)
except AttributeError:
if log:
print("No trained on states vis for this environment")
def xp_replay_states(self, player_positions, log=False, bonus_replay=False):
try:
return self.wrapped_env.unwrapped.xp_replay_states(
player_positions, self.args, bonus_replay=bonus_replay
)
except AttributeError:
if log:
print("No xp replay states for this environment")
def visitations(self, player_positions, log=False):
try:
return self.wrapped_env.unwrapped.player_visits(player_positions, self.args)
except AttributeError:
if log:
print("No visitations for this environment")
def frontier(self, exp_model, max_bonus, log=False):
try:
return self.wrapped_env.unwrapped.frontier(exp_model, self.args, max_bonus)
except AttributeError:
if log:
print("No frontier for this environment")
def explorations(self, player_positions, exploration_bonuses, max_bonus, log=False):
try:
return self.wrapped_env.unwrapped.bonus_landscape(
player_positions, exploration_bonuses, max_bonus, self.args
)
except AttributeError:
if log:
print("No bonus landscape for this environment")
def log_visitation(self):
try:
return self.wrapped_env.unwrapped.log_player_pos()
except AttributeError:
pass
def state_to_player_pos(self, state):
try:
return self.wrapped_env.unwrapped.state_to_player_pos(state)
except AttributeError:
pass
def state_to_image(self, state):
try:
return self.wrapped_env.unwrapped.state_to_image(state)
except AttributeError:
return self.debug_render(mode="rgb_array")
def count_state_action_space(self, count_model):
try:
return self.wrapped_env.unwrapped.count_state_action_space(count_model, self.args)
except AttributeError:
pass
def q_value_estimates(self, count_model, nn):
try:
return self.wrapped_env.unwrapped.q_value_estimates(count_model, nn, self.args)
except AttributeError:
pass
def state_counts(self):
try:
return self.wrapped_env.unwrapped.counts
except Exception:
pass
def debug_render(self, debug_info={}, mode="human", close=False):
if self.debug:
if mode == "human":
if close:
pygame.quit()
return
if "human" in self.wrapped_env.metadata["render.modes"]:
self.wrapped_env.render(mode="human")
rgb_array = self.debug_render(debug_info, mode="rgb_array")
if not self.made_screen:
pygame.init()
screen_size = (
rgb_array.shape[1] * self.scaling,
rgb_array.shape[0] * self.scaling,
)
screen = pygame.display.set_mode(screen_size)
self.screen = screen
self.made_screen = True
self.screen.fill((0, 0, 0))
for x in range(rgb_array.shape[0]):
for y in range(rgb_array.shape[1]):
if not np.all(rgb_array[x, y, :] == (0, 0, 0)):
pygame.draw.rect(
self.screen,
rgb_array[x, y],
(
y * self.scaling,
x * self.scaling,
self.scaling,
self.scaling,
),
)
pygame.display.update()
elif mode == "rgb_array":
env_image = self.wrapped_env.render(mode="rgb_array")
env_image = np.swapaxes(env_image, 0, 1)
image_x = env_image.shape[0]
image_y = env_image.shape[1] + 1
if "CTS_State" in debug_info:
image_x += 5 + debug_info["CTS_State"].shape[0]
if "Q_Values" in debug_info:
image_y += 50
image = np.zeros(shape=(image_x, image_y, 3))
image[: env_image.shape[0], : env_image.shape[1], :] = env_image
if "Action_Bonus" in debug_info:
# Add the action bonus stuff to the q values
debug_info["Q_Values"] = (
debug_info["Q_Values"] + debug_info["Action_Bonus"]
)
# Draw the Q-Values
if "Q_Values" in debug_info:
q_vals_image = self.draw_q_values(
debug_info, env_image.shape[0] - 1, 48
)
image[
1 : env_image.shape[0],
env_image.shape[1] + 2 : env_image.shape[1] + 50,
:,
] = q_vals_image
# Draw the Pseudo-Count stuff
if "CTS_State" in debug_info:
count_image = self.draw_count(
debug_info,
5 - 1 + debug_info["CTS_State"].shape[0],
image_y - 1,
)
image[env_image.shape[0] + 1 :, :-1, :] = count_image
image = np.swapaxes(image, 0, 1)
return image
else:
return self.wrapped_env.render(mode, close)
def draw_q_values(self, info, width, height):
image = np.zeros((width, height, 3))
red = (255, 0, 0)
yellow = (255, 255, 0)
green = (0, 255, 0)
blue = (0, 0, 255)
purple = (255, 0, 255)
orange = (255, 165, 0)
q_values = info["Q_Values"]
max_q_value = info["Max_Q_Value"]
min_q_value = info["Min_Q_Value"]
chosen_action = info["Action"]
epsilon = info["Epsilon"]
forced_action = -1
if "Forced_Action" in info:
forced_action = info["Forced_Action"]
# Hack
if max_q_value == min_q_value:
q_val_sizes = [(height - 4) for _ in q_values]
else:
q_val_sizes = [
int((q_val - min_q_value) / (max_q_value - min_q_value) * (height - 4))
+ 4
for q_val in q_values
]
greedy_action = np.argmax(q_values)
actions = len(q_values)
bar_width = int(width / actions)
# Draw the Q-Values
for i, q_size in enumerate(q_val_sizes):
if i == forced_action:
q_color = red
elif i == greedy_action:
q_color = yellow
elif i == chosen_action:
q_color = green
else:
q_color = blue
rect_coords = [
(i * bar_width, 4),
(i * bar_width, q_size),
((i + 1) * bar_width, q_size),
((i + 1) * bar_width, 4),
]
rect_row = [r[0] for r in rect_coords]
rect_col = [r[1] for r in rect_coords]
rect_array_coords = draw.polygon(rect_row, rect_col)
draw.set_color(image, rect_array_coords, q_color)
# Draw the action bonus stuff if it is there
if "Action_Bonus" in info:
q_val_bonuses = info["Action_Bonus"]
q_val_bonus_sizes = [
int(
(q_bonus - min_q_value) / (max_q_value - min_q_value) * (height - 4)
)
+ 4
for q_bonus in q_val_bonuses
]
for i, q_size in enumerate(q_val_bonus_sizes):
q_color = orange
rect_coords = [
(i * bar_width, 4),
(i * bar_width, q_size),
((i + 1) * bar_width, q_size),
((i + 1) * bar_width, 4),
]
rect_row = [r[0] for r in rect_coords]
rect_col = [r[1] for r in rect_coords]
rect_array_coords = draw.polygon(rect_row, rect_col)
draw.set_color(image, rect_array_coords, q_color)
# Epsilon
bar_width = int(width * epsilon)
bonus_rect_coords = [(0, 0), (0, 3), (bar_width, 3), (bar_width, 0)]
rect_row = [r[0] for r in bonus_rect_coords]
rect_col = [r[1] for r in bonus_rect_coords]
rect_array_coords = draw.polygon(rect_row, rect_col)
draw.set_color(image, rect_array_coords, purple)
return np.fliplr(image)
def draw_count(self, info, width, height):
image = np.zeros((width, height, 3))
red = (255, 0, 0)
bonus = info["Bonus"]
max_bonus = info["Max_Bonus"]
cts_image = info["CTS_State"]
cts_pg = info["Pixel_PG"]
if max_bonus == 0:
max_bonus = 0.000001
# Bar
bar_height = int(height * bonus / max_bonus)
bonus_rect_coords = [(0, 0), (0, bar_height), (3, bar_height), (3, 0)]
rect_row = [r[0] for r in bonus_rect_coords]
rect_col = [r[1] for r in bonus_rect_coords]
rect_array_coords = draw.polygon(rect_row, rect_col)
draw.set_color(image, rect_array_coords, red)
# PG per pixel
cts_gray = np.concatenate([cts_image for _ in range(3)], axis=2)
cts_pg_image = np.empty_like(cts_gray)
for x in range(cts_image.shape[0]):
for y in range(cts_image.shape[1]):
pg = cts_pg[x, y]
if pg < 0:
# Blue
pg = max(-1, pg)
cts_pg_image[x, y, :] = (0, 0, int(-pg * 255))
else:
# Red
pg = min(pg, 1)
cts_pg_image[x, y, :] = (int(pg * 255), 0, 0)
cts_alpha = np.stack([np.abs(np.clip(cts_pg, -1, 1)) for _ in range(3)], axis=2)
cts_colour_image = cts_alpha * cts_pg_image + (1 - cts_alpha) * cts_gray
image[4:, -cts_image.shape[1] :, :] = np.fliplr(
np.swapaxes(cts_colour_image, 0, 1)
)
return np.fliplr(image)
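# --- Illustrative usage sketch (not part of the original module) ---
# Shows how debug_render() consumes the optional debug_info dict built by an
# agent. The environment id, args fields and Q-values below are placeholders,
# and the exact gym render API depends on the installed gym version.
def _envwrapper_usage_example():
    import argparse
    args = argparse.Namespace(render_scaling=4, episode_limit=0)
    env = EnvWrapper(gym.make("CartPole-v1"), debug=True, args=args)
    env.reset()
    q_values = np.array([0.1, 0.4])
    frame = env.debug_render(
        debug_info={
            "Q_Values": q_values,
            "Max_Q_Value": float(q_values.max()),
            "Min_Q_Value": float(q_values.min()),
            "Action": int(q_values.argmax()),
            "Epsilon": 0.05,
        },
        mode="rgb_array",
    )
    return frame.shape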
import bisect
import numpy as np
import albumentations
from PIL import Image
from torch.utils.data import Dataset, ConcatDataset
class ConcatDatasetWithIndex(ConcatDataset):
"""Modified from original pytorch code to return dataset idx"""
def __getitem__(self, idx):
if idx < 0:
if -idx > len(self):
raise ValueError("absolute value of index should not exceed dataset length")
idx = len(self) + idx
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
return self.datasets[dataset_idx][sample_idx], dataset_idx
class ImagePaths(Dataset):
def __init__(self, paths, size=None, random_crop=False, labels=None, watermark=None):
self.size = size
self.random_crop = random_crop
self.labels = dict() if labels is None else labels
self.labels["file_path_"] = paths
self._length = len(paths)
self.watermark=watermark
if self.size is not None and self.size > 0:
# self.rescaler = albumentations.SmallestMaxSize(max_size = self.size + self.size//16)
if not self.random_crop:
self.cropper = albumentations.CenterCrop(height=self.size,width=self.size)
else:
self.cropper = albumentations.RandomCrop(height=self.size,width=self.size)
self.flipper = albumentations.HorizontalFlip()
self.preprocessor = albumentations.Compose([self.cropper, self.flipper])
# self.preprocessor = albumentations.Compose([self.rescaler, self.cropper, self.flipper])
else:
self.preprocessor = lambda **kwargs: kwargs
def __len__(self):
return self._length
def preprocess_image(self, image_path):
image = Image.open(image_path)
if not image.mode == "RGB":
image = image.convert("RGB")
image = np.array(image).astype(np.uint8)
image = self.preprocessor(image=image)["image"]
image = np.array(image).astype(np.float32)
if self.watermark is not None:
alpha = Image.open(self.watermark)
if not alpha.mode == "RGB":
alpha = alpha.convert("RGB")
alpha = np.array(alpha, dtype=np.float32)
alpha = 1.0 * alpha / 255.0
gray = np.full(image.shape, 128, dtype=np.float32)
image = image * (1.0 - alpha) + gray * alpha
image = np.round(np.clip(image, 0, 255))
image = (image/127.5 - 1.0)
return image
def __getitem__(self, i):
example = dict()
example["image"] = self.preprocess_image(self.labels["file_path_"][i])
for k in self.labels:
example[k] = self.labels[k][i]
return example
class NumpyPaths(ImagePaths):
def preprocess_image(self, image_path):
image = np.load(image_path)
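# --- Illustrative usage sketch (not part of the original module) ---
# The file paths below are hypothetical; size/random_crop/watermark behave as in
# the preprocessing above (output image values are scaled to [-1, 1]).
def _imagepaths_usage_example():
    dataset = ImagePaths(
        paths=["/tmp/example_0.png", "/tmp/example_1.png"],  # placeholder files
        size=256,
        random_crop=True,
    )
    sample = dataset[0]
    return sample["image"].shape, sample["file_path_"]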
# This script does the replication of [B&M 2011] component of the demoforestation paper
# Import required modules
import numpy as np
import pandas as pd
import statsmodels.api as stats
from matplotlib import pyplot as plt
from ToTeX import restab
# Reading in the data
data = pd.read_csv('C:/Users/User/Documents/Data/demoforestation.csv')
# (1) Replicating Figure 1
# Structuring dataframes
Yf1 = data['Rate_9000']
Xf11 = stats.add_constant(data['Democracy(20)_90'])
Xf12 = stats.add_constant(data[['Democracy(20)_90', 'Democracy(20)_90_2']])
f1m1 = stats.OLS(Yf1,Xf11)
f1m2 = stats.OLS(Yf1,Xf12)
f1r1 = f1m1.fit()
print(f1r1.summary())
file = open('C:/Users/User/Documents/Data/Demoforestation/Replication/Figure_1_model_1.txt', 'w')
file.write(f1r1.summary().as_text())
file.close()
f1r2 = f1m2.fit()
print(f1r2.summary())
file = open('C:/Users/User/Documents/Data/Demoforestation/Replication/Figure_1_model_2.txt', 'w')
file.write(f1r2.summary().as_text())
file.close()
# Recreating the plot
plt.figure()
plt.scatter(data['Democracy(20)_90'], data['Rate_9000'], s = 40)
plt.xlabel('Democracy Index')
plt.ylabel('Deforestation Rate')
plt.ylim(-15.5,10.5)
plt.xlim(-10.5,10.5)
basis = [i/10 for i in range(-120,121)]
l1 = [0.0741 + 0.0041*(i/10) for i in range(-120,121)]
l2 = [0.9628 + 0.0480*(i/10) - 0.0220*(i/10)**2 for i in range(-120,121)]
plt.plot(basis, l1, 'k-', linewidth = 4)
plt.plot(basis, l2, 'r-', linewidth = 4)
plt.savefig('C:/Users/User/Documents/Data/Demoforestation/Replication/Figure_1.eps')
# (2) Replicating the 7 regression models
df1 = data[['Rate_9000', 'Democracy(20)_90', 'Democracy(20)_90_2']].dropna()
df2 = data[['Rate_9000', 'Democracy(20)_90', 'Democracy(20)_90_2', 'Education_90', 'Rural_Population_90', 'Ln_Land', 'CCI_90']].dropna()
df3 = data[['Rate_9000', 'Democracy(20)_90', 'Democracy(20)_90_2', 'Education_90', 'Rural_Population_90', 'Ln_Land']].dropna()
df4 = data[['Rate_9000', 'Democracy(20)_90', 'Democracy(20)_90_2', 'Education_90', 'Rural_Population_90', 'Ln_Land', 'GDP_cap_90']].dropna()
df5 = data[['Rate_9000', 'Democracy(20)_90', 'Democracy(20)_90_2', 'Education_90', 'Rural_Population_90', 'Ln_Land', 'GDP_cap_90', 'GDP_cap_90_2']].dropna()
df6 = data[['Rate_9000', 'Education_90', 'Rural_Population_90', 'Ln_Land', 'GDP_cap_90', 'GDP_cap_90_2']].dropna()
df7 = data[['Rate_9000', 'GDP_cap_90', 'GDP_cap_90_2']].dropna()
X1 = stats.add_constant(df1[['Democracy(20)_90', 'Democracy(20)_90_2']])
X2 = stats.add_constant(df2[['Democracy(20)_90', 'Democracy(20)_90_2', 'Education_90', 'Rural_Population_90', 'Ln_Land', 'CCI_90']])
X3 = stats.add_constant(df3[['Democracy(20)_90', 'Democracy(20)_90_2', 'Education_90', 'Rural_Population_90', 'Ln_Land']])
X4 = stats.add_constant(df4[['Democracy(20)_90', 'Democracy(20)_90_2', 'Education_90', 'Rural_Population_90', 'Ln_Land', 'GDP_cap_90']])
X5 = stats.add_constant(df5[['Democracy(20)_90', 'Democracy(20)_90_2', 'Education_90', 'Rural_Population_90', 'Ln_Land', 'GDP_cap_90', 'GDP_cap_90_2']])
X6 = stats.add_constant(df6[['Education_90', 'Rural_Population_90', 'Ln_Land', 'GDP_cap_90', 'GDP_cap_90_2']])
X7 = stats.add_constant(df7[['GDP_cap_90', 'GDP_cap_90_2']])
mod1 = stats.OLS(df1['Rate_9000'],X1)
mod2 = stats.OLS(df2['Rate_9000'],X2)
mod3 = stats.OLS(df3['Rate_9000'],X3)
mod4 = stats.OLS(df4['Rate_9000'],X4)
mod5 = stats.OLS(df5['Rate_9000'],X5)
mod6 = stats.OLS(df6['Rate_9000'],X6)
mod7 = stats.OLS(df7['Rate_9000'],X7)
mods = [mod1, mod2, mod3, mod4, mod5, mod6, mod7]
res_list = []
for mod in mods:
res = mod.fit(cov_type = 'HC1')
res_list.append(res)
print(res.summary())
file = open('C:/Users/User/Documents/Data/Demoforestation/Replication/Model_' + str(mods.index(mod)+1) + '.txt', 'w')
file.write(res.summary().as_text())
file.close()
restab(res_list, 'C:/Users/User/Documents/Data/Demoforestation/Replication/restab_replication.txt')
# (3) Replicating the cluster analyses
# Recreate the statistics in Table (3) in the original paper
# Record group level statistics
Type6 = pd.DataFrame(np.zeros((2,6)), columns = data.Type6.unique(), index = ['Democracy', 'Rate_9000'])
Type3 = pd.DataFrame(np.zeros((2,3)), columns = data.Type3.unique(), index = ['Democracy', 'Rate_9000'])
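# --- Illustrative sketch (not part of the replication script) ---
# One plausible way to fill the group-level tables above is a pandas groupby mean
# over the cluster labels; the statistics reported in the original Table 3 may differ.
group_means_type6 = data.groupby('Type6')[['Democracy(20)_90', 'Rate_9000']].mean()
print(group_means_type6.round(2))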
import torch
import torch.nn as nn
import torch.nn.functional as F
from model.graph import GraphTripleConvNet, _init_weights, make_mlp
import numpy as np
class Sg2ScVAEModel(nn.Module):
"""
VAE-based network for scene generation and manipulation from a scene graph.
It has a shared embedding of shape and bounding box latents.
"""
def __init__(self, vocab,
embedding_dim=128,
batch_size=32,
train_3d=True,
decoder_cat=False,
Nangle=24,
gconv_pooling='avg',
gconv_num_layers=5,
gconv_num_shared_layer=3,
distribution_before=True,
mlp_normalization='none',
vec_noise_dim=0,
use_AE=False,
with_changes=True,
replace_latent=True,
use_angles=False,
num_box_params=6,
residual=False):
super(Sg2ScVAEModel, self).__init__()
self.replace_latent = replace_latent
self.with_changes = with_changes
self.dist_before = distribution_before
self.use_angles = use_angles
gconv_dim = embedding_dim
gconv_hidden_dim = gconv_dim * 4
box_embedding_dim = int(embedding_dim)
shape_embedding_dim = int(embedding_dim)
if use_angles:
angle_embedding_dim = int(embedding_dim / 4)
box_embedding_dim = int(embedding_dim * 3 / 4)
Nangle = 24
obj_embedding_dim = embedding_dim
self.batch_size = batch_size
self.embedding_dim = embedding_dim
self.train_3d = train_3d
self.decoder_cat = decoder_cat
self.vocab = vocab
self.vec_noise_dim = vec_noise_dim
self.use_AE = use_AE
num_objs = len(vocab['object_idx_to_name'])
num_preds = len(vocab['pred_idx_to_name'])
# build network components for encoder and decoder
self.obj_embeddings_ec_box = nn.Embedding(num_objs + 1, obj_embedding_dim)
self.obj_embeddings_ec_shape = nn.Embedding(num_objs + 1, obj_embedding_dim)
self.pred_embeddings_ec_box = nn.Embedding(num_preds, 2 * embedding_dim)
self.pred_embeddings_ec_shape = nn.Embedding(num_preds, 2 * embedding_dim)
self.obj_embeddings_dc_box = nn.Embedding(num_objs + 1, 2*obj_embedding_dim)
self.obj_embeddings_dc_man = nn.Embedding(num_objs + 1, 2*obj_embedding_dim)
self.obj_embeddings_dc_shape = nn.Embedding(num_objs + 1, 2*obj_embedding_dim)
self.pred_embeddings_dc_box = nn.Embedding(num_preds, 4*embedding_dim)
self.pred_embeddings_dc_shape = nn.Embedding(num_preds, 4*embedding_dim)
if self.decoder_cat:
self.pred_embeddings_dc = nn.Embedding(num_preds, embedding_dim * 2)
self.pred_embeddings_man_dc = nn.Embedding(num_preds, embedding_dim * 6)
if self.train_3d:
self.box_embeddings = nn.Linear(num_box_params, box_embedding_dim)
self.shape_embeddings = nn.Linear(128, shape_embedding_dim)
else:
self.box_embeddings = nn.Linear(4, box_embedding_dim)
if self.use_angles:
self.angle_embeddings = nn.Embedding(Nangle, angle_embedding_dim)
# weight sharing of mean and var
self.box_mean_var = make_mlp([embedding_dim * 2, gconv_hidden_dim, embedding_dim * 2],
batch_norm=mlp_normalization)
self.box_mean = make_mlp([embedding_dim * 2, box_embedding_dim], batch_norm=mlp_normalization, norelu=True)
self.box_var = make_mlp([embedding_dim * 2, box_embedding_dim], batch_norm=mlp_normalization, norelu=True)
self.shape_mean_var = make_mlp([embedding_dim * 2, gconv_hidden_dim, embedding_dim * 2],
batch_norm=mlp_normalization)
self.shape_mean = make_mlp([embedding_dim * 2, shape_embedding_dim], batch_norm=mlp_normalization, norelu=True)
self.shape_var = make_mlp([embedding_dim * 2, shape_embedding_dim], batch_norm=mlp_normalization, norelu=True)
if self.use_angles:
self.angle_mean_var = make_mlp([embedding_dim * 2, gconv_hidden_dim, embedding_dim * 2],
batch_norm=mlp_normalization)
self.angle_mean = make_mlp([embedding_dim * 2, angle_embedding_dim], batch_norm=mlp_normalization, norelu=True)
self.angle_var = make_mlp([embedding_dim * 2, angle_embedding_dim], batch_norm=mlp_normalization, norelu=True)
# graph conv net
self.gconv_net_ec = None
self.gconv_net_dc = None
gconv_kwargs_ec = {
'input_dim_obj': gconv_dim * 2,
'input_dim_pred': gconv_dim * 2,
'hidden_dim': gconv_hidden_dim,
'pooling': gconv_pooling,
'num_layers': gconv_num_layers,
'mlp_normalization': mlp_normalization,
'residual': residual
}
gconv_kwargs_dc = {
'input_dim_obj': gconv_dim * 2,
'input_dim_pred': gconv_dim * 2,
'hidden_dim': gconv_hidden_dim,
'pooling': gconv_pooling,
'num_layers': gconv_num_layers,
'mlp_normalization': mlp_normalization,
'residual': residual
}
gconv_kwargs_shared = {
'input_dim_obj': gconv_hidden_dim,
'input_dim_pred': gconv_hidden_dim,
'hidden_dim': gconv_hidden_dim,
'pooling': gconv_pooling,
'num_layers': gconv_num_shared_layer,
'mlp_normalization': mlp_normalization,
'residual': residual
}
if self.with_changes:
gconv_kwargs_manipulation = {
'input_dim_obj': embedding_dim * 6,
'input_dim_pred': embedding_dim * 6,
'hidden_dim': gconv_hidden_dim * 2,
'output_dim': embedding_dim * 2,
'pooling': gconv_pooling,
'num_layers': gconv_num_layers,
'mlp_normalization': mlp_normalization,
'residual': residual
}
if self.decoder_cat:
gconv_kwargs_dc['input_dim_obj'] = gconv_dim * 4
gconv_kwargs_dc['input_dim_pred'] = gconv_dim * 4
if not self.dist_before:
gconv_kwargs_shared['input_dim_obj'] = gconv_hidden_dim * 2
gconv_kwargs_shared['input_dim_pred'] = gconv_hidden_dim * 2
self.gconv_net_ec_box = GraphTripleConvNet(**gconv_kwargs_ec)
self.gconv_net_ec_shape = GraphTripleConvNet(**gconv_kwargs_ec)
self.gconv_net_dec_box = GraphTripleConvNet(**gconv_kwargs_dc)
self.gconv_net_dec_shape = GraphTripleConvNet(**gconv_kwargs_dc)
self.gconv_net_shared = GraphTripleConvNet(**gconv_kwargs_shared)
if self.with_changes:
self.gconv_net_manipulation = GraphTripleConvNet(**gconv_kwargs_manipulation)
# box prediction net
if self.train_3d:
box_net_dim = num_box_params
else:
box_net_dim = 4
box_net_layers = [gconv_dim * 4, gconv_hidden_dim, box_net_dim]
self.box_net = make_mlp(box_net_layers, batch_norm=mlp_normalization, norelu=True)
if self.use_angles:
# angle prediction net
angle_net_layers = [gconv_dim * 4, gconv_hidden_dim, Nangle]
self.angle_net = make_mlp(angle_net_layers, batch_norm=mlp_normalization, norelu=True)
shape_net_layers = [gconv_dim * 4, gconv_hidden_dim, 128]
self.shape_net = make_mlp(shape_net_layers, batch_norm=mlp_normalization, norelu=True)
# initialization
self.box_embeddings.apply(_init_weights)
self.box_mean_var.apply(_init_weights)
self.box_mean.apply(_init_weights)
self.box_var.apply(_init_weights)
if self.use_angles:
self.angle_mean_var.apply(_init_weights)
self.angle_mean.apply(_init_weights)
self.angle_var.apply(_init_weights)
self.shape_mean_var.apply(_init_weights)
self.shape_mean.apply(_init_weights)
self.shape_var.apply(_init_weights)
self.shape_net.apply(_init_weights)
self.box_net.apply(_init_weights)
def encoder(self, objs, triples, boxes_gt, shapes_gt, attributes, angles_gt=None):
O, T = objs.size(0), triples.size(0)
s, p, o = triples.chunk(3, dim=1) # All have shape (T, 1)
s, p, o = [x.squeeze(1) for x in [s, p, o]] # Now have shape (T,)
edges = torch.stack([s, o], dim=1) # Shape is (T, 2)
obj_vecs_box = self.obj_embeddings_ec_box(objs)
obj_vecs_shape = self.obj_embeddings_ec_shape(objs)
shape_vecs = self.shape_embeddings(shapes_gt)
pred_vecs_box = self.pred_embeddings_ec_box(p)
pred_vecs_shape = self.pred_embeddings_ec_shape(p)
boxes_vecs = self.box_embeddings(boxes_gt)
if self.use_angles:
angle_vecs = self.angle_embeddings(angles_gt)
obj_vecs_box = torch.cat([obj_vecs_box, boxes_vecs, angle_vecs], dim=1)
else:
obj_vecs_box = torch.cat([obj_vecs_box, boxes_vecs], dim=1)
obj_vecs_shape = torch.cat([obj_vecs_shape, shape_vecs], dim=1)
if self.gconv_net_ec_box is not None:
obj_vecs_box, pred_vecs_box = self.gconv_net_ec_box(obj_vecs_box, pred_vecs_box, edges)
obj_vecs_shape, pred_vecs_shapes = self.gconv_net_ec_shape(obj_vecs_shape, pred_vecs_shape, edges)
if self.dist_before:
obj_vecs_shared = torch.cat([obj_vecs_box, obj_vecs_shape], dim=1)
pred_vecs_shared = torch.cat([pred_vecs_box, pred_vecs_shapes], dim=1)
obj_vecs_shared, pred_vecs_shared =self.gconv_net_shared(obj_vecs_shared, pred_vecs_shared, edges)
obj_vecs_box, obj_vecs_shape = obj_vecs_shared[:, :obj_vecs_box.shape[1]], obj_vecs_shared[:, obj_vecs_box.shape[1]:]
obj_vecs_box_norot = self.box_mean_var(obj_vecs_box)
mu_box = self.box_mean(obj_vecs_box_norot)
logvar_box = self.box_var(obj_vecs_box_norot)
if self.use_angles:
obj_vecs_angle = self.angle_mean_var(obj_vecs_box)
mu_angle = self.angle_mean(obj_vecs_angle)
logvar_angle = self.angle_var(obj_vecs_angle)
mu_box = torch.cat([mu_box, mu_angle], dim=1)
logvar_box = torch.cat([logvar_box, logvar_angle], dim=1)
obj_vecs_shape = self.shape_mean_var(obj_vecs_shape)
mu_shape = self.shape_mean(obj_vecs_shape)
logvar_shape = self.shape_var(obj_vecs_shape)
else:
obj_vecs_box_norot = self.box_mean_var(obj_vecs_box)
mu_box = self.box_mean(obj_vecs_box_norot)
logvar_box = self.box_var(obj_vecs_box_norot)
if self.use_angles:
obj_vecs_angle = self.angle_mean_var(obj_vecs_box)
mu_angle = self.angle_mean(obj_vecs_angle)
logvar_angle = self.angle_var(obj_vecs_angle)
mu_box = torch.cat([mu_box, mu_angle], dim=1)
logvar_box = torch.cat([logvar_box, logvar_angle], dim=1)
obj_vecs_shape = self.shape_mean_var(obj_vecs_shape)
mu_shape = self.shape_mean(obj_vecs_shape)
logvar_shape = self.shape_var(obj_vecs_shape)
mu = torch.cat([mu_box, mu_shape], dim=1)
logvar = torch.cat([logvar_box, logvar_shape], dim=1)
return mu, logvar
def manipulate(self, z, objs, triples, attributes):
s, p, o = triples.chunk(3, dim=1) # All have shape (T, 1)
s, p, o = [x.squeeze(1) for x in [s, p, o]] # Now have shape (T,)
edges = torch.stack([s, o], dim=1) # Shape is (T, 2)
obj_vecs = self.obj_embeddings_dc_man(objs)
pred_vecs = self.pred_embeddings_man_dc(p)
man_z = torch.cat([z, obj_vecs], dim=1)
man_z, _ = self.gconv_net_manipulation(man_z, pred_vecs, edges)
return man_z
def decoder(self, z, objs, triples, attributes, manipulate=False):
s, p, o = triples.chunk(3, dim=1) # All have shape (T, 1)
s, p, o = [x.squeeze(1) for x in [s, p, o]] # Now have shape (T,)
edges = torch.stack([s, o], dim=1) # Shape is (T, 2)
obj_vecs_box = self.obj_embeddings_dc_box(objs)
obj_vecs_shape = self.obj_embeddings_dc_shape(objs)
pred_vecs_box = self.pred_embeddings_dc_box(p)
pred_vecs_shape = self.pred_embeddings_dc_shape(p)
if not self.dist_before:
obj_vecs_box = torch.cat([obj_vecs_box, z], dim=1)
obj_vecs_shape = torch.cat([obj_vecs_shape, z], dim=1)
obj_vecs_shared = torch.cat([obj_vecs_box, obj_vecs_shape], dim=1)
pred_vecs_shared = torch.cat([pred_vecs_box, pred_vecs_shape], dim=1)
obj_vecs_shared, pred_vecs_shared = self.gconv_net_shared(obj_vecs_shared, pred_vecs_shared, edges)
obj_vecs_box, obj_vecs_shape = obj_vecs_shared[:, :obj_vecs_box.shape[1]], obj_vecs_shared[:, obj_vecs_box.shape[1]:]
pred_vecs_box, pred_vecs_shape = pred_vecs_shared[:, :pred_vecs_box.shape[1]], pred_vecs_shared[:, pred_vecs_box.shape[1]:]
if self.decoder_cat:
if self.dist_before:
obj_vecs_box = torch.cat([obj_vecs_box, z], dim=1)
obj_vecs_shape = torch.cat([obj_vecs_shape, z], dim=1)
obj_vecs_box, pred_vecs_box = self.gconv_net_dec_box(obj_vecs_box, pred_vecs_box, edges)
obj_vecs_shape, pred_vecs_shape = self.gconv_net_dec_shape(obj_vecs_shape, pred_vecs_shape, edges)
else:
raise NotImplementedError
boxes_pred = self.box_net(obj_vecs_box)
shapes_pred = self.shape_net(obj_vecs_shape)
if self.use_angles:
angles_pred = F.log_softmax(self.angle_net(obj_vecs_box), dim=1)
return boxes_pred, angles_pred, shapes_pred
else:
return boxes_pred, shapes_pred
def decoder_with_changes(self, z, dec_objs, dec_triples, attributes, missing_nodes, manipulated_nodes):
# append zero nodes
nodes_added = []
for i in range(len(missing_nodes)):
ad_id = missing_nodes[i] + i
nodes_added.append(ad_id)
noise = np.zeros(self.embedding_dim* 2) # np.random.normal(0, 1, 64)
zeros = torch.from_numpy(noise.reshape(1, self.embedding_dim* 2))
zeros.requires_grad = True
zeros = zeros.float().cuda()
z = torch.cat([z[:ad_id], zeros, z[ad_id:]], dim=0)
# mark changes in nodes
change_repr = []
for i in range(len(z)):
if i not in nodes_added and i not in manipulated_nodes:
noisechange = np.zeros(self.embedding_dim* 2)
else:
noisechange = np.random.normal(0, 1, self.embedding_dim* 2)
change_repr.append(torch.from_numpy(noisechange).float().cuda())
change_repr = torch.stack(change_repr, dim=0)
z_prime = torch.cat([z, change_repr], dim=1)
z_prime = self.manipulate(z_prime, dec_objs, dec_triples, attributes)
# take original nodes when untouched
if self.replace_latent:
# take original nodes when untouched
touched_nodes = torch.tensor(sorted(nodes_added + manipulated_nodes)).long()
for touched_node in touched_nodes:
z = torch.cat([z[:touched_node], z_prime[touched_node:touched_node + 1], z[touched_node + 1:]], dim=0)
else:
z = z_prime
dec_man_enc_boxes_pred = self.decoder(z, dec_objs, dec_triples, attributes)
if self.use_angles:
dec_man_enc_boxes_pred, dec_man_enc_shapes_pred = dec_man_enc_boxes_pred[:-1], dec_man_enc_boxes_pred[-1]
num_dec_objs = len(dec_man_enc_boxes_pred[0])
else:
num_dec_objs = len(dec_man_enc_boxes_pred)
keep = []
for i in range(num_dec_objs):
if i not in nodes_added and i not in manipulated_nodes:
keep.append(1)
else:
keep.append(0)
keep = torch.from_numpy(np.asarray(keep).reshape(-1, 1)).float().cuda()
return dec_man_enc_boxes_pred, dec_man_enc_shapes_pred, keep
def decoder_with_additions(self, z, objs, triples, attributes, missing_nodes, manipulated_nodes, distribution=None):
nodes_added = []
if distribution is not None:
mu, cov = distribution
for i in range(len(missing_nodes)):
ad_id = missing_nodes[i] + i
nodes_added.append(ad_id)
noise = np.zeros(z.shape[1])
if distribution is not None:
zeros = torch.from_numpy(np.random.multivariate_normal(mu, cov, 1))
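# --- Illustrative sketch (not part of the original model file) ---
# The encoder above returns (mu, logvar) for the concatenated box/shape latents;
# sampling z typically uses the standard VAE reparameterization trick. This helper
# is an assumption about how z is drawn in the training code, not the authors' exact method.
def reparameterize_sketch(mu, logvar):
    std = torch.exp(0.5 * logvar)
    eps = torch.randn_like(std)
    return mu + eps * std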
import batoid
import numpy as np
from test_helpers import timer, do_pickle, all_obj_diff
@timer
def test_ObscCircle():
rng = np.random.default_rng(5)
size = 10_000
for i in range(100):
cx = rng.normal(0.0, 1.0)
cy = rng.normal(0.0, 1.0)
r = rng.uniform(0.5, 1.5)
obsc = batoid.ObscCircle(r, cx, cy)
for i in range(100):
x = rng.normal(0.0, 1.0)
y = rng.normal(0.0, 1.0)
assert obsc.contains(x, y) == (np.hypot(x-cx, y-cy) <= r)
x = rng.normal(0.0, 1.0, size=size)
y = rng.normal(0.0, 1.0, size=size)
np.testing.assert_array_equal(
obsc.contains(x, y),
np.hypot(x-cx, y-cy) <= r
)
do_pickle(obsc)
rv = batoid.RayVector(x, y, 0.0, 0.0, 0.0, 0.0)
batoid.obscure(obsc, rv)
np.testing.assert_array_equal(
obsc.contains(x, y),
rv.vignetted
)
# Check method syntax too
rv = batoid.RayVector(x, y, 0.0, 0.0, 0.0, 0.0)
obsc.obscure(rv)
np.testing.assert_array_equal(
obsc.contains(x, y),
rv.vignetted
)
@timer
def test_ObscAnnulus():
rng = np.random.default_rng(57)
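# --- Illustrative sketch (not part of the original test file) ---
# The reference predicate for an annulus mirrors the circle case above: a point
# is obscured when inner <= hypot(x - cx, y - cy) <= outer. The batoid
# ObscAnnulus constructor arguments are not shown in this excerpt, so only the
# numpy reference check is sketched here (boundary handling may differ).
def annulus_contains_reference(x, y, inner, outer, cx=0.0, cy=0.0):
    r = np.hypot(x - cx, y - cy)
    return (r >= inner) & (r <= outer)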
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 8 13:28:03 2021
@author: pmazumdar
"""
##############################################
## Smooth 13CO map to CHIMPS Resolution ##
##############################################
import radio_beam
from spectral_cube import SpectralCube
from astropy import units as u
from astropy.table import Table
from astrodendro import Dendrogram, ppv_catalog, structure
from astropy import wcs
from astropy.table import Table
from astropy.io import fits
import numpy as np
import matplotlib.pyplot as plt
from astrodendro.pruning import all_true, min_vchan, min_delta, min_area
from astropy import constants as const
import aplpy
import seaborn as sns
import scipy.stats as sst
from astropy.convolution import Gaussian2DKernel
from astropy.convolution import convolve
cube = SpectralCube.read('/home/pmazumdar/Documents/LASMA/Reduction/class_maps/temp/G305_13CO_resample.fits')
cube_cd = SpectralCube.read('/home/pmazumdar/Documents/LASMA/Reduction/class_maps/temp/ntotal.fits')
beam = radio_beam.Beam(major=27.4*u.arcsec, minor=27.4*u.arcsec, pa=0*u.deg)
new_cube = cube.convolve_to(beam)
new_cube_cd = cube_cd.convolve_to(beam)
hdu_13CO = fits.open('/home/pmazumdar/Documents/LASMA/Reduction/class_maps/temp/G305_13CO-moment0.fits')[0]
mask_nan = ~np.isnan(hdu_13CO.data) # only include non nan pixels
masked_cube = new_cube.with_mask(mask_nan) # apply mask to spectral cube
masked_cube_cd = new_cube_cd.with_mask(mask_nan) # apply mask to spectral cube
data = masked_cube.hdu.data
hd = masked_cube.hdu.header
wc = wcs.WCS(hd)
data_cd = masked_cube_cd.hdu.data
hd_cd = masked_cube_cd.hdu.header
wc_cd = wcs.WCS(hd_cd)
## Custom Definitions for the Dendrogram ##
rms = 0.15 # rms noise
rms_cd = 1.6e7
cd_min = 3.37e11
bmaj = hd['bmaj'] # beam_major
bmin = hd['bmin'] # beam_minor
cdelt1 = hd['cdelt1'] # delta_x
cdelt2 = hd['cdelt2'] # delta_y
deltav_kms = abs(hd['CDELT3']/1000.) # vel res in kmps
ppb = abs((bmaj*bmin)/(cdelt1*cdelt2)*2*np.pi/(8*np.log(2))) # pixel_per_beam
#&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
#&%
#&% Creating the Dendrogram
#&%
#&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
# Prune leaves below a given height:
#def custom_independent(structure, index=None, value=None):
# peak_index, peak_value = structure.get_peak()
# return peak_value > 5
is_independent = all_true((min_delta(5*rms), min_area(1*ppb), min_vchan(6)))
#is_independent_cd = all_true((min_delta(3*cd_min), min_area(1*ppb), min_vchan(2)))
d = Dendrogram.compute(data, min_value=5*rms, wcs=wc, is_independent = is_independent, verbose=1)
d.save_to('/home/pmazumdar/Documents/LASMA/Dendrogram/fits_files/G305_13CO_smoothed_dendro.fits')
#d_cd = Dendrogram.compute(data_cd, min_value=5*cd_min, wcs=wc_cd, is_independent = is_independent_cd, verbose=1)
#d_cd.save_to('/home/pmazumdar/Documents/LASMA/Dendrogram/fits_files/G305_column_densty_smoothed_dendro.fits')
#&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
#&%
#&% START HERE IF DENDROGRAM ALREADY RUN
#&%
#&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
#d = Dendrogram.load_from('/home/pmazumdar/Documents/LASMA/Dendrogram/fits_files/G305_13CO_dendro.fits')
#d_cd = Dendrogram.load_from('/home/pmazumdar/Documents/LASMA/Dendrogram/fits_files/G305_column_densty_dendro.fits')
leaf_id=np.array([])
for s in d.all_structures:
if s.is_leaf:
leaf_id=np.append(leaf_id,s.idx)
print(leaf_id)
leaf_id.sort()
leaf_cd_id=np.array([])
#for s in d_cd.all_structures:
# if s.is_leaf:
# leaf_cd_id=np.append(leaf_cd_id,s.idx)
#print(leaf_cd_id)
#leaf_cd_id.sort()
#&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
#&%
#&% Viewing and Plotting the Dendrogram
#&%
#&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
p = d.plotter()
#p_cd = d_cd.plotter()
#fig,ax = plt.subplots(nrows = 2, ncols=1)
#plt.rcParams.update({"font.size":6})
#ax[0] = fig.add_subplot(1, 1, 1)
#ax[0].set_ylabel('$^{13}$CO Peak Intensity')
#p.plot_tree(ax[0],color='seagreen',lw=0.5)
#p_cd.plot_tree(ax[1],color='orange',lw=0.5)
#ax[1].set_yscale('log')
#ax[1].set_xlabel('Index of Structure')
#ax[1].set_ylabel('$^{13}$CO Column Density')
#plt.savefig("/home/pmazumdar/Documents/LASMA/Reduction/class_maps/temp/plots/Dendrogram_G305.eps",dpi=300)
#plt.close()
#&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
#&%
#&% Creating the ppv_catalog
#&%
#&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
metadata = {}
metadata['data_unit'] = u.Jy
metadata['beam_major'] = (bmaj * u.deg .to(u.arcsecond))*u.arcsecond # FWHM
metadata['beam_minor'] = (bmin * u.deg .to(u.arcsecond))*u.arcsecond # FWHM
metadata['velocity_scale'] = 0.5 * u.km/u.s # v_res
cat = ppv_catalog(d,metadata)
#cat_cd = ppv_catalog(d_cd,metadata)
# Note : Catalog of Column Density : Flux per pixel represents Col. Density per delX.delY.delV ##
#
dists = 3800*u.parsec # Distance of the source.
dist_gc = 6598.5452942296305*u.parsec
x2 = 18 # x2 = X_CO / 2E20 [cm^-2 / K km s^-1] , where X_CO is a CO-H2 conversion factor.
#x2 = 1 # for 12CO
sigma_v = np.array(cat['v_rms'])
sigma_x = np.array(cat['radius'])*(((1*u.arcsecond).to(u.rad)).value)*dists.value
#sigma_v_cd = np.array(cat_cd['v_rms'])
#sigma_x_cd = np.array(cat_cd['radius'])*(((1*u.arcsecond).to(u.rad)).value)*dists.value
eta = 1.91 # conversion factor. R = eta * sigma_r
G = 4.302e-3 # units of pc (M_sun)^-1 (km/s)^2
deltax_pc = abs(np.pi/180.*hd['CDELT1']*dists.value) # delta x in pc
deltay_pc = abs(np.pi/180.*hd['CDELT2']*dists.value) # delta y in pc
sigma_majs = cat['major_sigma']
sigma_mins = cat['minor_sigma']
#sigma_majs_cd = cat_cd['major_sigma']
#sigma_mins_cd = cat_cd['minor_sigma']
R12_13 = (6.21*(dist_gc.value/1000.0))+18.71 # C12/C13 abundance ratio (Milam et al. 2005)
R12 = 8.5e-5 # C12/H2 abundance (Frerking et al.)
R13_inv = R12_13/R12
mu = 2.72 # Average H2 mass including He fraction
mp = 8.4089382e-58*u.solMass # Proton Mass
nu_12CO = 345.79598990*u.GHz
nu_13CO = 330.58796530*u.GHz
delta_nu_12 = 0.0011534512649414282*u.GHz
delta_nu_13 = 0.0011027227552869259*u.GHz
##
## Additions to Integrated Intensity Catalog
##
## adding a radius column to the catalog
cat['radius_pc'] = np.zeros(len(cat),dtype=float)
cat['radius_pc'] = eta*np.sqrt((sigma_majs*deltax_pc)*(sigma_mins*deltay_pc))
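# --- Illustrative sketch (not part of the original script) ---
# With radius_pc [pc], v_rms [km/s] and G [pc Msun^-1 (km/s)^2] as defined above,
# the standard virial parameter is alpha_vir = 5 * sigma_v**2 * R / (G * M).
# The excerpt does not show the authors' mass estimate, so a placeholder mass is
# used here purely for illustration.
mass_placeholder = np.full(len(cat), 1.0e3)   # [Msun], assumed value
alpha_vir_sketch = 5.0 * np.array(cat['v_rms'])**2 * np.array(cat['radius_pc']) / (G * mass_placeholder)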
from yt.frontends.open_pmd.data_structures import \
OpenPMDDataset
from yt.testing import \
assert_almost_equal, \
assert_equal, \
assert_array_equal, \
requires_file
from yt.utilities.answer_testing.framework import \
data_dir_load
from yt.convenience import load
import numpy as np
twoD = "example-2d/hdf5/data00000100.h5"
threeD = "example-3d/hdf5/data00000100.h5"
noFields = "no_fields/data00000400.h5"
noParticles = "no_particles/data00000400.h5"
groupBased = "singleParticle/simData.h5"
@requires_file(threeD)
def test_3d_out():
ds = data_dir_load(threeD)
field_list = [('all', 'particle_charge'),
('all', 'particle_mass'),
('all', 'particle_momentum_x'),
('all', 'particle_momentum_y'),
('all', 'particle_momentum_z'),
('all', 'particle_positionCoarse_x'),
('all', 'particle_positionCoarse_y'),
('all', 'particle_positionCoarse_z'),
('all', 'particle_positionOffset_x'),
('all', 'particle_positionOffset_y'),
('all', 'particle_positionOffset_z'),
('all', 'particle_weighting'),
('io', 'particle_charge'),
('io', 'particle_mass'),
('io', 'particle_momentum_x'),
('io', 'particle_momentum_y'),
('io', 'particle_momentum_z'),
('io', 'particle_positionCoarse_x'),
('io', 'particle_positionCoarse_y'),
('io', 'particle_positionCoarse_z'),
('io', 'particle_positionOffset_x'),
('io', 'particle_positionOffset_y'),
('io', 'particle_positionOffset_z'),
('io', 'particle_weighting'),
('openPMD', 'E_x'),
('openPMD', 'E_y'),
('openPMD', 'E_z'),
('openPMD', 'rho')]
domain_dimensions = [26, 26, 201] * np.ones_like(ds.domain_dimensions)
domain_width = [2.08e-05, 2.08e-05, 2.01e-05] * np.ones_like(ds.domain_left_edge)
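# --- Illustrative sketch (not part of the original test) ---
# A typical openPMD frontend check compares the dataset's field list and domain
# metadata against the expectations built above; the original test's exact
# assertions are not shown in this excerpt.
def _check_3d_metadata(ds, expected_fields, expected_dims):
    assert_equal(sorted(ds.field_list), sorted(expected_fields))
    assert_equal(ds.domain_dimensions, expected_dims)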
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from collections.abc import Sequence
except Exception:
from collections import Sequence
import random
import os.path as osp
import numpy as np
import cv2
from PIL import Image, ImageEnhance
from .imgaug_support import execute_imgaug
from .ops import *
from .box_utils import *
class DetTransform:
"""检测数据处理基类
"""
def __init__(self):
pass
class Compose(DetTransform):
"""根据数据预处理/增强列表对输入数据进行操作。
所有操作的输入图像流形状均是[H, W, C],其中H为图像高,W为图像宽,C为图像通道数。
Args:
transforms (list): 数据预处理/增强列表。
Raises:
TypeError: 形参数据类型不满足需求。
ValueError: 数据长度不匹配。
"""
def __init__(self, transforms):
if not isinstance(transforms, list):
raise TypeError('The transforms must be a list!')
if len(transforms) < 1:
raise ValueError('The length of transforms ' + \
'must be equal or larger than 1!')
self.transforms = transforms
self.use_mixup = False
for t in self.transforms:
if type(t).__name__ == 'MixupImage':
self.use_mixup = True
# Check the operators in transforms; only transforms defined by PaddleX or imgaug augmenters are supported.
for op in self.transforms:
if not isinstance(op, DetTransform):
import imgaug.augmenters as iaa
if not isinstance(op, iaa.Augmenter):
raise Exception(
"Elements in transforms should be defined in 'paddlex.det.transforms' or class of imgaug.augmenters.Augmenter, see docs here: https://paddlex.readthedocs.io/zh_CN/latest/apis/transforms/"
)
def __call__(self, im, im_info=None, label_info=None):
"""
Args:
im (str/np.ndarray): 图像路径/图像np.ndarray数据。
im_info (dict): 存储与图像相关的信息,dict中的字段如下:
- im_id (np.ndarray): 图像序列号,形状为(1,)。
- image_shape (np.ndarray): 图像原始大小,形状为(2,),
image_shape[0]为高,image_shape[1]为宽。
- mixup (list): list为[im, im_info, label_info],分别对应
与当前图像进行mixup的图像np.ndarray数据、图像相关信息、标注框相关信息;
注意,当前epoch若无需进行mixup,则无该字段。
label_info (dict): 存储与标注框相关的信息,dict中的字段如下:
- gt_bbox (np.ndarray): 真实标注框坐标[x1, y1, x2, y2],形状为(n, 4),
其中n代表真实标注框的个数。
- gt_class (np.ndarray): 每个真实标注框对应的类别序号,形状为(n, 1),
其中n代表真实标注框的个数。
- gt_score (np.ndarray): 每个真实标注框对应的混合得分,形状为(n, 1),
其中n代表真实标注框的个数。
- gt_poly (list): 每个真实标注框内的多边形分割区域,每个分割区域由点的x、y坐标组成,
长度为n,其中n代表真实标注框的个数。
- is_crowd (np.ndarray): 每个真实标注框中是否是一组对象,形状为(n, 1),
其中n代表真实标注框的个数。
- difficult (np.ndarray): 每个真实标注框中的对象是否为难识别对象,形状为(n, 1),
其中n代表真实标注框的个数。
Returns:
tuple: 根据网络所需字段所组成的tuple;
字段由transforms中的最后一个数据预处理操作决定。
"""
def decode_image(im_file, im_info, label_info):
if im_info is None:
im_info = dict()
if isinstance(im_file, np.ndarray):
if len(im_file.shape) != 3:
raise Exception(
"im should be 3-dimensions, but now is {}-dimensions".
format(len(im_file.shape)))
im = im_file
else:
try:
im = cv2.imread(im_file).astype('float32')
except:
raise TypeError(
'Can\'t read The image file {}!'.format(im_file))
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
# make default im_info with [h, w, 1]
im_info['im_resize_info'] = np.array(
[im.shape[0], im.shape[1], 1.], dtype=np.float32)
im_info['image_shape'] = np.array([im.shape[0],
im.shape[1]]).astype('int32')
if not self.use_mixup:
if 'mixup' in im_info:
del im_info['mixup']
# decode mixup image
if 'mixup' in im_info:
im_info['mixup'] = \
decode_image(im_info['mixup'][0],
im_info['mixup'][1],
im_info['mixup'][2])
if label_info is None:
return (im, im_info)
else:
return (im, im_info, label_info)
outputs = decode_image(im, im_info, label_info)
im = outputs[0]
im_info = outputs[1]
if len(outputs) == 3:
label_info = outputs[2]
for op in self.transforms:
if im is None:
return None
if isinstance(op, DetTransform):
outputs = op(im, im_info, label_info)
im = outputs[0]
else:
if label_info is not None:
gt_poly = label_info.get('gt_poly', None)
gt_bbox = label_info['gt_bbox']
if gt_poly is None:
im, aug_bbox = execute_imgaug(op, im, bboxes=gt_bbox)
else:
im, aug_bbox, aug_poly = execute_imgaug(
op, im, bboxes=gt_bbox, polygons=gt_poly)
label_info['gt_poly'] = aug_poly
label_info['gt_bbox'] = aug_bbox
outputs = (im, im_info, label_info)
else:
im, = execute_imgaug(op, im)
outputs = (im, im_info)
return outputs
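# --- Illustrative usage sketch (not part of the original module) ---
# A typical detection pipeline built from the transforms defined in this file.
# The image path is a placeholder; call this only after the whole module has been
# imported, since ResizeByShort and Padding are defined further below.
def _compose_usage_example():
    transforms = Compose([
        ResizeByShort(short_size=800, max_size=1333),
        Padding(coarsest_stride=32),
    ])
    im, im_info = transforms('/tmp/example.jpg')  # hypothetical image file
    return im.shape, im_info['im_resize_info']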
class ResizeByShort(DetTransform):
"""根据图像的短边调整图像大小(resize)。
1. 获取图像的长边和短边长度。
2. 根据短边与short_size的比例,计算长边的目标长度,
此时高、宽的resize比例为short_size/原图短边长度。
3. 如果max_size>0,调整resize比例:
如果长边的目标长度>max_size,则高、宽的resize比例为max_size/原图长边长度。
4. 根据调整大小的比例对图像进行resize。
Args:
target_size (int): 短边目标长度。默认为800。
max_size (int): 长边目标长度的最大限制。默认为1333。
Raises:
TypeError: 形参数据类型不满足需求。
"""
def __init__(self, short_size=800, max_size=1333):
self.max_size = int(max_size)
if not isinstance(short_size, int):
raise TypeError(
"Type of short_size is invalid. Must be Integer, now is {}".
format(type(short_size)))
self.short_size = short_size
if not (isinstance(self.max_size, int)):
raise TypeError("max_size: input type is invalid.")
def __call__(self, im, im_info=None, label_info=None):
"""
Args:
im (numnp.ndarraypy): 图像np.ndarray数据。
im_info (dict, 可选): 存储与图像相关的信息。
label_info (dict, 可选): 存储与标注框相关的信息。
Returns:
tuple: 当label_info为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典;
当label_info不为空时,返回的tuple为(im, im_info, label_info),分别对应图像np.ndarray数据、
存储与标注框相关信息的字典。
其中,im_info更新字段为:
- im_resize_info (np.ndarray): resize后的图像高、resize后的图像宽、resize后的图像相对原始图的缩放比例
三者组成的np.ndarray,形状为(3,)。
Raises:
TypeError: 形参数据类型不满足需求。
ValueError: 数据长度不匹配。
"""
if im_info is None:
im_info = dict()
if not isinstance(im, np.ndarray):
raise TypeError("ResizeByShort: image type is not numpy.")
if len(im.shape) != 3:
raise ValueError('ResizeByShort: image is not 3-dimensional.')
im_short_size = min(im.shape[0], im.shape[1])
im_long_size = max(im.shape[0], im.shape[1])
scale = float(self.short_size) / im_short_size
if self.max_size > 0 and np.round(
scale * im_long_size) > self.max_size:
scale = float(self.max_size) / float(im_long_size)
resized_width = int(round(im.shape[1] * scale))
resized_height = int(round(im.shape[0] * scale))
im_resize_info = [resized_height, resized_width, scale]
im = cv2.resize(
im, (resized_width, resized_height),
interpolation=cv2.INTER_LINEAR)
im_info['im_resize_info'] = np.array(im_resize_info).astype(np.float32)
if label_info is None:
return (im, im_info)
else:
return (im, im_info, label_info)
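# Worked example of the scaling rule above (illustrative, not from the original docs):
# for a 500 x 1600 image with short_size=800 and max_size=1333, the short-side scale
# is 800/500 = 1.6, but 1.6*1600 = 2560 > 1333, so the scale is capped at
# 1333/1600 ~= 0.833 and the output is roughly 417 x 1333.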
class Padding(DetTransform):
"""1.将图像的长和宽padding至coarsest_stride的倍数。如输入图像为[300, 640],
`coarest_stride`为32,则由于300不为32的倍数,因此在图像最右和最下使用0值
进行padding,最终输出图像为[320, 640]。
2.或者,将图像的长和宽padding到target_size指定的shape,如输入的图像为[300,640],
a. `target_size` = 960,在图像最右和最下使用0值进行padding,最终输出
图像为[960, 960]。
b. `target_size` = [640, 960],在图像最右和最下使用0值进行padding,最终
输出图像为[640, 960]。
1. 如果coarsest_stride为1,target_size为None则直接返回。
2. 获取图像的高H、宽W。
3. 计算填充后图像的高H_new、宽W_new。
4. 构建大小为(H_new, W_new, 3)像素值为0的np.ndarray,
并将原图的np.ndarray粘贴于左上角。
Args:
coarsest_stride (int): 填充后的图像长、宽为该参数的倍数,默认为1。
target_size (int|list|tuple): 填充后的图像长、宽,默认为None,coarset_stride优先级更高。
Raises:
TypeError: 形参`target_size`数据类型不满足需求。
ValueError: 形参`target_size`为(list|tuple)时,长度不满足需求。
"""
def __init__(self, coarsest_stride=1, target_size=None):
self.coarsest_stride = coarsest_stride
if target_size is not None:
if not isinstance(target_size, int):
if not isinstance(target_size, tuple) and not isinstance(
target_size, list):
raise TypeError(
"Padding: Type of target_size must in (int|list|tuple)."
)
elif len(target_size) != 2:
raise ValueError(
"Padding: Length of target_size must equal 2.")
self.target_size = target_size
def __call__(self, im, im_info=None, label_info=None):
"""
Args:
im (numnp.ndarraypy): 图像np.ndarray数据。
im_info (dict, 可选): 存储与图像相关的信息。
label_info (dict, 可选): 存储与标注框相关的信息。
Returns:
tuple: 当label_info为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典;
当label_info不为空时,返回的tuple为(im, im_info, label_info),分别对应图像np.ndarray数据、
存储与标注框相关信息的字典。
Raises:
TypeError: 形参数据类型不满足需求。
ValueError: 数据长度不匹配。
ValueError: coarsest_stride,target_size需有且只有一个被指定。
ValueError: target_size小于原图的大小。
"""
if im_info is None:
im_info = dict()
if not isinstance(im, np.ndarray):
raise TypeError("Padding: image type is not numpy.")
if len(im.shape) != 3:
raise ValueError('Padding: image is not 3-dimensional.')
im_h, im_w, im_c = im.shape[:]
if isinstance(self.target_size, int):
padding_im_h = self.target_size
padding_im_w = self.target_size
elif isinstance(self.target_size, list) or isinstance(
self.target_size, tuple):
padding_im_w = self.target_size[0]
padding_im_h = self.target_size[1]
elif self.coarsest_stride > 0:
padding_im_h = int(
np.ceil(im_h / self.coarsest_stride) * self.coarsest_stride)
padding_im_w = int(
np.ceil(im_w / self.coarsest_stride) * self.coarsest_stride)
else:
raise ValueError(
"coarsest_stridei(>1) or target_size(list|int) need setting in Padding transform"
)
pad_height = padding_im_h - im_h
pad_width = padding_im_w - im_w
if pad_height < 0 or pad_width < 0:
raise ValueError(
'the size of the image should be no larger than target_size, but the image size ({}, {}) is larger than target_size ({}, {})'
.format(im_w, im_h, padding_im_w, padding_im_h))
padding_im = np.zeros((padding_im_h, padding_im_w, im_c),
dtype=np.float32)
padding_im[:im_h, :im_w, :] = im
if label_info is None:
return (padding_im, im_info)
else:
return (padding_im, im_info, label_info)
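# A minimal usage sketch of Padding, mirroring the [300, 640] -> [320, 640] example in the
# docstring above; the zero stand-in image is an illustrative assumption.
def _demo_padding_to_stride():
    demo_im = np.zeros((300, 640, 3), dtype='float32')
    padded, _ = Padding(coarsest_stride=32)(demo_im)
    # ceil(300/32)*32 = 320 and ceil(640/32)*32 = 640, so padded.shape == (320, 640, 3)
    return padded.shape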
class Resize(DetTransform):
"""调整图像大小(resize)。
- 当目标大小(target_size)类型为int时,根据插值方式,
将图像resize为[target_size, target_size]。
- 当目标大小(target_size)类型为list或tuple时,根据插值方式,
将图像resize为target_size。
注意:当插值方式为“RANDOM”时,则随机选取一种插值方式进行resize。
Args:
target_size (int/list/tuple): 短边目标长度。默认为608。
interp (str): resize的插值方式,与opencv的插值方式对应,取值范围为
['NEAREST', 'LINEAR', 'CUBIC', 'AREA', 'LANCZOS4', 'RANDOM']。默认为"LINEAR"。
Raises:
TypeError: 形参数据类型不满足需求。
ValueError: 插值方式不在['NEAREST', 'LINEAR', 'CUBIC',
'AREA', 'LANCZOS4', 'RANDOM']中。
"""
# The interpolation mode
interp_dict = {
'NEAREST': cv2.INTER_NEAREST,
'LINEAR': cv2.INTER_LINEAR,
'CUBIC': cv2.INTER_CUBIC,
'AREA': cv2.INTER_AREA,
'LANCZOS4': cv2.INTER_LANCZOS4
}
def __init__(self, target_size=608, interp='LINEAR'):
self.interp = interp
if not (interp == "RANDOM" or interp in self.interp_dict):
raise ValueError("interp should be one of {}".format(
self.interp_dict.keys()))
if isinstance(target_size, list) or isinstance(target_size, tuple):
if len(target_size) != 2:
raise TypeError(
'when target_size is a list or tuple, it should contain 2 elements, but it is {}'
.format(target_size))
elif not isinstance(target_size, int):
raise TypeError(
"Type of target_size is invalid. Must be Integer or List or tuple, now is {}"
.format(type(target_size)))
self.target_size = target_size
def __call__(self, im, im_info=None, label_info=None):
"""
Args:
im (np.ndarray): 图像np.ndarray数据。
im_info (dict, 可选): 存储与图像相关的信息。
label_info (dict, 可选): 存储与标注框相关的信息。
Returns:
tuple: 当label_info为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典;
当label_info不为空时,返回的tuple为(im, im_info, label_info),分别对应图像np.ndarray数据、
存储与标注框相关信息的字典。
Raises:
TypeError: 形参数据类型不满足需求。
ValueError: 数据长度不匹配。
"""
if im_info is None:
im_info = dict()
if not isinstance(im, np.ndarray):
raise TypeError("Resize: image type is not numpy.")
if len(im.shape) != 3:
raise ValueError('Resize: image is not 3-dimensional.')
if self.interp == "RANDOM":
interp = random.choice(list(self.interp_dict.keys()))
else:
interp = self.interp
im = resize(im, self.target_size, self.interp_dict[interp])
if label_info is None:
return (im, im_info)
else:
return (im, im_info, label_info)
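# A minimal usage sketch of Resize; the random stand-in image is an illustrative assumption.
# Per the class docstring above, an integer target_size produces a square output.
def _demo_resize_square():
    demo_im = np.random.randint(0, 255, (300, 640, 3)).astype('float32')
    resized, _ = Resize(target_size=608, interp='LINEAR')(demo_im)
    # expected to be (608, 608, 3) for the default square target size
    return resized.shape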
class RandomHorizontalFlip(DetTransform):
"""随机翻转图像、标注框、分割信息,模型训练时的数据增强操作。
1. 随机采样一个0-1之间的小数,当小数小于水平翻转概率时,
执行2-4步操作,否则直接返回。
2. 水平翻转图像。
3. 计算翻转后的真实标注框的坐标,更新label_info中的gt_bbox信息。
4. 计算翻转后的真实分割区域的坐标,更新label_info中的gt_poly信息。
Args:
prob (float): 随机水平翻转的概率。默认为0.5。
Raises:
TypeError: 形参数据类型不满足需求。
"""
def __init__(self, prob=0.5):
self.prob = prob
if not isinstance(self.prob, float):
raise TypeError("RandomHorizontalFlip: input type is invalid.")
def __call__(self, im, im_info=None, label_info=None):
"""
Args:
im (np.ndarray): 图像np.ndarray数据。
im_info (dict, 可选): 存储与图像相关的信息。
label_info (dict, 可选): 存储与标注框相关的信息。
Returns:
tuple: 当label_info为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典;
当label_info不为空时,返回的tuple为(im, im_info, label_info),分别对应图像np.ndarray数据、
存储与标注框相关信息的字典。
其中,im_info更新字段为:
- gt_bbox (np.ndarray): 水平翻转后的标注框坐标[x1, y1, x2, y2],形状为(n, 4),
其中n代表真实标注框的个数。
- gt_poly (list): 水平翻转后的多边形分割区域的x、y坐标,长度为n,
其中n代表真实标注框的个数。
Raises:
TypeError: 形参数据类型不满足需求。
ValueError: 数据长度不匹配。
"""
if not isinstance(im, np.ndarray):
raise TypeError(
"RandomHorizontalFlip: image is not a numpy array.")
if len(im.shape) != 3:
raise ValueError(
"RandomHorizontalFlip: image is not 3-dimensional.")
if im_info is None or label_info is None:
raise TypeError(
'Cannot do RandomHorizontalFlip! ' +
'Because the im_info and label_info cannot be None!')
if 'gt_bbox' not in label_info:
raise TypeError('Cannot do RandomHorizontalFlip! ' + \
'Because gt_bbox is not in label_info!')
image_shape = im_info['image_shape']
gt_bbox = label_info['gt_bbox']
height = image_shape[0]
width = image_shape[1]
if np.random.uniform(0, 1) < self.prob:
im = horizontal_flip(im)
if gt_bbox.shape[0] == 0:
if label_info is None:
return (im, im_info)
else:
return (im, im_info, label_info)
label_info['gt_bbox'] = box_horizontal_flip(gt_bbox, width)
if 'gt_poly' in label_info and \
len(label_info['gt_poly']) != 0:
label_info['gt_poly'] = segms_horizontal_flip(
label_info['gt_poly'], height, width)
if label_info is None:
return (im, im_info)
else:
return (im, im_info, label_info)
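# A minimal usage sketch of RandomHorizontalFlip showing the dictionaries it expects; the
# stand-in image, box and prob=1.0 are illustrative assumptions. With prob=1.0 the flip is
# always applied and the gt_bbox x-coordinates are mirrored by box_horizontal_flip.
def _demo_random_horizontal_flip():
    demo_im = np.zeros((100, 200, 3), dtype='float32')
    im_info = {'image_shape': np.array([100, 200])}
    label_info = {'gt_bbox': np.array([[10., 20., 60., 80.]], dtype='float32')}
    return RandomHorizontalFlip(prob=1.0)(demo_im, im_info, label_info)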
class Normalize(DetTransform):
"""对图像进行标准化。
1. 归一化图像到到区间[0.0, 1.0]。
2. 对图像进行减均值除以标准差操作。
Args:
mean (list): 图像数据集的均值。默认为[0.485, 0.456, 0.406]。
std (list): 图像数据集的标准差。默认为[0.229, 0.224, 0.225]。
Raises:
TypeError: 形参数据类型不满足需求。
"""
def __init__(self, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
self.mean = mean
self.std = std
if not (isinstance(self.mean, list) and isinstance(self.std, list)):
raise TypeError("NormalizeImage: input type is invalid.")
from functools import reduce
if reduce(lambda x, y: x * y, self.std) == 0:
raise TypeError('NormalizeImage: std is invalid!')
def __call__(self, im, im_info=None, label_info=None):
"""
Args:
im (numnp.ndarraypy): 图像np.ndarray数据。
im_info (dict, 可选): 存储与图像相关的信息。
label_info (dict, 可选): 存储与标注框相关的信息。
Returns:
tuple: 当label_info为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典;
当label_info不为空时,返回的tuple为(im, im_info, label_info),分别对应图像np.ndarray数据、
存储与标注框相关信息的字典。
"""
mean = np.array(self.mean)[np.newaxis, np.newaxis, :]
std = np.array(self.std)[np.newaxis, np.newaxis, :]
im = normalize(im, mean, std)
if label_info is None:
return (im, im_info)
else:
return (im, im_info, label_info)
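# A minimal usage sketch of Normalize; the constant stand-in image is an illustrative
# assumption. Per the docstring above, pixel values are rescaled to [0, 1] and then
# standardized with the given mean and std, so a constant image equal to 255*mean should
# map approximately to zero.
def _demo_normalize():
    demo_im = np.ones((4, 4, 3), dtype='float32') * np.array([0.485, 0.456, 0.406]) * 255.0
    normalized, _ = Normalize()(demo_im)
    return normalized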
class RandomDistort(DetTransform):
"""以一定的概率对图像进行随机像素内容变换,模型训练时的数据增强操作
1. 对变换的操作顺序进行随机化操作。
2. 按照1中的顺序以一定的概率在范围[-range, range]对图像进行随机像素内容变换。
Args:
brightness_range (float): 明亮度因子的范围。默认为0.5。
brightness_prob (float): 随机调整明亮度的概率。默认为0.5。
contrast_range (float): 对比度因子的范围。默认为0.5。
contrast_prob (float): 随机调整对比度的概率。默认为0.5。
saturation_range (float): 饱和度因子的范围。默认为0.5。
saturation_prob (float): 随机调整饱和度的概率。默认为0.5。
hue_range (int): 色调因子的范围。默认为18。
hue_prob (float): 随机调整色调的概率。默认为0.5。
"""
def __init__(self,
brightness_range=0.5,
brightness_prob=0.5,
contrast_range=0.5,
contrast_prob=0.5,
saturation_range=0.5,
saturation_prob=0.5,
hue_range=18,
hue_prob=0.5):
self.brightness_range = brightness_range
self.brightness_prob = brightness_prob
self.contrast_range = contrast_range
self.contrast_prob = contrast_prob
self.saturation_range = saturation_range
self.saturation_prob = saturation_prob
self.hue_range = hue_range
self.hue_prob = hue_prob
def __call__(self, im, im_info=None, label_info=None):
"""
Args:
im (np.ndarray): 图像np.ndarray数据。
im_info (dict, 可选): 存储与图像相关的信息。
label_info (dict, 可选): 存储与标注框相关的信息。
Returns:
tuple: 当label_info为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典;
当label_info不为空时,返回的tuple为(im, im_info, label_info),分别对应图像np.ndarray数据、
存储与标注框相关信息的字典。
"""
brightness_lower = 1 - self.brightness_range
brightness_upper = 1 + self.brightness_range
contrast_lower = 1 - self.contrast_range
contrast_upper = 1 + self.contrast_range
saturation_lower = 1 - self.saturation_range
saturation_upper = 1 + self.saturation_range
hue_lower = -self.hue_range
hue_upper = self.hue_range
ops = [brightness, contrast, saturation, hue]
random.shuffle(ops)
params_dict = {
'brightness': {
'brightness_lower': brightness_lower,
'brightness_upper': brightness_upper
},
'contrast': {
'contrast_lower': contrast_lower,
'contrast_upper': contrast_upper
},
'saturation': {
'saturation_lower': saturation_lower,
'saturation_upper': saturation_upper
},
'hue': {
'hue_lower': hue_lower,
'hue_upper': hue_upper
}
}
prob_dict = {
'brightness': self.brightness_prob,
'contrast': self.contrast_prob,
'saturation': self.saturation_prob,
'hue': self.hue_prob
}
for id in range(4):
params = params_dict[ops[id].__name__]
prob = prob_dict[ops[id].__name__]
params['im'] = im
if np.random.uniform(0, 1) < prob:
im = ops[id](**params)
if label_info is None:
return (im, im_info)
else:
return (im, im_info, label_info)
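# A minimal sketch of how the distortion ranges above map to sampling intervals: a range of 0.5
# means brightness/contrast/saturation factors are drawn from [1 - 0.5, 1 + 0.5] = [0.5, 1.5],
# while hue offsets are drawn from [-hue_range, hue_range]. The default-constructed instance
# below is only for illustration.
def _demo_random_distort_ranges(rd=None):
    rd = rd or RandomDistort()
    return (1 - rd.brightness_range, 1 + rd.brightness_range), (-rd.hue_range, rd.hue_range)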
class MixupImage(DetTransform):
"""对图像进行mixup操作,模型训练时的数据增强操作,目前仅YOLOv3模型支持该transform。
当label_info中不存在mixup字段时,直接返回,否则进行下述操作:
1. 从随机beta分布中抽取出随机因子factor。
2.
- 当factor>=1.0时,去除label_info中的mixup字段,直接返回。
- 当factor<=0.0时,直接返回label_info中的mixup字段,并在label_info中去除该字段。
- 其余情况,执行下述操作:
(1)原图像乘以factor,mixup图像乘以(1-factor),叠加2个结果。
(2)拼接原图像标注框和mixup图像标注框。
(3)拼接原图像标注框类别和mixup图像标注框类别。
(4)原图像标注框混合得分乘以factor,mixup图像标注框混合得分乘以(1-factor),叠加2个结果。
3. 更新im_info中的image_shape信息。
Args:
alpha (float): 随机beta分布的下限。默认为1.5。
beta (float): 随机beta分布的上限。默认为1.5。
mixup_epoch (int): 在前mixup_epoch轮使用mixup增强操作;当该参数为-1时,该策略不会生效。
默认为-1。
Raises:
ValueError: 数据长度不匹配。
"""
def __init__(self, alpha=1.5, beta=1.5, mixup_epoch=-1):
self.alpha = alpha
self.beta = beta
if self.alpha <= 0.0:
raise ValueError("alpha shold be positive in MixupImage")
if self.beta <= 0.0:
raise ValueError("beta shold be positive in MixupImage")
self.mixup_epoch = mixup_epoch
def _mixup_img(self, img1, img2, factor):
h = max(img1.shape[0], img2.shape[0])
w = max(img1.shape[1], img2.shape[1])
img = np.zeros((h, w, img1.shape[2]), 'float32')
img[:img1.shape[0], :img1.shape[1], :] = \
img1.astype('float32') * factor
img[:img2.shape[0], :img2.shape[1], :] += \
img2.astype('float32') * (1.0 - factor)
return img.astype('float32')
def __call__(self, im, im_info=None, label_info=None):
"""
Args:
im (np.ndarray): 图像np.ndarray数据。
im_info (dict, 可选): 存储与图像相关的信息。
label_info (dict, 可选): 存储与标注框相关的信息。
Returns:
tuple: 当label_info为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典;
当label_info不为空时,返回的tuple为(im, im_info, label_info),分别对应图像np.ndarray数据、
存储与标注框相关信息的字典。
其中,im_info更新字段为:
- image_shape (np.ndarray): mixup后的图像高、宽二者组成的np.ndarray,形状为(2,)。
im_info删除的字段:
- mixup (list): 与当前字段进行mixup的图像相关信息。
label_info更新字段为:
- gt_bbox (np.ndarray): mixup后真实标注框坐标,形状为(n, 4),
其中n代表真实标注框的个数。
- gt_class (np.ndarray): mixup后每个真实标注框对应的类别序号,形状为(n, 1),
其中n代表真实标注框的个数。
- gt_score (np.ndarray): mixup后每个真实标注框对应的混合得分,形状为(n, 1),
其中n代表真实标注框的个数。
Raises:
TypeError: 形参数据类型不满足需求。
"""
if im_info is None:
raise TypeError('Cannot do MixupImage! ' +
'Because the im_info cannot be None!')
if 'mixup' not in im_info:
if label_info is None:
return (im, im_info)
else:
return (im, im_info, label_info)
factor = np.random.beta(self.alpha, self.beta)
factor = max(0.0, min(1.0, factor))
if im_info['epoch'] > self.mixup_epoch \
or factor >= 1.0:
im_info.pop('mixup')
if label_info is None:
return (im, im_info)
else:
return (im, im_info, label_info)
if factor <= 0.0:
return im_info.pop('mixup')
im = self._mixup_img(im, im_info['mixup'][0], factor)
if label_info is None:
raise TypeError('Cannot do MixupImage! ' +
'Because the label_info cannot be None!')
if 'gt_bbox' not in label_info or \
'gt_class' not in label_info or \
'gt_score' not in label_info:
raise TypeError('Cannot do MixupImage! ' + \
'Because gt_bbox/gt_class/gt_score is not in label_info!')
gt_bbox1 = label_info['gt_bbox']
gt_bbox2 = im_info['mixup'][2]['gt_bbox']
gt_bbox = np.concatenate((gt_bbox1, gt_bbox2), axis=0)
gt_class1 = label_info['gt_class']
gt_class2 = im_info['mixup'][2]['gt_class']
gt_class = np.concatenate((gt_class1, gt_class2), axis=0)
gt_score1 = label_info['gt_score']
gt_score2 = im_info['mixup'][2]['gt_score']
gt_score = np.concatenate(
(gt_score1 * factor, gt_score2 * (1. - factor)), axis=0)
if 'gt_poly' in label_info:
gt_poly1 = label_info['gt_poly']
gt_poly2 = im_info['mixup'][2]['gt_poly']
label_info['gt_poly'] = gt_poly1 + gt_poly2
is_crowd1 = label_info['is_crowd']
is_crowd2 = im_info['mixup'][2]['is_crowd']
is_crowd = np.concatenate((is_crowd1, is_crowd2), axis=0)
label_info['gt_bbox'] = gt_bbox
label_info['gt_score'] = gt_score
label_info['gt_class'] = gt_class
label_info['is_crowd'] = is_crowd
im_info['image_shape'] = np.array([im.shape[0],
im.shape[1]]).astype('int32')
im_info.pop('mixup')
if label_info is None:
return (im, im_info)
else:
return (im, im_info, label_info)
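# A minimal sketch of the pixel-level blend performed by MixupImage._mixup_img; the tiny
# constant arrays below are illustrative assumptions. The method pads both images to the
# larger height/width before the weighted sum, as in the code above.
def _demo_mixup_img_blend():
    img1 = np.full((2, 2, 3), 100, dtype='float32')
    img2 = np.full((4, 2, 3), 200, dtype='float32')
    blended = MixupImage(alpha=1.5, beta=1.5)._mixup_img(img1, img2, factor=0.25)
    # blended has shape (4, 2, 3); the overlapping rows are 100*0.25 + 200*0.75 = 175,
    # while rows covered only by img2 are 200*0.75 = 150
    return blended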
class RandomExpand(DetTransform):
"""随机扩张图像,模型训练时的数据增强操作。
1. 随机选取扩张比例(扩张比例大于1时才进行扩张)。
2. 计算扩张后图像大小。
3. 初始化像素值为输入填充值的图像,并将原图像随机粘贴于该图像上。
4. 根据原图像粘贴位置换算出扩张后真实标注框的位置坐标。
5. 根据原图像粘贴位置换算出扩张后真实分割区域的位置坐标。
Args:
ratio (float): 图像扩张的最大比例。默认为4.0。
prob (float): 随机扩张的概率。默认为0.5。
fill_value (list): 扩张图像的初始填充值(0-255)。默认为[123.675, 116.28, 103.53]。
"""
def __init__(self,
ratio=4.,
prob=0.5,
fill_value=[123.675, 116.28, 103.53]):
super(RandomExpand, self).__init__()
assert ratio > 1.01, "expand ratio must be larger than 1.01"
self.ratio = ratio
self.prob = prob
assert isinstance(fill_value, Sequence), \
"fill value must be sequence"
if not isinstance(fill_value, tuple):
fill_value = tuple(fill_value)
self.fill_value = fill_value
def __call__(self, im, im_info=None, label_info=None):
"""
Args:
im (np.ndarray): 图像np.ndarray数据。
im_info (dict, 可选): 存储与图像相关的信息。
label_info (dict, 可选): 存储与标注框相关的信息。
Returns:
tuple: 当label_info为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典;
当label_info不为空时,返回的tuple为(im, im_info, label_info),分别对应图像np.ndarray数据、
存储与标注框相关信息的字典。
其中,im_info更新字段为:
- image_shape (np.ndarray): 扩张后的图像高、宽二者组成的np.ndarray,形状为(2,)。
label_info更新字段为:
- gt_bbox (np.ndarray): 随机扩张后真实标注框坐标,形状为(n, 4),
其中n代表真实标注框的个数。
- gt_class (np.ndarray): 随机扩张后每个真实标注框对应的类别序号,形状为(n, 1),
其中n代表真实标注框的个数。
Raises:
TypeError: 形参数据类型不满足需求。
"""
if im_info is None or label_info is None:
raise TypeError(
'Cannot do RandomExpand! ' +
'Because the im_info and label_info cannot be None!')
if 'gt_bbox' not in label_info or \
'gt_class' not in label_info:
raise TypeError('Cannot do RandomExpand! ' + \
'Because gt_bbox/gt_class is not in label_info!')
if np.random.uniform(0., 1.) < self.prob:
return (im, im_info, label_info)
image_shape = im_info['image_shape']
height = int(image_shape[0])
width = int(image_shape[1])
expand_ratio = np.random.uniform(1., self.ratio)
h = int(height * expand_ratio)
w = int(width * expand_ratio)
if not h > height or not w > width:
return (im, im_info, label_info)
y = np.random.randint(0, h - height)
x = np.random.randint(0, w - width)
canvas = np.ones((h, w, 3), dtype=np.float32)
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 12 2020
Class to read and manipulate CryoSat-2 waveform data
Reads CryoSat Level-1b data products from baselines A, B and C
Reads CryoSat Level-1b netCDF4 data products from baseline D
Supported CryoSat Modes: LRM, SAR, SARin, FDM, SID, GDR
INPUTS:
full_filename: full path of CryoSat .DBL or .nc file
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
http://www.numpy.org
http://www.scipy.org/NumPy_for_Matlab_Users
netCDF4: Python interface to the netCDF C library
https://unidata.github.io/netcdf4-python/netCDF4/index.html
UPDATE HISTORY:
Updated 08/2020: flake8 compatible binary regular expression strings
Forked 02/2020 from read_cryosat_L1b.py
Updated 11/2019: empty placeholder dictionary for baseline D DSD headers
Updated 09/2019: added netCDF4 read function for baseline D
Updated 04/2019: USO correction signed 32 bit int
Updated 10/2018: updated header read functions for python3
Updated 05/2016: using __future__ print and division functions
Written 03/2016
"""
from __future__ import print_function
from __future__ import division
import numpy as np
import pointCollection as pc
import netCDF4
import re
import os
class data(pc.data):
np.seterr(invalid='ignore')
def __default_field_dict__(self):
"""
Define the default fields that get read from the CryoSat-2 file
"""
field_dict = {}
field_dict['Location'] = ['days_J2k','Day','Second','Micsec','USO_Corr',
'Mode_ID','SSC','Inst_config','Rec_Count','Lat','Lon','Alt','Alt_rate',
'Sat_velocity','Real_beam','Baseline','ST_ID','Roll','Pitch','Yaw','MCD']
field_dict['Data'] = ['TD', 'H_0','COR2','LAI','FAI','AGC_CH1','AGC_CH2',
'TR_gain_CH1','TR_gain_CH2','TX_Power','Doppler_range','TR_inst_range',
'R_inst_range','TR_inst_gain','R_inst_gain','Internal_phase',
'External_phase','Noise_power','Phase_slope']
field_dict['Geometry'] = ['dryTrop','wetTrop','InvBar','DAC','Iono_GIM',
'Iono_model','ocTideElv','lpeTideElv','olTideElv','seTideElv','gpTideElv',
'Surf_type','Corr_status','Corr_error']
field_dict['Waveform_20Hz'] = ['Waveform','Linear_Wfm_Multiplier',
'Power2_Wfm_Multiplier','N_avg_echoes']
field_dict['METADATA'] = ['MPH','SPH']
return field_dict
def from_dbl(self, full_filename, field_dict=None, unpack=False, verbose=False):
"""
Read CryoSat Level-1b data from binary formats
"""
# file basename and file extension of input file
fileBasename,fileExtension=os.path.splitext(os.path.basename(full_filename))
# CryoSat file class
# OFFL (Off Line Processing/Systematic)
# NRT_ (Near Real Time)
# RPRO (ReProcessing)
# TEST (Testing)
# TIxx (Stand alone IPF1 testing)
# LTA_ (Long Term Archive)
regex_class = 'OFFL|NRT_|RPRO|TEST|TIxx|LTA_'
# CryoSat mission products
# SIR1SAR_FR: Level 1 FBR SAR Mode (Rx1 Channel)
# SIR2SAR_FR: Level 1 FBR SAR Mode (Rx2 Channel)
# SIR_SIN_FR: Level 1 FBR SARin Mode
# SIR_LRM_1B: Level-1 Product Low Rate Mode
# SIR_FDM_1B: Level-1 Product Fast Delivery Marine Mode
# SIR_SAR_1B: Level-1 SAR Mode
# SIR_SIN_1B: Level-1 SARin Mode
# SIR1LRC11B: Level-1 CAL1 Low Rate Mode (Rx1 Channel)
# SIR2LRC11B: Level-1 CAL1 Low Rate Mode (Rx2 Channel)
# SIR1SAC11B: Level-1 CAL1 SAR Mode (Rx1 Channel)
# SIR2SAC11B: Level-1 CAL1 SAR Mode (Rx2 Channel)
# SIR_SIC11B: Level-1 CAL1 SARin Mode
# SIR_SICC1B: Level-1 CAL1 SARIN Exotic Data
# SIR1SAC21B: Level-1 CAL2 SAR Mode (Rx1 Channel)
# SIR2SAC21B: Level-1 CAL2 SAR Mode (Rx2 Channel)
# SIR1SIC21B: Level-1 CAL2 SARin Mode (Rx1 Channel)
# SIR2SIC21B: Level-1 CAL2 SARin Mode (Rx1 Channel)
# SIR1LRM_0M: LRM and TRK Monitoring Data from Rx 1 Channel
# SIR2LRM_0M: LRM and TRK Monitoring Data from Rx 2 Channel
# SIR1SAR_0M: SAR Monitoring Data from Rx 1 Channel
# SIR2SAR_0M: SAR Monitoring Data from Rx 1 Channel
# SIR_SIN_0M: SARIN Monitoring Data
# SIR_SIC40M: CAL4 Monitoring Data
regex_products = ('SIR1SAR_FR|SIR2SAR_FR|SIR_SIN_FR|SIR_LRM_1B|SIR_FDM_1B|'
'SIR_SAR_1B|SIR_SIN_1B|SIR1LRC11B|SIR2LRC11B|SIR1SAC11B|SIR2SAC11B|'
'SIR_SIC11B|SIR_SICC1B|SIR1SAC21B|SIR2SAC21B|SIR1SIC21B|SIR2SIC21B|'
'SIR1LRM_0M|SIR2LRM_0M|SIR1SAR_0M|SIR2SAR_0M|SIR_SIN_0M|SIR_SIC40M')
# CRYOSAT LEVEL-1b PRODUCTS NAMING RULES
# Mission Identifier
# File Class
# File Product
# Validity Start Date and Time
# Validity Stop Date and Time
# Baseline Identifier
# Version Number
regex_pattern = r'(.*?)_({0})_({1})_(\d+T?\d+)_(\d+T?\d+)_(.*?)(\d+)'
rx = re.compile(regex_pattern.format(regex_class,regex_products),re.VERBOSE)
# extract file information from filename
MI,CLASS,PRODUCT,START,STOP,BASELINE,VERSION=rx.findall(fileBasename).pop()
# CryoSat-2 Mode record sizes
i_size_timestamp = 12
n_SARIN_BC_RW = 1024
n_SARIN_RW = 512
n_SAR_BC_RW = 256
n_SAR_RW = 125
n_LRM_RW = 128
n_blocks = 20
n_BeamBehaviourParams = 50
# check baseline from file to set i_record_size and allocation function
if (BASELINE == 'C'):
# calculate total record sizes of each dataset group
i_size_timegroup = i_size_timestamp + 4 + 2*2 + 6*4 + 3*3*4 + 3*2 + 4*4
i_size_measuregroup = 8 + 4*17 + 8
i_size_external_corr = 4*13 + 12
i_size_1Hz_LRM = i_size_timestamp + 3*4 + 8 + n_LRM_RW*2 + 2*4 + 2*2
i_size_1Hz_SAR = i_size_timestamp + 4*3 + 8 + n_SAR_RW*2 + 4 + 4 + 2 + 2
i_size_1Hz_SARIN = i_size_timestamp + 4*3 + 8 + n_SARIN_RW*2 + 4 + 4 + 2 + 2
i_size_LRM_waveform = n_LRM_RW*2 + 4 + 4 + 2 + 2
i_size_SAR_waveform = n_SAR_BC_RW*2 + 4 + 4 + 2 + 2 + n_BeamBehaviourParams*2
i_size_SARIN_waveform = n_SARIN_BC_RW*2 + 4 + 4 + 2 + 2 + n_SARIN_BC_RW*2 + \
n_SARIN_BC_RW*4 + n_BeamBehaviourParams*2
# Low-Resolution Mode Record Size
i_record_size_LRM_L1b = n_blocks * (i_size_timegroup + \
i_size_measuregroup + i_size_LRM_waveform) + i_size_external_corr + \
i_size_1Hz_LRM
# SAR Mode Record Size
i_record_size_SAR_L1b = n_blocks * (i_size_timegroup + \
i_size_measuregroup + i_size_SAR_waveform) + i_size_external_corr + \
i_size_1Hz_SAR
# SARIN Mode Record Size
i_record_size_SARIN_L1b = n_blocks * (i_size_timegroup + \
i_size_measuregroup + i_size_SARIN_waveform) + i_size_external_corr + \
i_size_1Hz_SARIN
# set read function for Baseline C
read_cryosat_variables = self.cryosat_baseline_C
else:
# calculate total record sizes of each dataset group
i_size_timegroup = i_size_timestamp + 4 + 2*2+ 6*4 + 3*3*4 + 4
i_size_measuregroup = 8 + 4*17 + 8
i_size_external_corr = 4*13 + 12
i_size_1Hz_LRM = i_size_timestamp + 3*4 + 8 + n_LRM_RW*2 + 2*4 + 2*2
i_size_1Hz_SAR = i_size_timestamp + 4*3 + 8 + n_SAR_RW*2 + 4 + 4 + 2 + 2
i_size_1Hz_SARIN = i_size_timestamp + 4*3 + 8 + n_SARIN_RW*2 + 4 + 4 + 2 + 2
i_size_LRM_waveform = n_LRM_RW*2 + 4 + 4 + 2 + 2
i_size_SAR_waveform = n_SAR_RW*2 + 4 + 4 + 2 + 2 + n_BeamBehaviourParams*2
i_size_SARIN_waveform = n_SARIN_RW*2 + 4 + 4 + 2 + 2 + n_SARIN_RW*2 + \
n_SARIN_RW*4 + n_BeamBehaviourParams*2
# Low-Resolution Mode Record Size
i_record_size_LRM_L1b = n_blocks * (i_size_timegroup + \
i_size_measuregroup + i_size_LRM_waveform) + i_size_external_corr + \
i_size_1Hz_LRM
# SAR Mode Record Size
i_record_size_SAR_L1b = n_blocks * (i_size_timegroup + \
i_size_measuregroup + i_size_SAR_waveform) + i_size_external_corr + \
i_size_1Hz_SAR
# SARIN Mode Record Size
i_record_size_SARIN_L1b = n_blocks * (i_size_timegroup + \
i_size_measuregroup + i_size_SARIN_waveform) + i_size_external_corr + \
i_size_1Hz_SARIN
# set read function for Baselines A and B
read_cryosat_variables = self.cryosat_baseline_AB
# get dataset MODE from PRODUCT portion of file name
# set record sizes and DS_TYPE for read_DSD function
self.MODE = re.findall('(LRM|SAR|SIN)', PRODUCT).pop()
if (self.MODE == 'LRM'):
i_record_size = i_record_size_LRM_L1b
DS_TYPE = 'CS_L1B'
elif (self.MODE == 'SAR'):
i_record_size = i_record_size_SAR_L1b
DS_TYPE = 'CS_L1B'
elif (self.MODE == 'SIN'):
i_record_size = i_record_size_SARIN_L1b
DS_TYPE = 'CS_L1B'
# read the input file to get file information
fid = os.open(os.path.expanduser(full_filename),os.O_RDONLY)
file_info = os.fstat(fid)
os.close(fid)
# num DSRs from SPH
j_num_DSR = np.int32(file_info.st_size//i_record_size)
# print file information
if verbose:
print(full_filename)
print('{0:d} {1:d} {2:d}'.format(j_num_DSR,file_info.st_size,i_record_size))
# Check if MPH/SPH/DSD headers
if (j_num_DSR*i_record_size == file_info.st_size):
print('No Header on file')
print('The number of DSRs is: {0:d}'.format(j_num_DSR))
else:
print('Header on file')
# Check if MPH/SPH/DSD headers
if (j_num_DSR*i_record_size != file_info.st_size):
# If there are MPH/SPH/DSD headers
s_MPH_fields = self.read_MPH(full_filename)
j_sph_size = np.int32(re.findall(r'[-+]?\d+',s_MPH_fields['SPH_SIZE']).pop())
s_SPH_fields = self.read_SPH(full_filename, j_sph_size)
# extract information from DSD fields
s_DSD_fields = self.read_DSD(full_filename, DS_TYPE=DS_TYPE)
# extract DS_OFFSET
j_DS_start = np.int32(re.findall(r'[-+]?\d+',s_DSD_fields['DS_OFFSET']).pop())
# extract number of DSR in the file
j_num_DSR = np.int32(re.findall(r'[-+]?\d+',s_DSD_fields['NUM_DSR']).pop())
# check the record size
j_DSR_size = np.int32(re.findall(r'[-+]?\d+',s_DSD_fields['DSR_SIZE']).pop())
# minimum size is start of the read plus number of records to read
j_check_size = j_DS_start + (j_DSR_size*j_num_DSR)
if verbose:
print('The offset of the DSD is: {0:d} bytes'.format(j_DS_start))
print('The number of DSRs is {0:d}'.format(j_num_DSR))
print('The size of the DSR is {0:d}'.format(j_DSR_size))
# check if invalid file size
if (j_check_size > file_info.st_size):
raise IOError('File size error')
# extract binary data from input CryoSat data file (skip headers)
fid = open(os.path.expanduser(full_filename), 'rb')
cryosat_header = fid.read(j_DS_start)
# iterate through CryoSat file and fill output variables
CS_L1b_mds = read_cryosat_variables(fid, j_num_DSR)
# add headers to output dictionary as METADATA
CS_L1b_mds['METADATA'] = {}
CS_L1b_mds['METADATA']['MPH'] = s_MPH_fields
CS_L1b_mds['METADATA']['SPH'] = s_SPH_fields
CS_L1b_mds['METADATA']['DSD'] = s_DSD_fields
# close the input CryoSat binary file
fid.close()
else:
# If there are not MPH/SPH/DSD headers
# extract binary data from input CryoSat data file
fid = open(os.path.expanduser(full_filename), 'rb')
# iterate through CryoSat file and fill output variables
CS_L1b_mds = read_cryosat_variables(fid, j_num_DSR)
# close the input CryoSat binary file
fid.close()
# if unpacking the units
if unpack:
CS_l1b_scale = self.cryosat_scaling_factors()
# for each dictionary key
for group in CS_l1b_scale.keys():
# for each variable
for key,val in CS_L1b_mds[group].items():
# check if val is the 20Hz waveform beam variables
if isinstance(val, dict):
# for each waveform beam variable
for k,v in val.items():
# scale variable
CS_L1b_mds[group][key][k] = CS_l1b_scale[group][key][k]*v.copy()
else:
# scale variable
CS_L1b_mds[group][key] = CS_l1b_scale[group][key]*val.copy()
# calculate GPS time of CryoSat data (seconds since Jan 6, 1980 00:00:00)
# from TAI time since Jan 1, 2000 00:00:00
GPS_Time = self.calc_GPS_time(CS_L1b_mds['Location']['Day'],
CS_L1b_mds['Location']['Second'], CS_L1b_mds['Location']['Micsec'])
# leap seconds for converting from GPS time to UTC time
leap_seconds = self.count_leap_seconds(GPS_Time)
# calculate dates as J2000 days (UTC)
CS_L1b_mds['Location']['days_J2k'] = (GPS_Time - leap_seconds)/86400.0 - 7300.0
# parameters to extract
if field_dict is None:
field_dict = self.__default_field_dict__()
# extract fields of interest using field dict keys
for group,variables in field_dict.items():
for field in variables:
if field not in self.fields:
self.fields.append(field)
setattr(self, field, CS_L1b_mds[group][field])
# update size and shape of input data
self.__update_size_and_shape__()
# return the data and header text
return self
def from_nc(self, full_filename, field_dict=None, unpack=False, verbose=False):
"""
Read CryoSat Level-1b data from netCDF4 format data
"""
# file basename and file extension of input file
fileBasename,fileExtension=os.path.splitext(os.path.basename(full_filename))
# CryoSat file class
# OFFL (Off Line Processing/Systematic)
# NRT_ (Near Real Time)
# RPRO (ReProcessing)
# TEST (Testing)
# TIxx (Stand alone IPF1 testing)
# LTA_ (Long Term Archive)
regex_class = 'OFFL|NRT_|RPRO|TEST|TIxx|LTA_'
# CryoSat mission products
# SIR1SAR_FR: Level 1 FBR SAR Mode (Rx1 Channel)
# SIR2SAR_FR: Level 1 FBR SAR Mode (Rx2 Channel)
# SIR_SIN_FR: Level 1 FBR SARin Mode
# SIR_LRM_1B: Level-1 Product Low Rate Mode
# SIR_FDM_1B: Level-1 Product Fast Delivery Marine Mode
# SIR_SAR_1B: Level-1 SAR Mode
# SIR_SIN_1B: Level-1 SARin Mode
# SIR1LRC11B: Level-1 CAL1 Low Rate Mode (Rx1 Channel)
# SIR2LRC11B: Level-1 CAL1 Low Rate Mode (Rx2 Channel)
# SIR1SAC11B: Level-1 CAL1 SAR Mode (Rx1 Channel)
# SIR2SAC11B: Level-1 CAL1 SAR Mode (Rx2 Channel)
# SIR_SIC11B: Level-1 CAL1 SARin Mode
# SIR_SICC1B: Level-1 CAL1 SARIN Exotic Data
# SIR1SAC21B: Level-1 CAL2 SAR Mode (Rx1 Channel)
# SIR2SAC21B: Level-1 CAL2 SAR Mode (Rx2 Channel)
# SIR1SIC21B: Level-1 CAL2 SARin Mode (Rx1 Channel)
# SIR2SIC21B: Level-1 CAL2 SARin Mode (Rx1 Channel)
# SIR1LRM_0M: LRM and TRK Monitoring Data from Rx 1 Channel
# SIR2LRM_0M: LRM and TRK Monitoring Data from Rx 2 Channel
# SIR1SAR_0M: SAR Monitoring Data from Rx 1 Channel
# SIR2SAR_0M: SAR Monitoring Data from Rx 1 Channel
# SIR_SIN_0M: SARIN Monitoring Data
# SIR_SIC40M: CAL4 Monitoring Data
regex_products = ('SIR1SAR_FR|SIR2SAR_FR|SIR_SIN_FR|SIR_LRM_1B|SIR_FDM_1B|'
'SIR_SAR_1B|SIR_SIN_1B|SIR1LRC11B|SIR2LRC11B|SIR1SAC11B|SIR2SAC11B|'
'SIR_SIC11B|SIR_SICC1B|SIR1SAC21B|SIR2SAC21B|SIR1SIC21B|SIR2SIC21B|'
'SIR1LRM_0M|SIR2LRM_0M|SIR1SAR_0M|SIR2SAR_0M|SIR_SIN_0M|SIR_SIC40M')
# CRYOSAT LEVEL-1b PRODUCTS NAMING RULES
# Mission Identifier
# File Class
# File Product
# Validity Start Date and Time
# Validity Stop Date and Time
# Baseline Identifier
# Version Number
regex_pattern = r'(.*?)_({0})_({1})_(\d+T?\d+)_(\d+T?\d+)_(.*?)(\d+)'
rx = re.compile(regex_pattern.format(regex_class,regex_products),re.VERBOSE)
# extract file information from filename
MI,CLASS,PRODUCT,START,STOP,BASELINE,VERSION=rx.findall(fileBasename).pop()
print(full_filename) if verbose else None
# get dataset MODE from PRODUCT portion of file name
self.MODE = re.findall(r'(LRM|FDM|SAR|SIN)', PRODUCT).pop()
# read Level-1b CryoSat-2 data from the netCDF4 file
CS_L1b_mds = self.cryosat_baseline_D(full_filename, unpack=unpack)
# calculate GPS time of CryoSat data (seconds since Jan 6, 1980 00:00:00)
# from TAI time since Jan 1, 2000 00:00:00
GPS_Time = self.calc_GPS_time(CS_L1b_mds['Location']['Day'],
CS_L1b_mds['Location']['Second'], CS_L1b_mds['Location']['Micsec'])
# leap seconds for converting from GPS time to UTC time
leap_seconds = self.count_leap_seconds(GPS_Time)
# calculate dates as J2000 days (UTC)
CS_L1b_mds['Location']['days_J2k'] = (GPS_Time - leap_seconds)/86400.0 - 7300.0
# parameters to extract
if field_dict is None:
field_dict = self.__default_field_dict__()
# extract fields of interest using field dict keys
for group,variables in field_dict.items():
for field in variables:
if field not in self.fields:
self.fields.append(field)
setattr(self, field, CS_L1b_mds[group][field])
# update size and shape of input data
self.__update_size_and_shape__()
# return the data and header text
return self
def calc_GPS_time(self, day, second, micsec):
"""
Calculate the GPS time (seconds since Jan 6, 1980 00:00:00)
"""
# TAI time is ahead of GPS by 19 seconds
return (day + 7300.0)*86400.0 + second.astype('f') + micsec/1e6 - 19
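# A worked example of the conversion above, assuming a record stamped Day=0, Second=0,
# Micsec=0 (2000-01-01 00:00:00 TAI): (0 + 7300.0)*86400.0 + 0 + 0 - 19 = 630719981.0,
# i.e. 7300 days separate the TAI reference epoch from the GPS epoch (1980-01-06) and
# the 19-second TAI-GPS offset is removed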
def count_leap_seconds(self, GPS_Time):
"""
Count number of leap seconds that have passed for given GPS times
"""
# GPS times for leap seconds
leaps = [46828800, 78364801, 109900802, 173059203, 252028804, 315187205,
346723206, 393984007, 425520008, 457056009, 504489610, 551750411,
599184012, 820108813, 914803214, 1025136015, 1119744016, 1167264017]
# number of leap seconds prior to GPS_Time
n_leaps = np.zeros_like(GPS_Time)
for i,leap in enumerate(leaps):
count = np.count_nonzero(GPS_Time >= leap)
if (count > 0):
i_records,i_blocks = np.nonzero(GPS_Time >= leap)
n_leaps[i_records,i_blocks] += 1.0
return n_leaps
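# Note: the last entry of the table above (GPS second 1167264017) corresponds to the
# 2017-01-01 leap second, so any epoch after that date accumulates all 18 entries when
# converting from GPS time to UTC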
def read_MPH(self, full_filename):
"""
Read ASCII Main Product Header (MPH) block from an ESA PDS file
"""
# read input data file
with open(os.path.expanduser(full_filename), 'rb') as fid:
file_contents = fid.read().splitlines()
# Define constant values associated with PDS file formats
# number of text lines in standard MPH
n_MPH_lines = 41
# check that first line of header matches PRODUCT
if not bool(re.match(br'PRODUCT\=\"(.*)(?=\")',file_contents[0])):
raise IOError('File does not start with a valid PDS MPH')
# read MPH header text
s_MPH_fields = {}
for i in range(n_MPH_lines):
# use regular expression operators to read headers
if bool(re.match(br'(.*?)\=\"(.*)(?=\")',file_contents[i])):
# data fields within quotes
field,value=re.findall(br'(.*?)\=\"(.*)(?=\")',file_contents[i]).pop()
s_MPH_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
elif bool(re.match(br'(.*?)\=(.*)',file_contents[i])):
# data fields without quotes
field,value=re.findall(br'(.*?)\=(.*)',file_contents[i]).pop()
s_MPH_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
# Return block name array to calling function
return s_MPH_fields
def read_SPH(self, full_filename, j_sph_size):
"""
Read ASCII Specific Product Header (SPH) block from a PDS file
"""
# read input data file
with open(os.path.expanduser(full_filename), 'rb') as fid:
file_contents = fid.read().splitlines()
# Define constant values associated with PDS file formats
# number of text lines in standard MPH
n_MPH_lines = 41
# compile regular expression operator for reading headers
rx = re.compile(br'(.*?)\=\"?(.*)',re.VERBOSE)
# check first line of header matches SPH_DESCRIPTOR
if not bool(re.match(br'SPH\_DESCRIPTOR\=',file_contents[n_MPH_lines+1])):
raise IOError('File does not have a valid PDS SPH')
# read SPH header text (no binary control characters)
s_SPH_lines = [li for li in file_contents[n_MPH_lines+1:] if rx.match(li)
and not re.search(br'[^\x20-\x7e]+',li)]
# extract SPH header text
s_SPH_fields = {}
c = 0
while (c < len(s_SPH_lines)):
# check if line is within DS_NAME portion of SPH header
if bool(re.match(br'DS_NAME',s_SPH_lines[c])):
# add dictionary for DS_NAME
field,value=re.findall(br'(.*?)\=\"(.*)(?=\")',s_SPH_lines[c]).pop()
key = value.decode('utf-8').rstrip()
s_SPH_fields[key] = {}
for line in s_SPH_lines[c+1:c+7]:
if bool(re.match(br'(.*?)\=\"(.*)(?=\")',line)):
# data fields within quotes
dsfield,dsvalue=re.findall(br'(.*?)\=\"(.*)(?=\")',line).pop()
s_SPH_fields[key][dsfield.decode('utf-8')] = dsvalue.decode('utf-8').rstrip()
elif bool(re.match(br'(.*?)\=(.*)',line)):
# data fields without quotes
dsfield,dsvalue=re.findall(br'(.*?)\=(.*)',line).pop()
s_SPH_fields[key][dsfield.decode('utf-8')] = dsvalue.decode('utf-8').rstrip()
# add 6 to counter to go to next entry
c += 6
# use regular expression operators to read headers
elif bool(re.match(br'(.*?)\=\"(.*)(?=\")',s_SPH_lines[c])):
# data fields within quotes
field,value=re.findall(br'(.*?)\=\"(.*)(?=\")',s_SPH_lines[c]).pop()
s_SPH_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
elif bool(re.match(br'(.*?)\=(.*)',s_SPH_lines[c])):
# data fields without quotes
field,value=re.findall(br'(.*?)\=(.*)',s_SPH_lines[c]).pop()
s_SPH_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
# add 1 to counter to go to next line
c += 1
# Return block name array to calling function
return s_SPH_fields
def read_DSD(self, full_filename, DS_TYPE=None):
"""
Read ASCII Data Set Descriptors (DSD) block from a PDS file
"""
# read input data file
with open(os.path.expanduser(full_filename), 'rb') as fid:
file_contents = fid.read().splitlines()
# Define constant values associated with PDS file formats
# number of text lines in standard MPH
n_MPH_lines = 41
# number of text lines in a DSD header
n_DSD_lines = 8
# Level-1b CryoSat DS_NAMES within files
regex_patterns = []
if (DS_TYPE == 'CS_L1B'):
regex_patterns.append(br'DS_NAME\="SIR_L1B_LRM[\s+]*"')
regex_patterns.append(br'DS_NAME\="SIR_L1B_SAR[\s+]*"')
regex_patterns.append(br'DS_NAME\="SIR_L1B_SARIN[\s+]*"')
elif (DS_TYPE == 'SIR_L1B_FDM'):
regex_patterns.append(br'DS_NAME\="SIR_L1B_FDM[\s+]*"')
# find the DSD starting line within the SPH header
c = 0
Flag = False
while ((Flag is False) and (c < len(regex_patterns))):
# find the index of the matching DS_NAME pattern within the SPH header
indice = [i for i,line in enumerate(file_contents[n_MPH_lines+1:]) if
re.search(regex_patterns[c],line)]
if indice:
Flag = True
else:
c+=1
# check that a valid index was found within the header
if not indice:
raise IOError('Can not find correct DSD field')
# extract s_DSD_fields info
DSD_START = n_MPH_lines + indice[0] + 1
s_DSD_fields = {}
for i in range(DSD_START,DSD_START+n_DSD_lines):
# use regular expression operators to read headers
if bool(re.match(br'(.*?)\=\"(.*)(?=\")',file_contents[i])):
# data fields within quotes
field,value=re.findall(br'(.*?)\=\"(.*)(?=\")',file_contents[i]).pop()
s_DSD_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
elif bool(re.match(br'(.*?)\=(.*)',file_contents[i])):
# data fields without quotes
field,value=re.findall(br'(.*?)\=(.*)',file_contents[i]).pop()
s_DSD_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
# Return block name array to calling function
return s_DSD_fields
def cryosat_baseline_AB(self, fid, n_records):
"""
Read L1b MDS variables for CryoSat Baselines A and B
"""
n_SARIN_RW = 512
n_SAR_RW = 128
n_LRM_RW = 128
n_blocks = 20
n_BeamBehaviourParams = 50
# Bind all the variables of the l1b_mds together into a single dictionary
CS_l1b_mds = {}
# CryoSat-2 Time and Orbit Group
CS_l1b_mds['Location'] = {}
# Time: day part
CS_l1b_mds['Location']['Day'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32,fill_value=0)
# Time: second part
CS_l1b_mds['Location']['Second'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
# Time: microsecond part
CS_l1b_mds['Location']['Micsec'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
# USO correction factor
CS_l1b_mds['Location']['USO_Corr'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Mode ID
CS_l1b_mds['Location']['Mode_ID'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint16)
# Source sequence counter
CS_l1b_mds['Location']['SSC'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint16)
# Instrument configuration
CS_l1b_mds['Location']['Inst_config'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
# Record Counter
CS_l1b_mds['Location']['Rec_Count'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
# Lat: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Location']['Lat'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Lon: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Location']['Lon'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Alt: packed units (mm, 1e-3 m)
# Altitude of COG above reference ellipsoid (interpolated value)
CS_l1b_mds['Location']['Alt'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instantaneous altitude rate derived from orbit: packed units (mm/s, 1e-3 m/s)
CS_l1b_mds['Location']['Alt_rate'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Satellite velocity vector. In ITRF: packed units (mm/s, 1e-3 m/s)
# ITRF= International Terrestrial Reference Frame
CS_l1b_mds['Location']['Sat_velocity'] = np.ma.zeros((n_records,n_blocks,3),dtype=np.int32)
# Real beam direction vector. In CRF: packed units (micro-m, 1e-6 m)
# CRF= CryoSat Reference Frame.
CS_l1b_mds['Location']['Real_beam'] = np.ma.zeros((n_records,n_blocks,3),dtype=np.int32)
# Interferometric baseline vector. In CRF: packed units (micro-m, 1e-6 m)
CS_l1b_mds['Location']['Baseline'] = np.ma.zeros((n_records,n_blocks,3),dtype=np.int32)
# Measurement Confidence Data Flags
# Generally the MCD flags indicate problems when set
# If MCD is 0 then no problems or non-nominal conditions were detected
# Serious errors are indicated by setting bit 31
CS_l1b_mds['Location']['MCD'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
# CryoSat-2 Measurement Group
# Derived from instrument measurement parameters
CS_l1b_mds['Data'] = {}
# Window Delay reference (two-way) corrected for instrument delays
CS_l1b_mds['Data']['TD'] = np.ma.zeros((n_records,n_blocks),dtype=np.int64)
# H0 Initial Height Word from telemetry
CS_l1b_mds['Data']['H_0'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# COR2 Height Rate: on-board tracker height rate over the radar cycle
CS_l1b_mds['Data']['COR2'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Coarse Range Word (LAI) derived from telemetry
CS_l1b_mds['Data']['LAI'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Fine Range Word (FAI) derived from telemetry
CS_l1b_mds['Data']['FAI'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Automatic Gain Control Channel 1: AGC gain applied on Rx channel 1.
# Gain calibration corrections are applied (Sum of AGC stages 1 and 2
# plus the corresponding corrections) (dB/100)
CS_l1b_mds['Data']['AGC_CH1'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Automatic Gain Control Channel 2: AGC gain applied on Rx channel 2.
# Gain calibration corrections are applied (dB/100)
CS_l1b_mds['Data']['AGC_CH2'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Total Fixed Gain On Channel 1: gain applied by the RF unit. (dB/100)
CS_l1b_mds['Data']['TR_gain_CH1'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Total Fixed Gain On Channel 2: gain applied by the RF unit. (dB/100)
CS_l1b_mds['Data']['TR_gain_CH2'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Transmit Power in microWatts
CS_l1b_mds['Data']['TX_Power'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Doppler range correction: Radial component (mm)
# computed for the component of satellite velocity in the nadir direction
CS_l1b_mds['Data']['Doppler_range'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instrument Range Correction: transmit-receive antenna (mm)
# Calibration correction to range on channel 1 computed from CAL1.
CS_l1b_mds['Data']['TR_inst_range'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instrument Range Correction: receive-only antenna (mm)
# Calibration correction to range on channel 2 computed from CAL1.
CS_l1b_mds['Data']['R_inst_range'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instrument Gain Correction: transmit-receive antenna (dB/100)
# Calibration correction to gain on channel 1 computed from CAL1
CS_l1b_mds['Data']['TR_inst_gain'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instrument Gain Correction: receive-only (dB/100)
# Calibration correction to gain on channel 2 computed from CAL1
CS_l1b_mds['Data']['R_inst_gain'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Internal Phase Correction (microradians)
CS_l1b_mds['Data']['Internal_phase'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# External Phase Correction (microradians)
CS_l1b_mds['Data']['External_phase'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Noise Power measurement (dB/100): converted from telemetry units to be
# the noise floor of FBR measurement echoes.
# Set to -9999.99 when the telemetry contains zero.
CS_l1b_mds['Data']['Noise_power'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Phase slope correction (microradians)
# Computed from the CAL-4 packets during the azimuth impulse response
# amplitude (SARIN only). Set from the latest available CAL-4 packet.
CS_l1b_mds['Data']['Phase_slope'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
CS_l1b_mds['Data']['Spares1'] = np.ma.zeros((n_records,n_blocks,4),dtype=np.int8)
# CryoSat-2 External Corrections Group
CS_l1b_mds['Geometry'] = {}
# Dry Tropospheric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['dryTrop'] = np.ma.zeros((n_records),dtype=np.int32)
# Wet Tropospheric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['wetTrop'] = np.ma.zeros((n_records),dtype=np.int32)
# Inverse Barometric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['InvBar'] = np.ma.zeros((n_records),dtype=np.int32)
# Delta Inverse Barometric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['DAC'] = np.ma.zeros((n_records),dtype=np.int32)
# GIM Ionospheric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['Iono_GIM'] = np.ma.zeros((n_records),dtype=np.int32)
# Model Ionospheric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['Iono_model'] = np.ma.zeros((n_records),dtype=np.int32)
# Ocean tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['ocTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Long period equilibrium ocean tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['lpeTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Ocean loading tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['olTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Solid Earth tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['seTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Geocentric Polar tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['gpTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Surface Type: enumerated key to classify surface at nadir
# 0 = Open Ocean
# 1 = Closed Sea
# 2 = Continental Ice
# 3 = Land
CS_l1b_mds['Geometry']['Surf_type'] = np.ma.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Geometry']['Spare1'] = np.ma.zeros((n_records,4),dtype=np.int8)
# Corrections Status Flag
CS_l1b_mds['Geometry']['Corr_status'] = np.ma.zeros((n_records),dtype=np.uint32)
# Correction Error Flag
CS_l1b_mds['Geometry']['Corr_error'] = np.ma.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Geometry']['Spare2'] = np.ma.zeros((n_records,4),dtype=np.int8)
# CryoSat-2 Average Waveforms Groups
CS_l1b_mds['Waveform_1Hz'] = {}
if (self.MODE == 'LRM'):
# Low-Resolution Mode
# Data Record Time (MDSR Time Stamp)
CS_l1b_mds['Waveform_1Hz']['Day'] = np.zeros((n_records),dtype=np.int32)
CS_l1b_mds['Waveform_1Hz']['Second'] = np.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Waveform_1Hz']['Micsec'] = np.zeros((n_records),dtype=np.uint32)
# Lat: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lat'] = np.zeros((n_records),dtype=np.int32)
# Lon: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lon'] = np.zeros((n_records),dtype=np.int32)
# Alt: packed units (mm, 1e-3 m)
# Altitude of COG above reference ellipsoid (interpolated value)
CS_l1b_mds['Waveform_1Hz']['Alt'] = np.zeros((n_records),dtype=np.int32)
# Window Delay (two-way) corrected for instrument delays
CS_l1b_mds['Waveform_1Hz']['TD'] = np.zeros((n_records),dtype=np.int64)
# 1 Hz Averaged Power Echo Waveform
CS_l1b_mds['Waveform_1Hz']['Waveform'] = np.zeros((n_records,n_LRM_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_1Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Echo Scale Power (a power of 2)
CS_l1b_mds['Waveform_1Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_1Hz']['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
CS_l1b_mds['Waveform_1Hz']['Flags'] = np.zeros((n_records),dtype=np.uint16)
elif (self.MODE == 'SAR'):
# SAR Mode
# Data Record Time (MDSR Time Stamp)
CS_l1b_mds['Waveform_1Hz']['Day'] = np.zeros((n_records),dtype=np.int32)
CS_l1b_mds['Waveform_1Hz']['Second'] = np.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Waveform_1Hz']['Micsec'] = np.zeros((n_records),dtype=np.uint32)
# Lat: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lat'] = np.zeros((n_records),dtype=np.int32)
# Lon: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lon'] = np.zeros((n_records),dtype=np.int32)
# Alt: packed units (mm, 1e-3 m)
# Altitude of COG above reference ellipsoid (interpolated value)
CS_l1b_mds['Waveform_1Hz']['Alt'] = np.zeros((n_records),dtype=np.int32)
# Window Delay (two-way) corrected for instrument delays
CS_l1b_mds['Waveform_1Hz']['TD'] = np.zeros((n_records),dtype=np.int64)
# 1 Hz Averaged Power Echo Waveform
CS_l1b_mds['Waveform_1Hz']['Waveform'] = np.zeros((n_records,n_SAR_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_1Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Echo Scale Power (a power of 2)
CS_l1b_mds['Waveform_1Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_1Hz']['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
CS_l1b_mds['Waveform_1Hz']['Flags'] = np.zeros((n_records),dtype=np.uint16)
elif (self.MODE == 'SIN'):
# SARIN Mode
# Same as the LRM/SAR groups but the waveform array is 512 bins instead of
# 128 and the number of echoes averaged is different.
# Data Record Time (MDSR Time Stamp)
CS_l1b_mds['Waveform_1Hz']['Day'] = np.zeros((n_records),dtype=np.int32)
CS_l1b_mds['Waveform_1Hz']['Second'] = np.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Waveform_1Hz']['Micsec'] = np.zeros((n_records),dtype=np.uint32)
# Lat: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lat'] = np.zeros((n_records),dtype=np.int32)
# Lon: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lon'] = np.zeros((n_records),dtype=np.int32)
# Alt: packed units (mm, 1e-3 m)
# Altitude of COG above reference ellipsoid (interpolated value)
CS_l1b_mds['Waveform_1Hz']['Alt'] = np.zeros((n_records),dtype=np.int32)
# Window Delay (two-way) corrected for instrument delays
CS_l1b_mds['Waveform_1Hz']['TD'] = np.zeros((n_records),dtype=np.int64)
# 1 Hz Averaged Power Echo Waveform
CS_l1b_mds['Waveform_1Hz']['Waveform'] = np.zeros((n_records,n_SARIN_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_1Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Echo Scale Power (a power of 2)
CS_l1b_mds['Waveform_1Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_1Hz']['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
CS_l1b_mds['Waveform_1Hz']['Flags'] = np.zeros((n_records),dtype=np.uint16)
# CryoSat-2 Waveforms Groups
# Beam Behavior Parameters
Beam_Behavior = {}
# Standard Deviation of Gaussian fit to range integrated stack power.
Beam_Behavior['SD'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# Stack Center: Mean of Gaussian fit to range integrated stack power.
Beam_Behavior['Center'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# Stack amplitude parameter scaled in dB/100.
Beam_Behavior['Amplitude'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# 3rd moment: providing the degree of asymmetry of the range integrated
# stack power distribution.
Beam_Behavior['Skewness'] = np.zeros((n_records,n_blocks),dtype=np.int16)
# 4th moment: Measure of peakiness of range integrated stack power distribution.
Beam_Behavior['Kurtosis'] = np.zeros((n_records,n_blocks),dtype=np.int16)
Beam_Behavior['Spare'] = np.zeros((n_records,n_blocks,n_BeamBehaviourParams-5),dtype=np.int16)
# CryoSat-2 mode specific waveforms
CS_l1b_mds['Waveform_20Hz'] = {}
if (self.MODE == 'LRM'):
# Low-Resolution Mode
# Averaged Power Echo Waveform [128]
CS_l1b_mds['Waveform_20Hz']['Waveform'] = np.zeros((n_records,n_blocks,n_LRM_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_20Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Echo Scale Power (a power of 2 to scale echo to Watts)
CS_l1b_mds['Waveform_20Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_20Hz']['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
CS_l1b_mds['Waveform_20Hz']['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
elif (self.MODE == 'SAR'):
# SAR Mode
# Averaged Power Echo Waveform [128]
CS_l1b_mds['Waveform_20Hz']['Waveform'] = np.zeros((n_records,n_blocks,n_SAR_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_20Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Echo Scale Power (a power of 2 to scale echo to Watts)
CS_l1b_mds['Waveform_20Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_20Hz']['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
CS_l1b_mds['Waveform_20Hz']['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# Beam behaviour parameters
CS_l1b_mds['Waveform_20Hz']['Beam'] = Beam_Behavior
elif (self.MODE == 'SIN'):
# SARIN Mode
# Averaged Power Echo Waveform [512]
CS_l1b_mds['Waveform_20Hz']['Waveform'] = np.zeros((n_records,n_blocks,n_SARIN_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_20Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Echo Scale Power (a power of 2 to scale echo to Watts)
CS_l1b_mds['Waveform_20Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_20Hz']['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
CS_l1b_mds['Waveform_20Hz']['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# Beam behaviour parameters
CS_l1b_mds['Waveform_20Hz']['Beam'] = Beam_Behavior
# Coherence [512]: packed units (1/1000)
CS_l1b_mds['Waveform_20Hz']['Coherence'] = np.zeros((n_records,n_blocks,n_SARIN_RW),dtype=np.int16)
# Phase Difference [512]: packed units (microradians)
CS_l1b_mds['Waveform_20Hz']['Phase_diff'] = np.zeros((n_records,n_blocks,n_SARIN_RW),dtype=np.int32)
# for each record in the CryoSat file
for r in range(n_records):
# CryoSat-2 Time and Orbit Group
for b in range(n_blocks):
CS_l1b_mds['Location']['Day'].data[r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Second'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Location']['Micsec'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Location']['USO_Corr'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Mode_ID'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Location']['SSC'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Location']['Inst_config'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Location']['Rec_Count'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Location']['Lat'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Lon'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Alt'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Alt_rate'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Sat_velocity'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
CS_l1b_mds['Location']['Real_beam'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
CS_l1b_mds['Location']['Baseline'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
CS_l1b_mds['Location']['MCD'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
# CryoSat-2 Measurement Group
# Derived from instrument measurement parameters
for b in range(n_blocks):
CS_l1b_mds['Data']['TD'][r,b] = np.fromfile(fid,dtype='>i8',count=1)
CS_l1b_mds['Data']['H_0'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['COR2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['LAI'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['FAI'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['AGC_CH1'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['AGC_CH2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['TR_gain_CH1'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['TR_gain_CH2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['TX_Power'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['Doppler_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['TR_inst_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['R_inst_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['TR_inst_gain'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['R_inst_gain'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['Internal_phase'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['External_phase'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['Noise_power'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['Phase_slope'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['Spares1'][r,b,:] = np.fromfile(fid,dtype='>i1',count=4)
# CryoSat-2 External Corrections Group
CS_l1b_mds['Geometry']['dryTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['wetTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['InvBar'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['DAC'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['Iono_GIM'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['Iono_model'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['ocTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['lpeTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['olTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['seTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['gpTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['Surf_type'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Geometry']['Spare1'][r,:] = np.fromfile(fid,dtype='>i1',count=4)
CS_l1b_mds['Geometry']['Corr_status'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Geometry']['Corr_error'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Geometry']['Spare2'][r,:] = np.fromfile(fid,dtype='>i1',count=4)
# CryoSat-2 Average Waveforms Groups
if (self.MODE == 'LRM'):
# Low-Resolution Mode
CS_l1b_mds['Waveform_1Hz']['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Waveform_1Hz']['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Waveform_1Hz']['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
CS_l1b_mds['Waveform_1Hz']['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_LRM_RW)
CS_l1b_mds['Waveform_1Hz']['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_1Hz']['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
elif (self.MODE == 'SAR'):
# SAR Mode
CS_l1b_mds['Waveform_1Hz']['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Waveform_1Hz']['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Waveform_1Hz']['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
CS_l1b_mds['Waveform_1Hz']['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_SAR_RW)
CS_l1b_mds['Waveform_1Hz']['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_1Hz']['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
elif (self.MODE == 'SIN'):
# SARIN Mode
CS_l1b_mds['Waveform_1Hz']['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Waveform_1Hz']['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Waveform_1Hz']['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
CS_l1b_mds['Waveform_1Hz']['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_SARIN_RW)
CS_l1b_mds['Waveform_1Hz']['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_1Hz']['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
# CryoSat-2 Waveforms Groups
if (self.MODE == 'LRM'):
# Low-Resolution Mode
for b in range(n_blocks):
CS_l1b_mds['Waveform_20Hz']['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_LRM_RW)
CS_l1b_mds['Waveform_20Hz']['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_20Hz']['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_20Hz']['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_20Hz']['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
elif (self.MODE == 'SAR'):
# SAR Mode
for b in range(n_blocks):
CS_l1b_mds['Waveform_20Hz']['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_SAR_RW)
CS_l1b_mds['Waveform_20Hz']['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_20Hz']['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_20Hz']['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_20Hz']['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['SD'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['Center'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['Amplitude'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['Skewness'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['Kurtosis'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['Spare'][r,b,:] = np.fromfile(fid,dtype='>i2',count=(n_BeamBehaviourParams-5))
elif (self.MODE == 'SIN'):
# SARIN Mode
for b in range(n_blocks):
CS_l1b_mds['Waveform_20Hz']['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_SARIN_RW)
CS_l1b_mds['Waveform_20Hz']['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_20Hz']['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_20Hz']['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_20Hz']['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['SD'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['Center'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['Amplitude'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['Skewness'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['Kurtosis'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['Spare'][r,b,:] = np.fromfile(fid,dtype='>i2',count=(n_BeamBehaviourParams-5))
CS_l1b_mds['Waveform_20Hz']['Coherence'][r,b,:] = np.fromfile(fid,dtype='>i2',count=n_SARIN_RW)
CS_l1b_mds['Waveform_20Hz']['Phase_diff'][r,b,:] = np.fromfile(fid,dtype='>i4',count=n_SARIN_RW)
# set the mask from day variables
mask_20Hz = CS_l1b_mds['Location']['Day'].data == CS_l1b_mds['Location']['Day'].fill_value
Location_keys = [key for key in CS_l1b_mds['Location'].keys() if not re.search(r'Spare',key)]
Data_keys = [key for key in CS_l1b_mds['Data'].keys() if not re.search(r'Spare',key)]
Geometry_keys = [key for key in CS_l1b_mds['Geometry'].keys() if not re.search(r'Spare',key)]
Wfm_1Hz_keys = [key for key in CS_l1b_mds['Waveform_1Hz'].keys() if not re.search(r'Spare',key)]
Wfm_20Hz_keys = [key for key in CS_l1b_mds['Waveform_20Hz'].keys() if not re.search(r'Spare',key)]
for key in Location_keys:
CS_l1b_mds['Location'][key].mask = mask_20Hz.copy()
for key in Data_keys:
CS_l1b_mds['Data'][key].mask = mask_20Hz.copy()
# return the output dictionary
return CS_l1b_mds
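# Illustrative note (not part of the original reader): most geolocation fields
# above are stored in packed integer units, so a downstream conversion sketch
# might look like the following, assuming `mds` is the dictionary returned by
# this reader:
#
#   lat_deg = mds['Location']['Lat'] * 1e-7   # 0.1 micro-degree -> degrees
#   lon_deg = mds['Location']['Lon'] * 1e-7   # 0.1 micro-degree -> degrees
#   alt_m   = mds['Location']['Alt'] * 1e-3   # mm -> metres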
def cryosat_baseline_C(self, fid, n_records):
"""
Read L1b MDS variables for CryoSat Baseline C
"""
n_SARIN_BC_RW = 1024
n_SARIN_RW = 512
n_SAR_BC_RW = 256
n_SAR_RW = 128
n_LRM_RW = 128
n_blocks = 20
n_BeamBehaviourParams = 50
# Bind all the variables of the l1b_mds together into a single dictionary
CS_l1b_mds = {}
# CryoSat-2 Time and Orbit Group
CS_l1b_mds['Location'] = {}
# Time: day part
CS_l1b_mds['Location']['Day'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32,fill_value=0)
# Time: second part
CS_l1b_mds['Location']['Second'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
# Time: microsecond part
CS_l1b_mds['Location']['Micsec'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
# USO correction factor
CS_l1b_mds['Location']['USO_Corr'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Mode ID
CS_l1b_mds['Location']['Mode_ID'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint16)
# Source sequence counter
CS_l1b_mds['Location']['SSC'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint16)
# Instrument configuration
CS_l1b_mds['Location']['Inst_config'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
# Record Counter
CS_l1b_mds['Location']['Rec_Count'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
# Lat: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Location']['Lat'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Lon: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Location']['Lon'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Alt: packed units (mm, 1e-3 m)
# Altitude of COG above reference ellipsoid (interpolated value)
CS_l1b_mds['Location']['Alt'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instantaneous altitude rate derived from orbit: packed units (mm/s, 1e-3 m/s)
CS_l1b_mds['Location']['Alt_rate'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Satellite velocity vector. In ITRF: packed units (mm/s, 1e-3 m/s)
# ITRF= International Terrestrial Reference Frame
CS_l1b_mds['Location']['Sat_velocity'] = np.ma.zeros((n_records,n_blocks,3),dtype=np.int32)
# Real beam direction vector. In CRF: packed units (micro-m/s, 1e-6 m/s)
# CRF= CryoSat Reference Frame.
CS_l1b_mds['Location']['Real_beam'] = np.ma.zeros((n_records,n_blocks,3),dtype=np.int32)
# Interferometric baseline vector. In CRF: packed units (micro-m/s, 1e-6 m/s)
CS_l1b_mds['Location']['Baseline'] = np.ma.zeros((n_records,n_blocks,3),dtype=np.int32)
# Star Tracker ID
CS_l1b_mds['Location']['ST_ID'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16)
# Antenna Bench Roll Angle (Derived from star trackers)
# packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Location']['Roll'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Antenna Bench Pitch Angle (Derived from star trackers)
# packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Location']['Pitch'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Antenna Bench Yaw Angle (Derived from star trackers)
# packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Location']['Yaw'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Measurement Confidence Data Flags
# Generally the MCD flags indicate problems when set
# If MCD is 0 then no problems or non-nominal conditions were detected
# Serious errors are indicated by setting bit 31
CS_l1b_mds['Location']['MCD'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
CS_l1b_mds['Location']['Spares'] = np.ma.zeros((n_records,n_blocks,2),dtype=np.int16)
# CryoSat-2 Measurement Group
# Derived from instrument measurement parameters
CS_l1b_mds['Data'] = {}
# Window Delay reference (two-way) corrected for instrument delays
CS_l1b_mds['Data']['TD'] = np.ma.zeros((n_records,n_blocks),dtype=np.int64)
# H0 Initial Height Word from telemetry
CS_l1b_mds['Data']['H_0'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# COR2 Height Rate: on-board tracker height rate over the radar cycle
CS_l1b_mds['Data']['COR2'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Coarse Range Word (LAI) derived from telemetry
CS_l1b_mds['Data']['LAI'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Fine Range Word (FAI) derived from telemetry
CS_l1b_mds['Data']['FAI'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Automatic Gain Control Channel 1: AGC gain applied on Rx channel 1.
# Gain calibration corrections are applied (Sum of AGC stages 1 and 2
# plus the corresponding corrections) (dB/100)
CS_l1b_mds['Data']['AGC_CH1'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Automatic Gain Control Channel 2: AGC gain applied on Rx channel 2.
# Gain calibration corrections are applied (dB/100)
CS_l1b_mds['Data']['AGC_CH2'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Total Fixed Gain On Channel 1: gain applied by the RF unit. (dB/100)
CS_l1b_mds['Data']['TR_gain_CH1'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Total Fixed Gain On Channel 2: gain applied by the RF unit. (dB/100)
CS_l1b_mds['Data']['TR_gain_CH2'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Transmit Power in microWatts
CS_l1b_mds['Data']['TX_Power'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Doppler range correction: Radial component (mm)
# computed for the component of satellite velocity in the nadir direction
CS_l1b_mds['Data']['Doppler_range'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instrument Range Correction: transmit-receive antenna (mm)
# Calibration correction to range on channel 1 computed from CAL1.
CS_l1b_mds['Data']['TR_inst_range'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instrument Range Correction: receive-only antenna (mm)
# Calibration correction to range on channel 2 computed from CAL1.
CS_l1b_mds['Data']['R_inst_range'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instrument Gain Correction: transmit-receive antenna (dB/100)
# Calibration correction to gain on channel 1 computed from CAL1
CS_l1b_mds['Data']['TR_inst_gain'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instrument Gain Correction: receive-only (dB/100)
# Calibration correction to gain on channel 2 computed from CAL1
CS_l1b_mds['Data']['R_inst_gain'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Internal Phase Correction (microradians)
CS_l1b_mds['Data']['Internal_phase'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# External Phase Correction (microradians)
CS_l1b_mds['Data']['External_phase'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Noise Power measurement (dB/100)
CS_l1b_mds['Data']['Noise_power'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Phase slope correction (microradians)
# Computed from the CAL-4 packets during the azimuth impulse response
# amplitude (SARIN only). Set from the latest available CAL-4 packet.
CS_l1b_mds['Data']['Phase_slope'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
CS_l1b_mds['Data']['Spares1'] = np.ma.zeros((n_records,n_blocks,4),dtype=np.int8)
# CryoSat-2 External Corrections Group
CS_l1b_mds['Geometry'] = {}
# Dry Tropospheric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['dryTrop'] = np.ma.zeros((n_records),dtype=np.int32)
# Wet Tropospheric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['wetTrop'] = np.ma.zeros((n_records),dtype=np.int32)
# Inverse Barometric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['InvBar'] = np.ma.zeros((n_records),dtype=np.int32)
# Delta Inverse Barometric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['DAC'] = np.ma.zeros((n_records),dtype=np.int32)
# GIM Ionospheric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['Iono_GIM'] = np.ma.zeros((n_records),dtype=np.int32)
# Model Ionospheric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['Iono_model'] = np.ma.zeros((n_records),dtype=np.int32)
# Ocean tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['ocTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Long period equilibrium ocean tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['lpeTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Ocean loading tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['olTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Solid Earth tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['seTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Geocentric Polar tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['gpTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Surface Type: enumerated key to classify surface at nadir
# 0 = Open Ocean
# 1 = Closed Sea
# 2 = Continental Ice
# 3 = Land
CS_l1b_mds['Geometry']['Surf_type'] = np.ma.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Geometry']['Spare1'] = np.ma.zeros((n_records,4),dtype=np.int8)
# Corrections Status Flag
CS_l1b_mds['Geometry']['Corr_status'] = np.ma.zeros((n_records),dtype=np.uint32)
# Correction Error Flag
CS_l1b_mds['Geometry']['Corr_error'] = np.ma.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Geometry']['Spare2'] = np.ma.zeros((n_records,4),dtype=np.int8)
# CryoSat-2 Average Waveforms Groups
CS_l1b_mds['Waveform_1Hz'] = {}
if (self.MODE == 'LRM'):
# Low-Resolution Mode
# Data Record Time (MDSR Time Stamp)
CS_l1b_mds['Waveform_1Hz']['Day'] = np.zeros((n_records),dtype=np.int32)
CS_l1b_mds['Waveform_1Hz']['Second'] = np.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Waveform_1Hz']['Micsec'] = np.zeros((n_records),dtype=np.uint32)
# Lat: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lat'] = np.zeros((n_records),dtype=np.int32)
# Lon: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lon'] = np.zeros((n_records),dtype=np.int32)
# Alt: packed units (mm, 1e-3 m)
# Altitude of COG above reference ellipsoid (interpolated value)
CS_l1b_mds['Waveform_1Hz']['Alt'] = np.zeros((n_records),dtype=np.int32)
# Window Delay (two-way) corrected for instrument delays
CS_l1b_mds['Waveform_1Hz']['TD'] = np.zeros((n_records),dtype=np.int64)
# 1 Hz Averaged Power Echo Waveform
CS_l1b_mds['Waveform_1Hz']['Waveform'] = np.zeros((n_records,n_LRM_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_1Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Echo Scale Power (a power of 2 to scale echo to Watts)
CS_l1b_mds['Waveform_1Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_1Hz']['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
CS_l1b_mds['Waveform_1Hz']['Flags'] = np.zeros((n_records),dtype=np.uint16)
elif (self.MODE == 'SAR'):
# SAR Mode
# Data Record Time (MDSR Time Stamp)
CS_l1b_mds['Waveform_1Hz']['Day'] = np.zeros((n_records),dtype=np.int32)
CS_l1b_mds['Waveform_1Hz']['Second'] = np.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Waveform_1Hz']['Micsec'] = np.zeros((n_records),dtype=np.uint32)
# Lat: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lat'] = np.zeros((n_records),dtype=np.int32)
# Lon: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lon'] = np.zeros((n_records),dtype=np.int32)
# Alt: packed units (mm, 1e-3 m)
# Altitude of COG above reference ellipsoid (interpolated value)
CS_l1b_mds['Waveform_1Hz']['Alt'] = np.zeros((n_records),dtype=np.int32)
# Window Delay (two-way) corrected for instrument delays
CS_l1b_mds['Waveform_1Hz']['TD'] = np.zeros((n_records),dtype=np.int64)
# 1 Hz Averaged Power Echo Waveform
CS_l1b_mds['Waveform_1Hz']['Waveform'] = np.zeros((n_records,n_SAR_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_1Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Echo Scale Power (a power of 2 to scale echo to Watts)
CS_l1b_mds['Waveform_1Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_1Hz']['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
CS_l1b_mds['Waveform_1Hz']['Flags'] = np.zeros((n_records),dtype=np.uint16)
elif (self.MODE == 'SIN'):
# SARIN Mode
# Same as the LRM/SAR groups but the waveform array is 512 bins instead of
# 128 and the number of echoes averaged is different.
# Data Record Time (MDSR Time Stamp)
CS_l1b_mds['Waveform_1Hz']['Day'] = np.zeros((n_records),dtype=np.int32)
CS_l1b_mds['Waveform_1Hz']['Second'] = np.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Waveform_1Hz']['Micsec'] = np.zeros((n_records),dtype=np.uint32)
# Lat: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lat'] = np.zeros((n_records),dtype=np.int32)
# Lon: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lon'] = np.zeros((n_records),dtype=np.int32)
# Alt: packed units (mm, 1e-3 m)
# Altitude of COG above reference ellipsoid (interpolated value)
CS_l1b_mds['Waveform_1Hz']['Alt'] = np.zeros((n_records),dtype=np.int32)
# Window Delay (two-way) corrected for instrument delays
CS_l1b_mds['Waveform_1Hz']['TD'] = np.zeros((n_records),dtype=np.int64)
# 1 Hz Averaged Power Echo Waveform
CS_l1b_mds['Waveform_1Hz']['Waveform'] = np.zeros((n_records,n_SARIN_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_1Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Echo Scale Power (a power of 2 to scale echo to Watts)
CS_l1b_mds['Waveform_1Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_1Hz']['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
CS_l1b_mds['Waveform_1Hz']['Flags'] = np.zeros((n_records),dtype=np.uint16)
# CryoSat-2 Waveforms Groups
# Beam Behavior Parameters
Beam_Behavior = {}
# Standard Deviation of Gaussian fit to range integrated stack power.
Beam_Behavior['SD'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# Stack Center: Mean of Gaussian fit to range integrated stack power.
Beam_Behavior['Center'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# Stack amplitude parameter scaled in dB/100.
Beam_Behavior['Amplitude'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# 3rd moment: providing the degree of asymmetry of the range integrated
# stack power distribution.
Beam_Behavior['Skewness'] = np.zeros((n_records,n_blocks),dtype=np.int16)
# 4th moment: Measure of peakiness of range integrated stack power distribution.
Beam_Behavior['Kurtosis'] = np.zeros((n_records,n_blocks),dtype=np.int16)
# Standard deviation as a function of boresight angle (microradians)
Beam_Behavior['SD_boresight_angle'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# Stack Center angle as a function of boresight angle (microradians)
Beam_Behavior['Center_boresight_angle'] = np.zeros((n_records,n_blocks),dtype=np.int16)
Beam_Behavior['Spare'] = np.zeros((n_records,n_blocks,n_BeamBehaviourParams-7),dtype=np.int16)
# CryoSat-2 mode specific waveform variables
CS_l1b_mds['Waveform_20Hz'] = {}
if (self.MODE == 'LRM'):
# Low-Resolution Mode
# Averaged Power Echo Waveform [128]
CS_l1b_mds['Waveform_20Hz']['Waveform'] = np.zeros((n_records,n_blocks,n_LRM_RW),dtype=np.uint16)
import numpy as np
from iminuit import Minuit
from .energy_likelihood import *
from .spatial_likelihood import *
"""
Module to compute the IceCube point source likelihood
using publicly available information.
Based on the method described in:
<NAME> al., 2008. Methods for point source analysis
in high energy neutrino telescopes. Astroparticle Physics,
29(4), pp.299–305.
Currently well-defined for searches with
Northern sky muon neutrinos.
"""
class PointSourceLikelihood:
"""
Calculate the point source likelihood for a given
neutrino dataset - in terms of reconstructed
energies and arrival directions.
Based on what is described in Braun+2008 and
Aartsen+2018.
"""
def __init__(
self,
direction_likelihood,
energy_likelihood,
ras,
decs,
energies,
source_coord,
bg_energy_likelihood=None,
index_prior=None,
band_width_factor=3.0,
):
"""
Calculate the point source likelihood for a given
neutrino dataset - in terms of reconstructed
energies and arrival directions.
:param direction_likelihood: An instance of SpatialLikelihood.
:param energy_likelihood: An instance of MarginalisedEnergyLikelihood.
:param ras: Array of reconstructed right ascensions in [rad].
:param decs: Array of reconstructed declinations in [rad].
:param energies: The reconstructed nu energies.
:param source_coord: (ra, dec) of the point to test.
:param index_prior: Optional prior on the spectral index, instance of Prior.
"""
self._direction_likelihood = direction_likelihood
self._energy_likelihood = energy_likelihood
self._bg_energy_likelihood = bg_energy_likelihood
if isinstance(
self._direction_likelihood, EnergyDependentSpatialGaussianLikelihood
):
self._band_width = (
band_width_factor * self._direction_likelihood.get_low_res()
)
else:
self._band_width = (
band_width_factor * self._direction_likelihood._sigma
) # degrees
self._dec_low = source_coord[1] - np.deg2rad(self._band_width)
self._dec_high = source_coord[1] + np.deg2rad(self._band_width)
if self._dec_low < np.arcsin(-0.1) or np.isnan(self._dec_low):
self._dec_low = np.arcsin(-0.1)
if self._dec_high > np.arcsin(1.0) or np.isnan(self._dec_high):
self._dec_high = np.arcsin(1.0)
self._band_solid_angle = (
2 * np.pi * (np.sin(self._dec_high) - np.sin(self._dec_low))
)
self._ra_low = source_coord[0] - np.deg2rad(self._band_width)
self._ra_high = source_coord[0] + np.deg2rad(self._band_width)
self._ras = ras
self._decs = decs
self._energies = energies
self._source_coord = source_coord
self._index_prior = index_prior
# Sensible values based on Braun+2008
# and Aartsen+2018 analyses
self._bg_index = 3.7
self._ns_min = 0.0
self._max_index = 4.0
# min index depends on the energy likelihood used.
self._select_nearby_events()
# Can't have more source events than actual events...
self._ns_max = self.N
self.Ntot = len(self._energies)
def _select_nearby_events(self):
source_ra, source_dec = self._source_coord
dec_fac = np.deg2rad(self._band_width)
selected = list(
set(
np.where(
(self._decs >= self._dec_low)
& (self._decs <= self._dec_high)
& (self._ras >= self._ra_low)
& (self._ras <= self._ra_high)
)[0]
)
)
selected_dec_band = np.where(
(self._decs >= self._dec_low) & (self._decs <= self._dec_high)
)[0]
self._selected = selected
self._selected_ras = self._ras[selected]
self._selected_decs = self._decs[selected]
self._selected_energies = self._energies[selected]
self.Nprime = len(selected)
self.N = len(selected_dec_band)
def _signal_likelihood(self, ra, dec, source_coord, energy, index):
if isinstance(
self._direction_likelihood, EnergyDependentSpatialGaussianLikelihood
):
likelihood = self._direction_likelihood(
(ra, dec), source_coord, energy, index
) * self._energy_likelihood(energy, index)
else:
likelihood = self._direction_likelihood(
(ra, dec), source_coord
) * self._energy_likelihood(energy, index)
return likelihood
def _background_likelihood(self, energy):
if self._bg_energy_likelihood:
output = self._bg_energy_likelihood(energy) / self._band_solid_angle
if output == 0.0:
output = 1e-10
return output
else:
return (
self._energy_likelihood(energy, self._bg_index) / self._band_solid_angle
)
def _get_neg_log_likelihood_ratio(self, ns, index):
"""
Calculate the -log(likelihood_ratio).
Uses calculation described in:
https://github.com/IceCubeOpenSource/SkyLLH/blob/master/doc/user_manual.pdf
:param ns: Number of source counts.
:param index: Spectral index of the source.
"""
one_plus_alpha = 1e-10
alpha = one_plus_alpha - 1
log_likelihood_ratio = 0.0
for i in range(self.Nprime):
signal = self._signal_likelihood(
self._selected_ras[i],
self._selected_decs[i],
self._source_coord,
self._selected_energies[i],
index,
)
bg = self._background_likelihood(self._selected_energies[i])
chi = (1 / self.N) * (signal / bg - 1)
alpha_i = ns * chi
if (1 + alpha_i) < one_plus_alpha:
alpha_tilde = (alpha_i - alpha) / one_plus_alpha
log_likelihood_ratio += (
np.log1p(alpha) + alpha_tilde - (0.5 * alpha_tilde ** 2)
)
else:
log_likelihood_ratio += np.log1p(alpha_i)
log_likelihood_ratio += (self.N - self.Nprime) * np.log1p(-ns / self.N)
return -log_likelihood_ratio
def _func_to_minimize(self, ns, index):
"""
Calculate the -log(likelihood_ratio) for minimization.
Uses calculation described in:
https://github.com/IceCubeOpenSource/SkyLLH/blob/master/doc/user_manual.pdf
If there is a prior, it is added here, as this is equivalent to maximising
the likelihood.
:param ns: Number of source counts.
:param index: Spectral index of the source.
"""
one_plus_alpha = 1e-10
alpha = one_plus_alpha - 1
log_likelihood_ratio = 0.0
for i in range(self.Nprime):
signal = self._signal_likelihood(
self._selected_ras[i],
self._selected_decs[i],
self._source_coord,
self._selected_energies[i],
index,
)
bg = self._background_likelihood(self._selected_energies[i])
chi = (1 / self.N) * (signal / bg - 1)
alpha_i = ns * chi
if (1 + alpha_i) < one_plus_alpha:
alpha_tilde = (alpha_i - alpha) / one_plus_alpha
log_likelihood_ratio += (
np.log1p(alpha) + alpha_tilde - (0.5 * alpha_tilde ** 2)
)
else:
log_likelihood_ratio += np.log1p(alpha_i)
log_likelihood_ratio += (self.N - self.Nprime) * np.log1p(-ns / self.N)
if self._index_prior:
log_likelihood_ratio += np.log(self._index_prior(index))
return -log_likelihood_ratio
def __call__(self, ns, index):
"""
Wrapper function for convenience.
"""
return self._get_neg_log_likelihood_ratio(ns, index)
def _minimize(self):
"""
Minimize -log(likelihood_ratio) for the source hypothesis,
returning the best fit ns and index.
Uses the iminuit wrapper.
"""
init_index = 2.19 # self._energy_likelihood._min_index + (self._max_index - self._energy_likelihood._min_index)/2
init_ns = self._ns_min + (self._ns_max - self._ns_min) / 2
m = Minuit(
self._func_to_minimize,
ns=init_ns,
index=init_index,
error_ns=1,
error_index=0.1,
errordef=0.5,
limit_ns=(self._ns_min, self._ns_max),
limit_index=(self._energy_likelihood._min_index, self._max_index),
)
m.migrad()
if not m.migrad_ok() or not m.matrix_accurate():
# Fix the index as can be uninformative
m.fixed["index"] = True
m.migrad()
self._best_fit_ns = m.values["ns"]
self._best_fit_index = m.values["index"]
def _minimize_grid(self):
"""
Minimize -log(likelihood_ratio) for the source hypothesis,
returning the best fit ns and index.
This simple grid method takes roughly the same time as minuit
and is more accurate...
"""
ns_grid = np.linspace(self._ns_min, self._ns_max, 10)
index_grid = np.linspace(
self._energy_likelihood._min_index, self._max_index, 10
)
out = np.zeros((len(ns_grid), len(index_grid)))
for i, ns in enumerate(ns_grid):
for j, index in enumerate(index_grid):
out[i][j] = self._get_neg_log_likelihood_ratio(ns, index)
sel = np.where(out == np.min(out))
if len(sel[0]) > 1:
self._best_fit_index = 3.7
self._best_fit_ns = 0.0
else:
self._best_fit_ns = ns_grid[sel[0]][0]
self._best_fit_index = index_grid[sel[1]][0]
def _first_derivative_likelihood_ratio(self, ns=0, index=2.0):
"""
First derivative of the likelihood ratio.
Equation 41 in
https://github.com/IceCubeOpenSource/SkyLLH/blob/master/doc/user_manual.pdf.
"""
one_plus_alpha = 1e-10
alpha = one_plus_alpha - 1
self._first_derivative = []
for i in range(self.Nprime):
signal = self._signal_likelihood(
self._selected_ras[i],
self._selected_decs[i],
self._source_coord,
self._selected_energies[i],
index,
)
bg = self._background_likelihood(self._selected_energies[i])
chi_i = (1 / self.N) * ((signal / bg) - 1)
alpha_i = ns * chi_i
if (1 + alpha_i) < one_plus_alpha:
alpha_tilde = (alpha_i - alpha) / one_plus_alpha
self._first_derivative.append(
(1 / one_plus_alpha) * (1 - alpha_tilde) * chi_i
)
else:
self._first_derivative.append(chi_i / (1 + alpha_i))
self._first_derivative = np.array(self._first_derivative)
return sum(self._first_derivative) - ((self.N - self.Nprime) / (self.N - ns))
def _second_derivative_likelihood_ratio(self, ns=0):
"""
Second derivative of the likelihood ratio.
Equation 44 in
https://github.com/IceCubeOpenSource/SkyLLH/blob/master/doc/user_manual.pdf.
"""
self._second_derivative = -((self._first_derivative) ** 2)
return sum(self._second_derivative) - (
(self.N - self.Nprime) / (self.N - ns) ** 2
)
def get_test_statistic(self):
"""
Calculate the test statistic for the best fit ns
"""
self._minimize()
# self._minimize_grid()
# For resolving the TS peak at zero
# if self._best_fit_ns == 0:
# first_der = self._first_derivative_likelihood_ratio(self._best_fit_ns, self._best_fit_index)
# second_der = self._second_derivative_likelihood_ratio(self._best_fit_ns)
# self.test_statistic = -2 * (first_der**2 / (4 * second_der))
# self.likelihood_ratio = np.exp(-self.test_statistic/2)
# else:
neg_log_lik = self._get_neg_log_likelihood_ratio(
self._best_fit_ns, self._best_fit_index
)
self.likelihood_ratio = np.exp(neg_log_lik)
self.test_statistic = -2 * neg_log_lik
return self.test_statistic
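# Illustrative usage sketch (not part of the original module): assuming spatial
# and energy likelihood instances plus event arrays in radians/GeV are
# available, the source-hypothesis test statistic can be obtained as below.
# The names spatial_llh, energy_llh, ras, decs and energies are placeholders.
#
#   likelihood = PointSourceLikelihood(
#       spatial_llh, energy_llh, ras, decs, energies,
#       source_coord=(np.deg2rad(77.36), np.deg2rad(5.69)),
#   )
#   TS = likelihood.get_test_statistic()
#   ns_hat, index_hat = likelihood._best_fit_ns, likelihood._best_fit_index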
class SpatialOnlyPointSourceLikelihood:
"""
Calculate the point source likelihood for a given
neutrino dataset - in terms of reconstructed
arrival directions.
This class is exactly as in PointSourceLikelihood,
but without the energy dependence.
"""
def __init__(self, direction_likelihood, event_coords, source_coord):
"""
Calculate the point source likelihood for a given
neutrino dataset - in terms of reconstructed
energies and arrival directions.
:param direction_likelihood: An instance of SpatialGaussianLikelihood.
:param event_coords: List of (ra, dec) tuples for reconstructed coords.
:param source_coord: (ra, dec) of the point to test.
"""
self._direction_likelihood = direction_likelihood
self._band_width = 3 * self._direction_likelihood._sigma # degrees
dec_low = source_coord[1] - np.deg2rad(self._band_width)
dec_high = source_coord[1] + np.deg2rad(self._band_width)
self._band_solid_angle = 2 * np.pi * (np.sin(dec_high) - np.sin(dec_low))
self._event_coords = event_coords
self._source_coord = source_coord
self._bg_index = 3.7
self._ns_min = 0.0
self._ns_max = 100
self._max_index = 3.7
self._select_nearby_events()
self.Ntot = len(self._event_coords)
def _select_nearby_events(self):
ras = np.array([_[0] for _ in self._event_coords])
decs = np.array([_[1] for _ in self._event_coords])
source_ra, source_dec = self._source_coord
dec_fac = np.deg2rad(self._band_width)
selected = list(
set(
np.where(
(decs >= source_dec - dec_fac)
& (decs <= source_dec + dec_fac)
& (ras >= source_ra - dec_fac)
& (ras <= source_ra + dec_fac)
)[0]
)
)
selected_dec_band = np.where(
(decs >= source_dec - dec_fac) & (decs <= source_dec + dec_fac)
)[0]
self._selected = selected
self._selected_event_coords = [
(ec[0], ec[1])
for ec in self._event_coords
if (ec[1] >= source_dec - dec_fac)
& (ec[1] <= source_dec + dec_fac)
& (ec[0] >= source_ra - dec_fac)
& (ec[0] <= source_ra + dec_fac)
]
self.Nprime = len(selected)
self.N = len(selected_dec_band)
def _signal_likelihood(self, event_coord, source_coord):
return self._direction_likelihood(event_coord, source_coord)
def _background_likelihood(self):
return 1.0 / self._band_solid_angle
def _get_neg_log_likelihood_ratio(self, ns):
"""
Calculate the -log(likelihood_ratio) for minimization.
Uses calculation described in:
https://github.com/IceCubeOpenSource/SkyLLH/blob/master/doc/user_manual.pdf
:param ns: Number of source counts.
"""
one_plus_alpha = 1e-10
alpha = one_plus_alpha - 1
log_likelihood_ratio = 0.0
for i in range(self.Nprime):
signal = self._signal_likelihood(
self._selected_event_coords[i], self._source_coord
)
bg = self._background_likelihood()
chi = (1 / self.N) * (signal / bg - 1)
alpha_i = ns * chi
if (1 + alpha_i) < one_plus_alpha:
alpha_tilde = (alpha_i - alpha) / one_plus_alpha
log_likelihood_ratio += (
np.log1p(alpha) + alpha_tilde - (0.5 * alpha_tilde ** 2)
)
else:
log_likelihood_ratio += np.log1p(alpha_i)
log_likelihood_ratio += (self.N - self.Nprime) * np.log1p(-ns / self.N)
return -log_likelihood_ratio
def __call__(self, ns):
"""
Wrapper function for convenience.
"""
return self._get_neg_log_likelihood_ratio(ns)
def _minimize(self):
"""
Minimize -log(likelihood_ratio) for the source hypothesis,
returning the best fit ns and index.
Uses the iMiuint wrapper.
"""
init_ns = self._ns_min + (self._ns_max - self._ns_min) / 2
m = Minuit(
self._get_neg_log_likelihood_ratio,
ns=init_ns,
error_ns=0.1,
errordef=0.5,
limit_ns=(self._ns_min, self._ns_max),
)
m.migrad()
self._best_fit_ns = m.values["ns"]
def get_test_statistic(self):
"""
Calculate the test statistic for the best fit ns
"""
self._minimize()
neg_log_lik = self._get_neg_log_likelihood_ratio(self._best_fit_ns)
self.likelihood_ratio = np.exp(neg_log_lik)
self.test_statistic = -2 * neg_log_lik
return self.test_statistic
class EnergyDependentSpatialPointSourceLikelihood:
"""
Calculate the point source likelihood for a given
neutrino dataset - in terms of reconstructed
arrival directions.
This class is exactly as in PointSourceLikelihood,
but without the energy dependence.
"""
def __init__(
self,
direction_likelihood,
ras,
decs,
energies,
source_coord,
band_width_factor=3.0,
):
"""
Calculate the point source likelihood for a given
neutrino dataset - in terms of reconstructed
energies and arrival directions.
:param direction_likelihood: An instance of SpatialGaussianLikelihood.
:param ras: Array of right ascensions in [rad]
:param decs: Array of declinations in [rad]
:param source_coord: (ra, dec) of the point to test.
"""
self._direction_likelihood = direction_likelihood
self._band_width = band_width_factor * self._direction_likelihood.get_low_res()
self._dec_low = source_coord[1] - np.deg2rad(self._band_width)
self._dec_high = source_coord[1] + np.deg2rad(self._band_width)
if self._dec_low < np.arcsin(-0.1) or np.isnan(self._dec_low):
self._dec_low = np.arcsin(-0.1)
if self._dec_high > np.arcsin(1.0) or np.isnan(self._dec_high):
self._dec_high = np.arcsin(1.0)
self._band_solid_angle = (
2 * np.pi * (np.sin(self._dec_high) - np.sin(self._dec_low))
)
self._ra_low = source_coord[0] - np.deg2rad(self._band_width)
self._ra_high = source_coord[0] + np.deg2rad(self._band_width)
self._ras = ras
self._decs = decs
self._source_coord = source_coord
self._energies = energies
self._ns_min = 0.0
self._ns_max = 100
self._select_nearby_events()
self.Ntot = len(self._ras)
def _select_nearby_events(self):
source_ra, source_dec = self._source_coord
selected = list(
set(
np.where(
(self._decs >= self._dec_low)
& (self._decs <= self._dec_high)
& (self._ras >= self._ra_low)
& (self._ras <= self._ra_high)
)[0]
)
)
selected_dec_band = np.where(
(self._decs >= self._dec_low) & (self._decs <= self._dec_high)
)[0]
self._selected = selected
self._selected_ras = self._ras[selected]
self._selected_decs = self._decs[selected]
self._selected_energies = self._energies[selected]
self.Nprime = len(selected)
self.N = len(selected_dec_band)
def _signal_likelihood(self, ra, dec, source_coord, energy):
return self._direction_likelihood((ra, dec), source_coord, energy)
def _background_likelihood(self):
return 1.0 / self._band_solid_angle
def _get_neg_log_likelihood_ratio(self, ns):
"""
Calculate the -log(likelihood_ratio) for minimization.
Uses calculation described in:
https://github.com/IceCubeOpenSource/SkyLLH/blob/master/doc/user_manual.pdf
:param ns: Number of source counts.
"""
one_plus_alpha = 1e-10
alpha = one_plus_alpha - 1
log_likelihood_ratio = 0.0
for i in range(self.Nprime):
signal = self._signal_likelihood(
self._selected_ras[i],
self._selected_decs[i],
self._source_coord,
self._selected_energies[i],
)
bg = self._background_likelihood()
chi = (1 / self.N) * (signal / bg - 1)
alpha_i = ns * chi
if (1 + alpha_i) < one_plus_alpha:
alpha_tilde = (alpha_i - alpha) / one_plus_alpha
log_likelihood_ratio += (
np.log1p(alpha) + alpha_tilde - (0.5 * alpha_tilde ** 2)
)
else:
log_likelihood_ratio += np.log1p(alpha_i)
log_likelihood_ratio += (self.N - self.Nprime) * np.log1p(-ns / self.N)
return -log_likelihood_ratio
def __call__(self, ns):
"""
Wrapper function for convenience.
"""
return self._get_neg_log_likelihood_ratio(ns)
def _minimize(self):
"""
Minimize -log(likelihood_ratio) for the source hypothesis,
returning the best fit ns and index.
Uses the iminuit wrapper.
"""
init_ns = self._ns_min + (self._ns_max - self._ns_min) / 2
m = Minuit(
self._get_neg_log_likelihood_ratio,
ns=init_ns,
error_ns=0.1,
errordef=0.5,
limit_ns=(self._ns_min, self._ns_max),
)
m.migrad()
self._best_fit_ns = m.values["ns"]
def _minimize_grid(self):
"""
Minimize -log(likelihood_ratio) for the source hypothesis,
returning the best fit ns and index.
This simple grid method takes roughly the same time as minuit
and is more accurate...
"""
ns_grid = np.linspace(self._ns_min, self._ns_max, 10)
out = np.zeros(len(ns_grid))
for i, ns in enumerate(ns_grid):
out[i] = self._get_neg_log_likelihood_ratio(ns)
sel = np.where(out == np.min(out))
if len(sel[0]) > 1:
self._best_fit_ns = 0.0
else:
self._best_fit_ns = ns_grid[sel[0]][0]
def get_test_statistic(self):
"""
Calculate the test statistic for the best fit ns
"""
self._minimize()
neg_log_lik = self._get_neg_log_likelihood_ratio(self._best_fit_ns)
self.likelihood_ratio = np.exp(neg_log_lik)
self.test_statistic = -2 * neg_log_lik
return self.test_statistic
class SimplePointSourceLikelihood:
def __init__(self, direction_likelihood, event_coords, source_coord):
"""
Point source likelihood with only spatial information.
Testing out simple algorithms for evaluating TS.
"""
self._direction_likelihood = direction_likelihood
self._band_width = 3 * direction_likelihood._sigma
self._event_coords = event_coords
self._source_coord = source_coord
self._select_declination_band()
self.Ntot = len(self._event_coords)
def _signal_likelihood(self, event_coord, source_coord):
return self._direction_likelihood(event_coord, source_coord)
def _background_likelihood(self):
return 1 / (np.deg2rad(self._band_width * 2) * 2 * np.pi)
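# Note (added comment): this background term treats the declination band as a
# flat strip of solid angle 2*pi * (2*band_width in rad); the classes above use
# the exact band solid angle 2*pi*(sin(dec_high) - sin(dec_low)) instead.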
def _select_declination_band(self):
decs = np.array([_[1] for _ in self._event_coords])
_, source_dec = self._source_coord
dec_fac = np.deg2rad(self._band_width)
selected = np.where(
(decs >= source_dec - dec_fac) & (decs <= source_dec + dec_fac)
)[0]
self._selected = selected
self._selected_event_coords = [
(ec[0], ec[1])
for ec in self._event_coords
if (ec[1] >= source_dec - dec_fac) & (ec[1] <= source_dec + dec_fac)
]
self.N = len(selected)
def __call__(self, ns):
log_likelihood = 0.0
for i in range(self.N):
signal = (ns / self.N) * self._signal_likelihood(
self._selected_event_coords[i], self._source_coord
)
bg = (1 - (ns / self.N)) * self._background_likelihood()
log_likelihood += np.log(signal + bg)
return -log_likelihood
class SimpleWithEnergyPointSourceLikelihood:
def __init__(
self, direction_likelihood, energy_likelihood, event_coords, source_coord
):
"""
Simple version of point source likelihood.
Also including the energy dependence.
Testing out simple algorithms for evaluating TS.
"""
self._direction_likelihood = direction_likelihood
self._energy_likelihood = energy_likelihood
self._band_width = 3 * direction_likelihood._sigma
self._event_coords = event_coords
self._source_coord = source_coord
self._select_declination_band()
self.Ntot = len(self._event_coords)
self._bg_index = 3.7
def _signal_likelihood(self, event_coord, source_coord, energy, index):
return self._direction_likelihood(
event_coord, source_coord
) * self._energy_likelihood(energy, index)
def _background_likelihood(self, energy):
return (
1
/ (np.deg2rad(self._band_width * 2) * 2 * np.pi)
* self._energy_likelihood(energy, self._bg_index)
)
import torch.nn as nn
import torch
import numpy as np
import torch.nn.functional as F
from ssd.modeling.multibox_loss import MultiBoxLoss
from ssd.module import L2Norm
from ssd.module.prior_box import PriorBox
from ssd.utils import box_utils
from torch.nn.functional import binary_cross_entropy
class SSD(nn.Module):
def __init__(self, cfg,
vgg: nn.ModuleList,
extras: nn.ModuleList,
classification_headers: nn.ModuleList,
regression_headers: nn.ModuleList,
downsample_layers_index:list):
"""Compose a SSD model using the given components.
"""
super(SSD, self).__init__()
self.cfg = cfg
self.num_classes = cfg.MODEL.NUM_CLASSES
self.vgg = vgg
self.extras = extras
self.classification_headers = classification_headers
self.regression_headers = regression_headers
self.l2_norm = L2Norm(512, scale=20)
self.criterion = MultiBoxLoss(neg_pos_ratio=cfg.MODEL.NEG_POS_RATIO)
self.priors = None
self.downsample_layers_index = downsample_layers_index
# FCN part
self.fcn_module = []
self.conv1 = nn.Conv2d(512, 512, 1)
self.fcn_module.append(self.conv1)
self.bn1 = nn.BatchNorm2d(512)
self.fcn_module.append(self.bn1)
self.relu1 = nn.ReLU()
self.conv2 = nn.Conv2d(256, 64, 3, padding=1)
self.fcn_module.append(self.conv2)
self.bn2 = nn.BatchNorm2d(64)
self.fcn_module.append(self.bn2)
self.relu2 = nn.ReLU()
self.conv3 = nn.Conv2d(64, 1, 1)
self.sigmoid = nn.Sigmoid()
self.unpool1 = nn.Upsample(scale_factor=2, mode='bilinear')
self.unpool1_conv2d = nn.Conv2d(1024,512,1)
self.fcn_module.append(self.unpool1_conv2d)
self.unpool2 = nn.Upsample(scale_factor=2, mode='bilinear')
self.unpool2_conv2d = nn.Conv2d(512,256,1)
self.fcn_module.append(self.unpool2_conv2d)
self.fcn_module = nn.ModuleList(self.fcn_module)
self.reset_parameters()
def reset_parameters(self):
def weights_init(m):
if isinstance(m, nn.Conv2d):
nn.init.kaiming_uniform_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m,nn.BatchNorm2d):
nn.init.normal_(m.weight.data, 1.0, 0.02)
nn.init.constant_(m.bias.data, 0.0)
self.vgg.apply(weights_init)
self.extras.apply(weights_init)
self.classification_headers.apply(weights_init)
self.regression_headers.apply(weights_init)
self.fcn_module.apply(weights_init)
def dice_coefficient(self,y_true_cls, y_pred_cls):
'''
Soft dice loss: 1 - 2*|X ∩ Y| / (|X| + |Y|), computed on the score maps.
:param y_true_cls: ground-truth score map
:param y_pred_cls: predicted score map
:return: scalar dice loss
'''
eps = 1e-5
# print('y_true_cls:',y_true_cls.size)
# print('y_pred_cls:',y_pred_cls.size)
intersection = torch.sum(y_true_cls * y_pred_cls)
union = torch.sum(y_true_cls) + torch.sum(y_pred_cls) + eps
loss = 1. - (2 * intersection / union)
return loss
def balanced_cross_entropy(self,y_true_cls, y_pred_cls):
# y_true_cls_cp=y_true_cls.clone()
# y_true_cls_cp=y_true_cls.size()
batch_size, w, h = y_true_cls.size()
all_loss = 0.0
for i in range(batch_size):
true_count = torch.sum(y_true_cls[i])
all_count = w * h
beta = 1 - true_count / all_count  # beta is typically greater than 0.9
y_pred_cls = y_pred_cls.data.cpu().numpy()
y_pred_cls = np.clip(y_pred_cls, 0.1, 0.9)
"""
Copyright 2018 Johns Hopkins University (Author: <NAME>)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
Some math functions.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from six.moves import xrange
import numpy as np
import scipy.linalg as la
from ..hyp_defs import float_cpu
def logdet_pdmat(A):
"""Log determinant of positive definite matrix.
"""
assert(A.shape[0] == A.shape[1])
R=la.cholesky(A)
return 2*np.sum(np.log(np.diag(R)))
def invert_pdmat(A, right_inv=False, return_logdet=False, return_inv=False):
"""Inversion of positive definite matrices.
Returns lambda function f that multiplies the inverse of A times a vector.
Args:
A: Positive definite matrix
right_inv: If False, f(v)=A^{-1}v; if True f(v)=v' A^{-1}
return_logdet: If True, it also returns the log determinant of A.
return_inv: If True, it also returns A^{-1}
Returns:
Lambda function that multiplies A^{-1} times vector.
Cholesky transform of A upper triangular
Log determinant of A
A^{-1}
"""
assert(A.shape[0] == A.shape[1])
R=la.cholesky(A, lower=False)
if right_inv:
fh=lambda x: la.cho_solve((R, False), x.T).T
else:
fh=lambda x: la.cho_solve((R, False), x)
#fh=lambda x: la.solve_triangular(R, la.solve_triangular(R.T, x, lower=True), lower=False)
r = [fh, R]
logdet = None
invA = None
if return_logdet:
logdet=2*np.sum(np.log(np.diag(R)))
r.append(logdet)
if return_inv:
invA=fh(np.eye(A.shape[0]))
r.append(invA)
return r
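# Minimal usage sketch (illustrative only): solving A x = b for a positive
# definite A with the lambda returned above, e.g.
#
#   A = np.array([[4.0, 1.0], [1.0, 3.0]])
#   b = np.array([1.0, 2.0])
#   solve, R, logdet = invert_pdmat(A, return_logdet=True)
#   x = solve(b)   # equivalent to np.linalg.solve(A, b)
#   # logdet == 2 * np.sum(np.log(np.diag(R)))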
def invert_trimat(A, lower=False, right_inv=False, return_logdet=False, return_inv=False):
"""Inversion of triangular matrices.
Returns lambda function f that multiplies the inverse of A times a vector.
Args:
A: Triangular matrix.
lower: if True A is lower triangular, else A is upper triangular.
right_inv: If False, f(v)=A^{-1}v; if True f(v)=v' A^{-1}
return_logdet: If True, it also returns the log determinant of A.
return_inv: If True, it also returns A^{-1}
Returns:
Lambda function that multiplies A^{-1} times vector.
Log determinant of A
A^{-1}
"""
if right_inv:
fh=lambda x: la.solve_triangular(A.T, x.T, lower=not(lower)).T
else:
fh=lambda x: la.solve_triangular(A, x, lower=lower)
if return_logdet or return_inv:
r = [fh]
else:
r = fh
if return_logdet:
logdet=np.sum(np.log(np.diag(A)))
r.append(logdet)
if return_inv:
invA=fh(np.eye(A.shape[0]))
r.append(invA)
return r
def softmax(r, axis=-1):
"""
Returns:
y = \exp(r)/\sum(\exp(r))
"""
max_r=np.max(r, axis=axis, keepdims=True)
r=np.exp(r-max_r)
r/=np.sum(r, axis=axis, keepdims=True)
return r
def logsumexp(r, axis=-1):
"""
Returns:
y = \log \sum(\exp(r))
"""
max_r=np.max(r, axis=axis, keepdims=True)
r=np.exp(r-max_r)
return np.log(np.sum(r, axis=axis)+1e-20) + np.squeeze(max_r, axis=axis)
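# Quick numerical check (illustrative only):
#
#   r = np.array([0.0, 0.0])
#   softmax(r)    # -> array([0.5, 0.5])
#   logsumexp(r)  # -> log(2) ~= 0.6931
#
# so that softmax(r) == np.exp(r - logsumexp(r)) up to floating point error.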
def logsigmoid(x):
"""
Returns:
y = \log(sigmoid(x))
"""
e = np.exp(-x)
f = x < -100
log_p = -np.log(1+np.exp(-x))
log_p[f] = x[f]
return log_p
def neglogsigmoid(x):
"""
Returns:
y = -\log(sigmoid(x))
"""
e = np.exp(-x)
f = x < -100
log_p = np.log(1+np.exp(-x))
log_p[f] = - x[f]
return log_p
def sigmoid(x):
"""
Returns:
y = sigmoid(x)
"""
e = np.exp(-x)
f = x < -100
p = 1/(1+np.exp(-x))
p[f] = 0
return p
def fisher_ratio(mu1, Sigma1, mu2, Sigma2):
"""Computes the Fisher ratio between two classes
from the class means and covariances.
"""
S=Sigma1+Sigma2
L=invert_pdmat(S)[0]
delta=mu1-mu2
return np.inner(delta, L(delta))
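# Note (added comment): this is the two-class Fisher discriminant,
# (mu1 - mu2)^T (Sigma1 + Sigma2)^{-1} (mu1 - mu2), evaluated via the
# Cholesky-based solver returned by invert_pdmat rather than an explicit
# matrix inverse.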
def fisher_ratio_with_precs(mu1, Lambda1, mu2, Lambda2):
"""Computes the Fisher ratio between two classes
from the class means precisions.
"""
Sigma1 = invert_pdmat(Lambda1, return_inv=True)[-1]
Sigma2 = invert_pdmat(Lambda2, return_inv=True)[-1]
return fisher_ratio(mu1, Sigma1, mu2, Sigma2)
def symmat2vec(A, lower=False, diag_factor=None):
"""Puts a symmetric matrix into a vector.
Args:
A: Symmetric matrix.
lower: If True, it uses the lower triangular part of the matrix.
If False, it uses the upper triangular part of the matrix.
diag_factor: It multiplies the diagonal of A by diag_factor.
Returns:
Vector with the upper or lower triangular part of A.
"""
if diag_factor is not None:
A = np.copy(A)
A[np.diag_indices(A.shape[0])] *= diag_factor
if lower:
return A[np.tril_indices(A.shape[0])]
return A[np.triu_indices(A.shape[0])]
def vec2symmat(v, lower=False, diag_factor=None):
"""Puts a vector back into a symmetric matrix.
Args:
v: Vector with the upper or lower triangular part of A.
lower: If True, v contains the lower triangular part of the matrix.
If False, v contains the upper triangular part of the matrix.
diag_factor: It multiplies the diagonal of A by diag_factor.
Returns:
Symmetric matrix.
"""
dim=int((-1+np.sqrt(1+8*v.shape[0]))/2)
idx_u=np.triu_indices(dim)
idx_l=np.tril_indices(dim)
A=np.zeros((dim,dim), dtype=float_cpu())
if lower:
A[idx_l]=v
A[idx_u]=A.T[idx_u]
else:
A[idx_u]=v
A[idx_l]=A.T[idx_l]
if diag_factor is not None:
A[np.diag_indices(A.shape[0])] *= diag_factor
return A
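# Round-trip sketch (illustrative only): packing and unpacking a symmetric
# matrix should recover the original, e.g.
#
#   S = np.array([[2.0, 0.5], [0.5, 1.0]])
#   v = symmat2vec(S)    # upper triangle -> [2.0, 0.5, 1.0]
#   S2 = vec2symmat(v)   # reconstructs the full symmetric matrix
#   assert np.allclose(S, S2)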
def trimat2vec(A, lower=False):
"""Puts a triangular matrix into a vector.
Args:
A: Triangular matrix.
lower: If True, it uses the lower triangular part of the matrix.
If False, it uses the upper triangular part of the matrix.
Returns:
Vector with the upper or lower triangular part of A.
"""
return symmat2vec(A, lower)
def vec2trimat(v, lower=False):
"""Puts a vector back into a triangular matrix.
Args:
v: Vector with the upper or lower triangular part of A.
lower: If True, v contains the lower triangular part of the matrix.
If False, v contains the upper triangular part of the matrix.
Returns:
Triangular matrix.
"""
dim=int((-1+np.sqrt(1+8*v.shape[0]))/2)
A=np.zeros((dim,dim), dtype=float_cpu())
if lower:
A[np.tril_indices(dim)]=v
else:
A[np.triu_indices(dim)]=v
return A
"""
This file contains a couple of S/N estimation codes
designed for use during SAMI observing runs.
UPDATED: 08.04.2013, <NAME>
- Edited to comply with new conventions in sami_utils.
- Edited to accept new target table format.
23.08.2012, <NAME>
- Changed name of "sn" function to "sn_re".
- Writing new S/N code based on the secondary star observation.
NOTES: 10.04.2013, <NAME>
- I no longer return SN_all, but sn_Re, the median SN @Re.
- Removed the SN_all array from the sn function.
26.08.2013, <NAME>
- Updated fields for the SAMI target table.
- Also changed all mentions of 'z' to 'zpec'.
- Major bug fixes in case where target not found on target table.
27.08.2013, <NAME>
- Writing surface brightness map function.
For reasons I (JTA) don't remember, this code was never quite finished
or put into action. The intention had been to use S/N measurements to aid
the observers in deciding when a field was finished, but this code is not
mentioned in the observers' instructions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import pylab as py
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
# use astropy for all astronomy related things.
import astropy.io.fits as pf
import astropy.io.ascii as tab
import sys
from matplotlib.patches import Circle
# Relative imports from sami package
from .. import utils
from .. import samifitting as fitting
def sn_map(rssin):
"""
Plot SNR of all 12 SAMI targets across fraction of Re.
Process:
- Deduce the noise level from the standard star:
+ obtain listed brightness,
+ use existing 2D Gauss function to get SBP,
+ (photometric aperture and aperture correction?),
+ normalise flux,
+ calculate integrated S/N for star,
+ establish noise level.
- Run the SDSS-SB fuction on all targets,
+ Convert brightness to S/N,
+ Plot all 12 targets:
- x-axis: fraction of Re (from target selection table),
- y-axis: S/N,
- horizontal lines @S/N=5, 10.
"""
print("HAY!")
def sn_list(inlist, tablein, l1, l2, ifus='all'):
"""
Wrapper function to provide S/N estimates for >1 file
inlist [ascii] list of files (format?)
tablein [ascii]
"""
#To print only two decimal places in all numpy arrays
np.set_printoptions(precision=2)
files=[]
for line in open(inlist):
cols=line.split(' ')
cols[0]=str.strip(cols[0])
files.append(np.str(cols[0]))
print("I have received", len(files), \
"files for which to calculate and combine S/N measurements.")
# Define the list of IFUs to display
if ifus == 'all':
IFUlist = [1,2,3,4,5,6,7,8,9,10,11,12,13]
else:
IFUlist = [ifus]
print("I will calculate S/N for", len(IFUlist), "IFUs.")
SN_all_sq=np.empty((len(IFUlist), len(files)))
for i in range(len(files)):
insami=files[i]
SN_all=sn_re(insami, tablein, l1, l2, plot=False, ifus=ifus, verbose=False)
SN_all_sq[:,i]=SN_all*SN_all
# Add the squared SN values and square root them
SN_tot=np.sqrt(np.sum(SN_all_sq, axis=1))
print(IFUlist)
print(SN_tot)
def sn_re(insami, tablein, l1, l2, plot=False, ifus='all',
log=True, verbose=True, output=False, seek_centroid=True):
"""
Purpose: Main function, estimates S/N for any or all probes in an RSS file.
Input variables:
insami [fits] Input RSS file.
tablein [ascii] Observations table.
l1, l2 [flt] Wavelength range for S/N estimation.
ifus [str] Probe number, or 'all' for all 13.
log [bool] Logarithmic scaling for plot -- CURRENTLY NOT INVOKED.
verbose [bool] Toggles diagnostic verbosity.
Process:
1) Interpret input.
[Set up plot]
2) Read target table (new format for SAMI survey),
[Commence all-IFU loop, read data]
3) Identify wavelength range over which to estimate SNR,
4) Calculate SNR for all cores in the RSS file.
5) Locate galaxy centre as peak SNR core.
6) Identify cores intercepted by Re (listed).
7) Get SNR @Re as median of collapsed wavelength region.
[End all-IFU loop]
"""
# --------------------
# (1) Interpret input
# --------------------
if ifus == 'all':
IFUlist = [1,2,3,4,5,6,7,8,9,10,11,12,13]
else:
IFUlist = ifu_num = [int(ifus)]
n_IFU = len(IFUlist)
if verbose:
print('')
print('--------------------------------')
print('Running sami.observing.sn.sn_re.')
print('--------------------------------')
print('')
if n_IFU == 1: print('Processing', n_IFU, 'IFU. Plotting is', end=' ')
if n_IFU > 1: print('Processing', n_IFU, 'IFUs. Plotting is', end=' ')
if not plot: print('OFF.')
if plot: print('ON.')
print('')
# --------------------
# Set up plot process
# --------------------
# Define number of cores, core diameter (in arcsec).
# -- is this stored someplace in sami.utils/generic?
n_core = 61
r_core = 1.6
# Create the figure
if plot:
# Get Field RA, DEC
hdulist = pf.open(insami)
primary_header = hdulist['PRIMARY'].header
field_dec = primary_header['MEANDEC']
# To create the even grid to display the cubes on
# (accurate to 1/10th core diameter)
dx = 4.44e-5 /np.cos(np.pi *field_dec /180.)
dy = 4.44e-5
fig = py.figure()
# Number of rows and columns needed in the final display box
# This is a bit of a fudge...
if n_IFU==1:
im_n_row = 1
im_n_col = 1
elif n_IFU==2:
im_n_row = 1
im_n_col = 2
elif n_IFU==3:
im_n_row = 1
im_n_col = 3
elif n_IFU==4:
im_n_row = 2
im_n_col = 2
elif n_IFU>3 and n_IFU<=6:
im_n_row = 2
im_n_col = 3
elif n_IFU>6 and n_IFU<=9:
im_n_row = 3
im_n_col = 3
elif n_IFU>9 and n_IFU<=12:
im_n_row = 3
im_n_col = 4
elif n_IFU>12:
im_n_row = 4
im_n_col = 4
# ISK: trying to improve the rows and columns a bit:
# def isodd(num): return num & 1 and True or False
# if n <= 3:
# r = 1
# c = n
# elif n > 6:
# r = 3
# c = 3
# ----------------------
# (2) Read target table
# ----------------------
tabname = ['name', 'ra', 'dec', 'r_petro', 'r_auto', 'z_tonry', 'zspec',
'M_r', 'Re', '<mu_Re>', 'mu(Re)', 'mu(2Re)', 'ellip', 'PA', 'M*',
'g-i', 'A_g', 'CATID', 'SURV_SAMI', 'PRI_SAMI', 'BAD_CLASS']
target_table = tab.read(tablein, names=tabname, data_start=0)
CATID = target_table['CATID'].tolist()
# Start a little counter to keep track
# -- a fudge for the way the plot loop is set up...
counter = 0
# --------------------------
# Commence the all-IFU loop
# --------------------------
for ifu_num in IFUlist:
counter = counter + 1
# Read single IFU
myIFU = utils.IFU(insami, ifu_num, flag_name=False)
# And find the row index for this SAMI target.
try:
this_galaxy = CATID.index(int(myIFU.name))
no_such_galaxy = False
except:
this_galaxy = []
no_such_galaxy = True
pass
"""
There are other ways to do this with a numpy array as input.
Lists are far better at this, so have made a CATID list.
this_galaxy = np.where(target_table['CATID'] == int(myIFU.name))
this_galaxy = np.where(CATID == int(myIFU.name))
this_galaxy = [CATID == int(myIFU.name)]
"""
# ----------------------------
# (3) Define wavelength range
# ----------------------------
if no_such_galaxy:
z_target = 0.0
z_string = '0.0'
# see below for explanation of this.
idx1 = l1
idx2 = l2
print(('-- IFU #' + str(ifu_num)))
print(" This galaxy was not found in the Target Table. ")
else:
z_target = target_table['zspec'][this_galaxy]
z_string = str(z_target)
l_range = myIFU.lambda_range
l_rest = l_range/(1+z_target)
# identify array elements closest to l1, l2 **in rest frame**
idx1 = (np.abs(l_rest - l1)).argmin()
idx2 = (np.abs(l_rest - l2)).argmin()
if verbose:
print('-------------------------------------------------------')
print((' IFU #' + str(ifu_num)))
print('-------------------------------------------------------')
print((' Redshift: ' + z_string))
print((' Spectral range: ' +
str(np.around([l_rest[idx1], l_rest[idx2]]))))
print((' Observed at: ' +
str(np.around([l_range[idx1], l_range[idx2]]))))
print('')
# -------------------------
# (4) Get SNR of all cores
# -------------------------
sn_spec = myIFU.data/np.sqrt(myIFU.var)
# Median SN over lambda range (per Angstrom)
sn = np.nanmedian(sn_spec[:, idx1:idx2], axis=1) * (1./myIFU.cdelt1)
# ----------------------------------
# (5) Find galaxy centre (peak SNR)
# ----------------------------------
# Initialise a couple of arrays for this loop
core_distance = np.zeros(n_core)
good_core = np.zeros(n_core)
centroid_ra = 0.
centroid_dec = 0.
# Get target Re from table (i.e., match entry by name)
if no_such_galaxy:
print(" No Re listed, calculating SNR at centroid instead.")
re_target = 0.
else:
re_target = target_table['Re'][this_galaxy]
# Get either centroid, or table RA, DEC
if seek_centroid:
if no_such_galaxy:
centroid = np.where(myIFU.n ==1)
else:
centroid = np.where(sn == np.nanmax(sn))
centroid_ra = myIFU.xpos[centroid]
centroid_dec = myIFU.ypos[centroid]
if not seek_centroid:
if no_such_galaxy:
centroid = np.where(myIFU.n ==1)
else:
centroid_ra = target_table['ra'][this_galaxy]
centroid_dec = target_table['dec'][this_galaxy]
test_distance = 3600.* np.sqrt(
(myIFU.xpos - centroid_ra)**2 +
(myIFU.ypos - centroid_dec)**2 )
centroid = np.abs(test_distance - 0).argmin()
if verbose:
print(' S/N @Centroid =', np.round(sn[centroid]), '[/Angstrom]')
print('')
# ----------------------------------------
# (6) Identify cores at approximately Re
# ----------------------------------------
# Check that there is an Re listed, some times there isn't.
if no_such_galaxy:
sn_Re = 0.
else:
core_distance = 3600.* np.sqrt(
(myIFU.xpos - centroid_ra)**2 +
(myIFU.ypos - centroid_dec)**2 )
good_core[(core_distance > re_target - 0.5*r_core)
& (core_distance < re_target + 0.5*r_core)] = True
# Get median S/N of cores @Re:
if 1 in good_core:
sn_Re = np.nanmedian(sn[good_core == True])
sn_min = min(sn[good_core == True])
sn_max = max(sn[good_core == True])
if verbose:
if not 1 in good_core:
sn_str = str(np.round(np.nanmedian(sn)))
print("** Could not match Re")
print(('=> Median overall S/N = '+sn_str))
print('')
else:
print('=> [Min, Max, Median] S/N @Re = [', end=' ')
print('%0.2f' % min(sn[good_core == True]), ',', end=' ')
print('%0.2f' % max(sn[good_core == True]), ',', end=' ')
print('%0.2f' % sn_Re, '] [/Angstrom]')
print('')
# ----------
# DRAW PLOT
# ----------
if plot:
# Set image size to fit the bundle.
size_im = 100
N_im = np.arange(size_im)
# Create a linear grid, centred at Fibre #1.
            x_ctr = myIFU.xpos[np.sum(np.where(myIFU.n == 1))]
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import matplotlib.animation as anim
import math
import numpy as np
import os
import glob
import sys
import gaussianFit as gF
import RL
import Accelerated as ac
import projections as proj
import GMM_sample as GMMs
import scipy
from scipy.stats import entropy
class OMDPI:
def __init__(self, RL):
self.RL = RL
timeStep = self.RL.n*self.RL.times
dimension = self.RL.n_dims
rfs = self.RL.n_rfs
reps = self.RL.reps
timeIndex = range(timeStep)
self.G = np.zeros((reps,dimension,timeStep,rfs))
self.psi = np.zeros((reps,timeStep,rfs))
def init(self):
self.f = open(self.RL.path+'/'+'Entropy-'+self.RL.MethodMode+'.csv', 'a')
timeStep = self.RL.n*self.RL.times
        # mu does not depend on reps, so its leading dimension is 1
self.mu0=np.zeros([1, self.RL.n_dims, timeStep, self.RL.n_rfs])
self.mu0[0,:,0,:]=self.RL.meanIni+np.zeros([self.RL.n_dims, self.RL.n_rfs])
#self.mu0[0,:,0,:]=np.array([[.55, .25, .2]]).T
        #TODO: copy mu0 to every timestep, just to be safe
for t in range(timeStep):
self.mu0[0,:,t,:]=self.mu0[0,:,0,:]
self.distroIni=[\
[self.RL.lambdakIni, self.mu0],\
[1.0-self.RL.lambdakIni, self.mu0]]
        # Initialize the AMD class
self.MDmethod=self.__amd_init(self.RL.MethodMode)
        # Initialize the parameters
self._tmp = [np.random.rand() for _ in range(self.RL.reps)]
        self.x0 = self.ztilde0 = np.array(self._tmp)
import random
random.seed(666)
import dynet as dy
import numpy as np
np.random.seed(666)
import heapq
from utils.helper import *
class LSTMDecoder(object):
def __init__(self, model, x_dims, h_dims, ccg_dims, LSTMBuilder, n_tag):
pc = model.add_subcollection()
input_dim = x_dims + ccg_dims
#decoder lstm
self.f = LSTMBuilder(1, input_dim, h_dims, pc)
self.W = pc.add_parameters((n_tag, h_dims), init='normal', mean=0, std=1)
# lookup table
self.ccg_lookup = pc.lookup_parameters_from_numpy(
            np.random.randn(n_tag, ccg_dims))
import numpy as np
import pandas as pd
import glob
def ecdf(data):
"""
Computes the empirical cumulative distribution function for a collection of provided data.
Parameters
----------
data : 1d-array, Pandas Series, or list
One-dimensional collection of data for which the ECDF will
be computed
Returns
-------
x, y : 1d-arrays
The sorted x data and the computed ECDF
"""
    # x: the sorted data; y: fraction of observations <= each x value
    return np.sort(data), np.arange(1, len(data) + 1) / len(data)
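if __name__ == "__main__":
    # Quick demonstration (illustrative sketch): the ECDF of 1000 standard
    # normal draws should evaluate to roughly 0.5 at the sample median.
    demo = np.random.normal(size=1000)
    x, y = ecdf(demo)
    print("ECDF at the sample median:", y[np.searchsorted(x, np.median(demo))])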
from os.path import join
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import zscore
from sklearn.decomposition import PCA
import pandas as pd
from itertools import combinations
# Load helper function(s) for interacting with CTF dataset
from ctf_dataset.load import create_wrapped_dataset
base_dir = '/mnt/bucket/labs/hasson/snastase/social-ctf'
data_dir = join(base_dir, 'data')
# Create wrapped CTF dataset
wrap_f = create_wrapped_dataset(data_dir, output_dataset_name="virtual.hdf5")
n_lstms = 512
n_repeats = 8
n_players = 4
map_id = 0
# Get matchups with all same agents (e.g. AA vs AA)
agent_ids = wrap_f['map/matchup/repeat/player/agent_id'][0, :, :, :, 0]
matchup_ids = np.all(agent_ids[:, 0, :] ==
agent_ids[:, 0, 0][:, np.newaxis], axis=1)
n_matchups = np.sum(matchup_ids) # 0, 34, 49, 54
# Extract LSTMs for one map and matchup
lstms_matched = np.tanh(wrap_f['map/matchup/repeat/player/time/lstm'][
map_id, matchup_ids, ...].astype(np.float32))
print("Loaded LSTMs for within-population matchups")
# Loop through matchups, repeats, and players to compute PCA
k = n_lstms
lstm_pca = {}
for m in np.arange(n_matchups):
lstm_pca[m] = {}
for r in np.arange(n_repeats):
lstm_pca[m][r] = {}
for p in np.arange(n_players):
lstm_pca[m][r][p] = {}
pca = PCA(n_components=k)
transformed = pca.fit_transform(
#zscore(lstms_matched[m, r, p], axis=0))
#np.tanh(lstms_matched[m, r, p]))
zscore(lstms_matched[m, r, p], axis=0))
lstm_pca[m][r][p]['transformed'] = transformed
lstm_pca[m][r][p]['pca'] = pca
print(f"Finished running PCA for matchup {m}, "
f"repeat {r}, player {p}")
np.save('results/pca_lstm_tanh-z_results.npy', lstm_pca)
# Convert PCA outputs to long dictionary for plotting
lstm_pca_long = {'population': [], 'repeat': [], 'player': [],
'variance explained': [], 'dimension': []}
pops = {0: 'A', 1: 'B', 2: 'C', 3: 'D'}
for m in np.arange(n_matchups):
for r in np.arange(n_repeats):
for p in np.arange(n_players):
for k, v in enumerate(lstm_pca[m][r][p][
'pca'].explained_variance_ratio_):
lstm_pca_long['population'].append(pops[m])
lstm_pca_long['repeat'].append(r)
lstm_pca_long['player'].append(p)
lstm_pca_long['variance explained'].append(v)
lstm_pca_long['dimension'].append(k + 1)
lstm_pca_long = pd.DataFrame(lstm_pca_long)
max_k = 30
lstm_pca_trunc = lstm_pca_long[lstm_pca_long['dimension'] <= max_k]
sns.set(font_scale=1.2, style='white')
sns.relplot(data=lstm_pca_trunc, x='dimension',
y='variance explained', hue='repeat',
col='population', col_wrap=2,
kind='line')
# Compute number of components required for percentage variance
percents = [.5, .75, .9, .95, .99]
percents_vaf = np.zeros((n_matchups, n_repeats, n_players, len(percents)))
for m in np.arange(n_matchups):
for r in np.arange(n_repeats):
for p in np.arange(n_players):
for i, perc in enumerate(percents):
k = np.sum(np.cumsum(
lstm_pca[m][r][p][
'pca'].explained_variance_ratio_) <= perc) + 1
percents_vaf[m, r, p, i] = k
for m in np.arange(n_matchups):
for i, perc in enumerate(percents):
        median = int(np.median(percents_vaf[m, ..., i]))
        k_min = int(np.amin(percents_vaf[m, ..., i]))
        k_max = int(np.amax(percents_vaf[m, ..., i]))
        print(f"Population {pops[m]}: {median} dimensions "
              f"for {perc} variance (range: {k_min}-{k_max})")
print('\n')
# Stack pairs of players and compute joint PCA
pairs = list(combinations(np.arange(n_players), 2))
n_pairs = len(pairs)
k = n_lstms * 2
coop_ids, comp_ids = [0, 5], [1, 2, 3, 4]
lstm_pair_pca = {}
for m in np.arange(n_matchups):
lstm_pair_pca[m] = {}
for r in np.arange(n_repeats):
lstm_pair_pca[m][r] = {}
for p, pair in enumerate(pairs):
lstm_pair_pca[m][r][p] = {}
stack_lstm = np.hstack((lstms_matched[m, r, pair[0]],
lstms_matched[m, r, pair[1]]))
pca = PCA(n_components=k)
transformed = pca.fit_transform(
zscore(stack_lstm, axis=0))
lstm_pair_pca[m][r][p]['transformed'] = transformed
lstm_pair_pca[m][r][p]['pca'] = pca
print(f"Finished running PCA for matchup {m}, "
f"repeat {r}, pair {pair}")
np.save('results/pair-pca_lstm_tanh-z_results.npy', lstm_pair_pca)
# Convert PCA outputs to long dictionary for plotting
lstm_pair_pca_long = {'population': [], 'repeat': [], 'pair': [],
'variance explained': [], 'dimension': [],
'type': []}
pops = {0: 'A', 1: 'B', 2: 'C', 3: 'D'}
pair_type = {c:('cooperative' if c in coop_ids else 'competitive')
for c in np.arange(n_pairs)}
for m in np.arange(n_matchups):
for r in np.arange(n_repeats):
for p in np.arange(n_pairs):
for k, v in enumerate(lstm_pair_pca[m][r][p][
'pca'].explained_variance_ratio_):
lstm_pair_pca_long['population'].append(pops[m])
lstm_pair_pca_long['repeat'].append(r)
lstm_pair_pca_long['pair'].append(p)
lstm_pair_pca_long['variance explained'].append(v)
lstm_pair_pca_long['dimension'].append(k + 1)
lstm_pair_pca_long['type'].append(pair_type[p])
lstm_pair_pca_long = pd.DataFrame(lstm_pair_pca_long)
max_k = 10
lstm_pair_pca_trunc = lstm_pair_pca_long[
lstm_pair_pca_long['dimension'] <= max_k]
sns.set(font_scale=1.2, style='white')
sns.relplot(data=lstm_pair_pca_trunc, x='dimension',
y='variance explained', hue='type',
col='population', col_wrap=2, linewidth=3,
kind='line')
# Compute number of components required for percentage variance
percents = [.5, .75, .9, .95, .99]
percents_vaf = np.zeros((n_matchups, n_repeats, n_pairs, len(percents)))
for m in np.arange(n_matchups):
for r in np.arange(n_repeats):
for p in np.arange(n_pairs):
for i, perc in enumerate(percents):
k = np.sum(np.cumsum(
lstm_pair_pca[m][r][p][
'pca'].explained_variance_ratio_) <= perc) + 1
percents_vaf[m, r, p, i] = k
for m in np.arange(n_matchups):
    for pair_label, c in zip(['cooperative', 'competitive'],
                             [coop_ids, comp_ids]):
        for i, perc in enumerate(percents):
            median = int(np.median(percents_vaf[m, :, c, i]))
            k_min = int(np.amin(percents_vaf[m, :, c, i]))
            k_max = int(np.amax(percents_vaf[m, :, c, i]))
            print(f"Population {pops[m]} {pair_label}: {median} dimensions "
                  f"for {perc} variance (range: {k_min}-{k_max})")
print('\n')
# Stack across all repeats and run PCA
k = n_lstms
lstm_stack_pca = {}
for m in np.arange(n_matchups):
lstm_stack_pca[m] = {}
stack_lstm = []
for r in np.arange(n_repeats):
for p in np.arange(n_players):
stack_lstm.append(zscore(lstms_matched[m, r, p],
axis=0))
stack_lstm = np.vstack(stack_lstm)
pca = PCA(n_components=k)
transformed = pca.fit_transform(stack_lstm)
lstm_stack_pca[m]['transformed'] = transformed
lstm_stack_pca[m]['pca'] = pca
print(f"Finished running stacked PCA for matchup {m}")
np.save('results/stack-pca_lstm_tanh-z_results.npy', lstm_stack_pca)
# Convert PCA outputs to long dictionary for plotting
lstm_stack_pca_long = {'population': [], 'variance explained': [],
'dimension': []}
pops = {0: 'A', 1: 'B', 2: 'C', 3: 'D'}
for m in np.arange(n_matchups):
for k, v in enumerate(lstm_stack_pca[m][
'pca'].explained_variance_ratio_):
lstm_stack_pca_long['population'].append(pops[m])
lstm_stack_pca_long['variance explained'].append(v)
lstm_stack_pca_long['dimension'].append(k + 1)
lstm_stack_pca_long = pd.DataFrame(lstm_stack_pca_long)
max_k = 8
lstm_stack_pca_trunc = lstm_stack_pca_long[
lstm_stack_pca_long['dimension'] <= max_k]
sns.set(font_scale=1.2, style='white')
sns.lineplot(data=lstm_stack_pca_trunc, x='dimension',
y='variance explained', hue='population',
linewidth=3)
# Compute number of components required for percentage variance
percents = [.5, .75, .9, .95, .99]
percents_vaf = np.zeros((n_matchups, len(percents)))
for m in np.arange(n_matchups):
for i, perc in enumerate(percents):
k = np.sum(np.cumsum(
lstm_stack_pca[m][
'pca'].explained_variance_ratio_) <= perc) + 1
percents_vaf[m, i] = k
for m in np.arange(n_matchups):
for i, perc in enumerate(percents):
median = int(np.median(percents_vaf[m, i]))
print(f"Population {pops[m]}: {median} dimensions "
f"for {perc} variance")
print('\n')
# Create reduced-dimension version of data (e.g. k = 100)
k = 100
lstm_pca_reduce = []
for m in np.arange(n_matchups):
stack_lstm = []
for r in np.arange(n_repeats):
for p in np.arange(n_players):
stack_lstm.append(zscore(lstms_matched[m, r, p],
axis=0))
    stack_lstm = np.vstack(stack_lstm)
import numpy as np
A = np.matrix([[1,0,0],
[0,1,0],
[0,0,1]]) #lattice
print("Direct Lattice:\n{}".format(A))
B = 2*np.pi*(np.linalg.inv(A)).H #recip lattice
print("Recip Lattice:\n{}".format(B))
rc = 0.5*np.min(np.sqrt(np.sum(np.square(A),1)))
lrdimcut = 40
kc = lrdimcut / rc
print("lrdim: {}, rc: {}, kc: {}".format(lrdimcut,rc,kc))
#electronic structure, p. 85
mmax = np.floor(np.sqrt(np.sum(np.square(A),1)) * (kc/(2*np.pi))) + 1
mmax = np.array(mmax,dtype=int).reshape((3,)) #matrix to array
kpts = [] #translations of recip lattice
kmag = [] #magnitude
for i in range(-mmax[0], mmax[0] + 1):
for j in range(-mmax[1], mmax[1] + 1):
for k in range(-mmax[2], mmax[2] + 1):
if (i == 0) and (j==0) and (k==0):
continue
kvec = np.matrix([i,j,k])
kcart = np.array(np.dot(kvec,B)).reshape((3,))
if np.linalg.norm(kcart) > kc:
continue
kpts.append(np.array(kvec).reshape((3,)))
kmag.append(np.linalg.norm(kcart))
kpts = np.array(kpts)
kmag = np.array(kmag)
idx = np.argsort(kmag)
kpts = kpts[idx]
kmag = kmag[idx]
# 1-exp(-k^2) and k is unit k
sks = []
with open('simple_Sk.dat','w') as f:
f.write('# kx ky kz Sk err\n')
for i in range(len(kpts)):
kcart = np.array(np.dot(kpts[i],B)).reshape((3,))
kunit = kcart / (2*np.pi)
        k = np.linalg.norm(kunit)
# -*- coding: utf-8 -*-
# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)
# <NAME> (<EMAIL>), Blue Yonder Gmbh, 2016
"""
This module contains the feature calculators that take time series as input and calculate the values of the feature.
There are three types of features:
1. aggregate features without parameter
2. aggregate features with parameter
3. apply features with parameters
While type 1 and 2 are designed to be used with pandas aggregate, they will only return one singular feature.
To not unnecessarily redo auxiliary calculations, in type 3 a group of features is calculated at the same time. They
can be used with pandas apply.
"""
from __future__ import absolute_import, division
from builtins import range
import itertools
import numpy as np
from numpy.linalg import LinAlgError
import numbers
from functools import wraps
import pandas as pd
from scipy.signal import welch, cwt, ricker, find_peaks_cwt
from statsmodels.tsa.ar_model import AR
from statsmodels.tsa.stattools import adfuller
from functools import reduce
# todo: make sure '_' works in parameter names in all cases, add a warning if not
def _get_length_sequences_where(x):
"""
This method calculates the length of all sub-sequences where the array x is either True or 1.
Examples
--------
>>> x = [0,1,0,0,1,1,1,0,0,1,0,1,1]
>>> _get_length_sequences_where(x)
>>> [1, 3, 1, 2]
>>> x = [0,True,0,0,True,True,True,0,0,True,0,True,True]
>>> _get_length_sequences_where(x)
>>> [1, 3, 1, 2]
>>> x = [0,True,0,0,1,True,1,0,0,True,0,1,True]
>>> _get_length_sequences_where(x)
>>> [1, 3, 1, 2]
:param x: An iterable containing only 1, True, 0 and False values
:return: A list with the length of all sub-sequences where the array is either True or False. If no ones or Trues
contained, the list [0] is returned.
"""
if len(x) == 0:
return [0]
else:
res = [len(list(group)) for value, group in itertools.groupby(x) if value == 1]
return res if len(res) > 0 else [0]
def not_apply_to_raw_numbers(func):
"""
This decorator makes sure that the function func is only called on objects that are not numbers.Number
:param func: the method that should only be executed on objects which are not a numbers.Number
:return: the decorated version of func which returns 0 if the first argument x is a numbers.Number. For every
other x the output of func is returned
"""
@wraps(func)
def func_on_nonNumberObject(x, *arg, **args):
if isinstance(x, numbers.Number):
return 0
else:
return func(x, *arg, **args)
return func_on_nonNumberObject
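# Example (illustrative sketch): because of this decorator, the calculators
# below short-circuit on bare scalars, e.g. large_standard_deviation(5.0, r=0.25)
# returns 0 instead of raising an error.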
def set_property(key, value):
"""
This method returns a decorator that sets the property key of the function to value
"""
def decorate_func(func):
setattr(func, key, value)
if func.__doc__ and key == "fctype":
func.__doc__ = func.__doc__ + "\n\n *This function is of type: " + value + "*\n"
return func
return decorate_func
@set_property("fctype", "aggregate")
@not_apply_to_raw_numbers
def variance_larger_than_standard_deviation(x):
"""
Boolean variable denoting if the variance of x is greater than its standard deviation. Is equal to variance of x
being larger than 1
:param x: the time series to calculate the feature of
:type x: pandas.Series
:return: the value of this feature
:return type: bool
"""
return np.var(x) > np.std(x)
@set_property("fctype", "aggregate_with_parameters")
@not_apply_to_raw_numbers
def large_standard_deviation(x, r):
"""
Boolean variable denoting if the standard dev of x is higher
than 'r' times the range = difference between max and min of x.
Hence it checks if
.. math::
std(x) > r * (max(X)-min(X))
According to a rule of the thumb, the standard deviation should be a forth of the range of the values.
:param x: the time series to calculate the feature of
:type x: pandas.Series
:param r: the percentage of the range to compare with
:type r: float
:return: the value of this feature
:return type: bool
"""
x = np.asarray(x)
return np.std(x) > (r * (max(x) - min(x)))
@set_property("fctype", "apply")
@not_apply_to_raw_numbers
def symmetry_looking(x, c, param):
"""
Boolean variable denoting if the distribution of x *looks symmetric*. This is the case if
.. math::
| mean(X)-median(X)| < r * (max(X)-min(X))
:param x: the time series to calculate the feature of
:type x: pandas.Series
:param r: the percentage of the range to compare with
:type r: float
:return: the value of this feature
:return type: bool
"""
x = np.asarray(x)
mean_median_difference = abs(np.mean(x) - np.median(x))
max_min_difference = max(x) - min(x)
return pd.Series({"{}__symmetry_looking__r_{}".format(c, r["r"]):
mean_median_difference < (r["r"] * max_min_difference) for r in param})
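def _demo_symmetry_looking():
    # Minimal sketch of the expected `param` format: a list of dicts, each with
    # an "r" threshold. The returned Series is indexed by generated feature
    # names such as "example_column__symmetry_looking__r_0.05".
    x = pd.Series([1.0, 2.0, 3.0, 4.0, 100.0])
    return symmetry_looking(x, "example_column", [{"r": 0.05}, {"r": 0.5}])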
@set_property("fctype", "aggregate")
@not_apply_to_raw_numbers
def has_duplicate_max(x):
"""
Checks if the maximum value of x is observed more than once
:param x: the time series to calculate the feature of
:type x: pandas.Series
:return: the value of this feature
:return type: bool
"""
return sum(np.asarray(x) == max(x)) >= 2
@set_property("fctype", "aggregate")
@not_apply_to_raw_numbers
def has_duplicate_min(x):
"""
Checks if the minimal value of x is observed more than once
:param x: the time series to calculate the feature of
:type x: pandas.Series
:return: the value of this feature
:return type: bool
"""
return sum(np.asarray(x) == min(x)) >= 2
@set_property("fctype", "aggregate")
@not_apply_to_raw_numbers
def has_duplicate(x):
"""
Checks if any value in x occurs more than once
:param x: the time series to calculate the feature of
:type x: pandas.Series
:return: the value of this feature
:return type: bool
"""
return len(x) != len(set(x))
@set_property("fctype", "aggregate")
@set_property("minimal", True)
def sum_values(x):
"""
Calculates the sum over the time series values
:param x: the time series to calculate the feature of
:type x: pandas.Series
:return: the value of this feature
    :return type: float
"""
return np.sum(x)
@set_property("fctype", "aggregate_with_parameters")
@not_apply_to_raw_numbers
def large_number_of_peaks(x, n):
"""
    Checks if the number of peaks of support n in x is higher than 5.
    :param x: the time series to calculate the feature of
    :type x: pandas.Series
    :param n: the support of the peaks (a peak must exceed its n neighbours on each side)
    :type n: int
:return: the value of this feature
:return type: bool
"""
return number_peaks(x, n=n) > 5
@set_property("fctype", "aggregate")
@not_apply_to_raw_numbers
def mean_autocorrelation(x):
"""
Calculates the average autocorrelation (Compare to http://en.wikipedia.org/wiki/Autocorrelation#Estimation),
taken over different all possible lags (1 to length of x)
.. math::
\\frac{1}{n} \\sum_{l=1,\ldots, n} \\frac{1}{(n-l)\sigma^{2}} \\sum_{t=1}^{n-l}(X_{t}-\\mu )(X_{t+l}-\\mu)
where :math:`n` is the length of the time series :math:`X_i`, :math:`\sigma^2` its variance and :math:`\mu` its
mean.
:param x: the time series to calculate the feature of
:type x: pandas.Series
:return: the value of this feature
:return type: float
"""
var = np.var(x)
n = len(x)
if abs(var) < 10**-10 or n == 1:
return 0
else:
        # full cross-correlation of the de-meaned series gives all lags
        r = np.correlate(x - np.mean(x), x - np.mean(x), mode='full')
import numpy as np
from utils.octree_partition import partition_octree
import time
from glob import glob
import tensorflow as tf
import multiprocessing
from tqdm import tqdm
from pyntcloud import PyntCloud
import pandas as pd
#VOXEL-OCTREE
def timing(f):
def wrap(*args, **kwargs):
time1 = time.time()
ret = f(*args, **kwargs)
time2 = time.time()
print('{:s} function took {:.3f} ms'.format(f.__name__, (time2 - time1) * 1000.0))
return ret
return wrap
def get_bin_stream_blocks(path_to_ply, pc_level, departition_level):
# co 10 level --> binstr of 10 level, blocks size =1
level = int(departition_level)
pc = PyntCloud.from_file(path_to_ply)
points = pc.points.values
no_oc_voxels = len(points)
box = int(2 ** pc_level)
blocks2, binstr2 = timing(partition_octree)(points, [0, 0, 0], [box, box, box], level)
return no_oc_voxels, blocks2, binstr2
def voxel_block_2_octree(box,oct_seq):
box_size=box.shape[0]
child_bbox=int(box_size/2)
if(box_size>2):
for d in range(2):
for h in range(2):
for w in range(2):
child_box=box[d * child_bbox:(d + 1) * child_bbox, h * child_bbox:(h + 1) * child_bbox, w * child_bbox:(w + 1) * child_bbox]
if(np.sum(child_box)!=0):
oct_seq.append(1)
voxel_block_2_octree(child_box, oct_seq)
else:
oct_seq.append(0)
else:
curr_octant=[int(x) for x in box.flatten()]
oct_seq+=curr_octant
return oct_seq
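def _demo_voxel_block_2_octree():
    # Minimal sketch: a 2x2x2 occupancy block flattens directly into its eight
    # voxel occupancies; larger blocks are split recursively into octants.
    demo_box = np.zeros((2, 2, 2), dtype=np.uint8)
    demo_box[0, 0, 0] = 1
    return voxel_block_2_octree(demo_box, [])  # -> [1, 0, 0, 0, 0, 0, 0, 0]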
#FOR VOXEL
def input_fn_super_res(points, batch_size, dense_tensor_shape32, data_format, repeat=True, shuffle=True, prefetch_size=1):
# Create input data pipeline.
def gen():
iterator=iter(points)
done=False
while not done:
try:
p = next(iterator)
except StopIteration:
done=True
else:
ds = np.abs(np.round((p - 0.01) / 2))
ds = np.unique(ds,axis=0)
yield (ds, p)
p_max = np.array([64, 64, 64])
dense_tensor_shape64 = np.concatenate([p_max, [1]]).astype('int64')
dense_tensor_shape=[dense_tensor_shape32,dense_tensor_shape64]
dataset = tf.data.Dataset.from_generator(generator=gen, output_types=(tf.int64,tf.int64),output_shapes= (tf.TensorShape([None, 3]),tf.TensorShape([None, 3])))
if shuffle:
dataset = dataset.shuffle(len(points))
if repeat:
dataset = dataset.repeat()
dataset = dataset.map(lambda x,y: pc_to_tf(x,y
, dense_tensor_shape, data_format))
dataset = dataset.map(lambda x,y: process_x(x,y, dense_tensor_shape))
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(prefetch_size)
return dataset
# Main launcher
def input_fn_voxel_dnn(points, batch_size, dense_tensor_shape, data_format, repeat=True, shuffle=True, prefetch_size=1):
print('point shape: ', points.shape)
# Create input data pipeline.
dataset = tf.data.Dataset.from_generator(lambda: iter(points), tf.int64, tf.TensorShape([None, 3]))
if shuffle:
dataset = dataset.shuffle(len(points))
if repeat:
dataset = dataset.repeat()
dataset = dataset.map(lambda x: pc_to_tf_voxel_dnn(x, dense_tensor_shape, data_format))
dataset = dataset.map(lambda x: process_x_voxel_dnn(x, dense_tensor_shape))
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(prefetch_size)
return dataset
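# Example usage (illustrative sketch; the resolution, glob pattern and batch
# size are assumptions):
#
#     p_min, p_max, dense_shape = get_shape_data(64, 'channels_last')
#     points = load_points(get_files('data/**/*.ply'))
#     dataset = input_fn_voxel_dnn(points, batch_size=8,
#                                  dense_tensor_shape=dense_shape,
#                                  data_format='channels_last')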
def df_to_pc(df):
points = df[['x', 'y', 'z']].values
return points
def pa_to_df(points):
cols = ['x', 'y', 'z', 'red', 'green', 'blue']
types = (['float32'] * 3) + (['uint8'] * 3)
d = {}
assert 3 <= points.shape[1] <= 6
for i in range(points.shape[1]):
col = cols[i]
dtype = types[i]
d[col] = points[:, i].astype(dtype)
df = pd.DataFrame(data=d)
return df
def pc_to_df(pc):
points = pc.points
return pa_to_df(points)
def pc_to_tf(x,y, dense_tensor_shape, data_format):
assert data_format in ['channels_last', 'channels_first']
# Add one channel (channels_last convention)
if data_format == 'channels_last':
x = tf.pad(x, [[0, 0], [0, 1]])
else:
x = tf.pad(x, [[0, 0], [1, 0]])
st0 = tf.sparse.SparseTensor(x, tf.ones_like(x[:, 0]), dense_tensor_shape[0])
# Add one channel (channels_last convention)
if data_format == 'channels_last':
y = tf.pad(y, [[0, 0], [0, 1]])
else:
y = tf.pad(y, [[0, 0], [1, 0]])
st1 = tf.sparse.SparseTensor(y, tf.ones_like(y[:, 0]), dense_tensor_shape[1])
return (st0,st1)
def process_x(x,y, dense_tensor_shape):
x = tf.sparse.to_dense(x, default_value=0, validate_indices=False)
x.set_shape(dense_tensor_shape[0])
x = tf.cast(x, tf.float32)
y = tf.sparse.to_dense(y, default_value=0, validate_indices=False)
y.set_shape(dense_tensor_shape[1])
y = tf.cast(y, tf.float32)
return (x,y)
def pc_to_tf_voxel_dnn(points, dense_tensor_shape, data_format):
x = points
assert data_format in ['channels_last', 'channels_first']
# Add one channel (channels_last convention)
if data_format == 'channels_last':
x = tf.pad(x, [[0, 0], [0, 1]])
else:
x = tf.pad(x, [[0, 0], [1, 0]])
st = tf.sparse.SparseTensor(x, tf.ones_like(x[:, 0]), dense_tensor_shape)
# print('st in pc to tf: ',st)
return st
def process_x_voxel_dnn(x, dense_tensor_shape):
x = tf.sparse.to_dense(x, default_value=0, validate_indices=False)
x.set_shape(dense_tensor_shape)
x = tf.cast(x, tf.float32)
# print('x in process x: ',x)
return x
def get_shape_data(resolution, data_format):
assert data_format in ['channels_last', 'channels_first']
bbox_min = 0
bbox_max = resolution
p_max = np.array([bbox_max, bbox_max, bbox_max])
p_min = np.array([bbox_min, bbox_min, bbox_min])
if data_format == 'channels_last':
dense_tensor_shape = np.concatenate([p_max, [1]]).astype('int64')
else:
dense_tensor_shape = np.concatenate([[1], p_max]).astype('int64')
return p_min, p_max, dense_tensor_shape
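def _demo_get_shape_data():
    # Minimal sketch: a 64^3 occupancy grid in channels_last layout.
    p_min, p_max, dense_tensor_shape = get_shape_data(64, 'channels_last')
    # p_min -> [0 0 0], p_max -> [64 64 64], dense_tensor_shape -> [64 64 64 1]
    return p_min, p_max, dense_tensor_shape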
def get_files(input_glob):
return np.array(glob(input_glob, recursive=True))
def load_pc(path):
try:
pc = PyntCloud.from_file(path)
points=pc.points
ret = df_to_pc(points)
return ret
except:
return
def load_points(files, batch_size=32):
files_len = len(files)
with multiprocessing.Pool() as p:
# logger.info('Loading PCs into memory (parallel reading)')
points = np.array(list(tqdm(p.imap(load_pc, files, batch_size), total=files_len)))
return points
# blocks to occupancy maps, only for running the first time to explore pc characteristic
def pc_2_block_oc3_test(blocks, bbox_max):
no_blocks = len(blocks)
blocks_oc = np.zeros((no_blocks, bbox_max, bbox_max, bbox_max, 1), dtype=np.float32)
coor_min_max=np.zeros((no_blocks,6),dtype=np.uint32)
lower_level_ocv=[]
for i, block in enumerate(blocks):
block = block[:, 0:3]
# getting infor of block
coor_min_max[i,:3] = np.min(block,axis=0)
coor_min_max[i, 3:] = np.max(block,axis=0)
bl_points = (block - 0.01) / 2
        bl_points = np.abs(bl_points)
import numpy as np
from ..metrics import rpmse, bias, accuracy
from ..tmath import norm, norm_der
from .activations import ( relu, elu, gelu, gelu_der, gelu_approx,
gelu_speedy, gelu_speedy_der,
sigmoid, sigmoid_der,
softmax, softmax_der, relu_der, cross_entropy,
cross_entropy_der, selu, selu_der, elu_der,
softmax_cross_entropy_der,
softmax_cross_entropy_der_fullmath,
sigmoid_cross_entropy_binary_der,
cross_entropy_binary, cross_entropy_binary_der,
linear, mse, mse_linear_der)
from .initializers import (he_uniform, he_normal, xavier_uniform,
xavier_normal, lecun_uniform, lecun_normal,
random
)
from .optimizers import gradient_descent, adam
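def _example_model_spec(n_hidden=16):
    # Illustrative sketch only (not used by the class below): a one-hidden-layer
    # specification built from the keys documented in NeuralNetwork.__init__.
    # The depth, activations and initializers chosen here are assumptions for
    # demonstration, not recommended defaults.
    no_reg = {'Weight': 0, 'activity': 0, 'bias': 0}
    l2_norms = {'Weight': 2, 'activity': 2, 'bias': 2}
    return {'hidden0': {'depth': n_hidden,
                        'activation': 'relu',
                        'derivative': 'relu_der',
                        'activation_args': {},
                        'initializer': 'he_uniform',
                        'dropout_keep_prob': 1,
                        'lambda': dict(no_reg),
                        'lp_norm': dict(l2_norms),
                        'use_batch_norm': False},
            'output': {'activation': 'softmax',
                       'activation_args': {},
                       'cost': 'cross_entropy',
                       'cost_args': {},
                       'derivative': 'softmax_cross_entropy_der',
                       'derivative_args': {},
                       'initializer': 'xavier_normal',
                       'evaluation_metric': 'accuracy',
                       'lambda': dict(no_reg),
                       'lp_norm': dict(l2_norms),
                       'use_batch_norm': False}}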
class NeuralNetwork(object):
def __init__(self,
model,
eval_size=10,
batch_size=64,
num_iterations=500,
optimizer='adam',
optimizer_args={'learning_rate': 0.001,
'beta1': 0.9,
'beta2': 0.999,
'eps': 1e-8},
m_scale=1,
bn_tol=1e-9,
bn_momentum=0,
scorer='accuracy',
shuffle=False,
print_cost=True,
random_state=42):
"""
The constructor for the NeuralNetwork class.
Parameters
----------
model : dict
A dictionary containing the model components, layers, and other
specifications. The dictionary should have the following general
structure:
{
'hidden0': {'depth': 10,
'activation': 'relu',
'derivative': 'relu_der',
'activation_args': {},
'initializer': 'he_uniform',
'dropout_keep_prob': 1,
'lambda': {'Weight': 0,
'activity': 0,
'bias': 0
},
'lp_norm': {'Weight': 2,
'activity': 2,
'bias': 2
},
'use_batch_norm': False
},
'output': {'activation': 'softmax',
'activation_args': {},
'cost': 'cross_entropy',
'cost_args': {},
'derivative': 'softmax_cross_entropy_der',
'derivative_args': {},
'initializer': 'xavier_normal',
'evaluation_metric': 'accuracy',
'lambda': {'Weight': 0,
'activity': 0,
'bias': 0
},
'lp_norm': {'Weight': 2,
'activity': 2,
'bias': 2
},
'use_batch_norm': False
}
}
Each layer should have the components defined above, however, not
every component needs to be used (for example, setting
dropout_keep_prob = 1 disables dropout). There can be as many
hidden layers as desired (including none). Simply copy the
'hidden1' sub-dictionary before the output layer to add a new
hidden layer. However, the network must have an output layer
defined. The key names for the layers can be anything, but the
output layer must be positioned last.
A description of each layer key is defined below:
activation (str or function): The activation function to be
used. If custom function, it
will pass the affine
transformation of the current
layer as the first input to the
function.
activation_args (dict) : An optional dictionary for passing
additional arguments to the activation
or derivative function. If there are
none to pass, use an empty dictionary.
For hidden layers, the derivative and
activation arguments should be the
same, so they share this dictionary.
cost (str or function): The cost function to be
used. If custom function, it
will pass the true Y values and
the predicted Y values as the first
two inputs to the function.
cost_args (dict) : An optional dictionary for passing
additional arguments to the
cost function. If there are
none to pass, use an empty dictionary.
Only applies ot the output layer.
depth (int): The number of hidden nodes in the layer
derivative (str or function): The derivative of the combined
cost and output layer activation
function to be
used. If custom function, it
will pass the true Y values,
the predicted Y values, and the
non-activated output layer values
as the first inputs to the
function.
derivative_args (dict) : An optional dictionary for passing
additional arguments to the derivative
function. If there are none to pass,
use an empty dictionary. This only
applies to the output layer.
dropout_keep_prob (float) : The proportion of nodes to keep at
the respective layer. Between 0
and 1. If dropping 10% of the
nodes, the keep prob is 0.9
evaluation_metric (str or function) : An additional evaluation
metric to be used in
training. This is only
used for printing an
additional output along
with cost at each epoch
or specified iteration to
track the training
progress
initializer (str or function) : The function to be used in
initializing the layer weights
and biases. If custom, it must
accept two arguments,
'incoming' and 'outgoing',
which represent how many inputs
are recieved from the previous
layer, and how many outputs
will be calculated at the
current layer.
lambda (dict) : A dictionary containing the regularization
penalties for each type of regularization.
The options are:
Weight (float) : The kernel or weight
regularizer
(recommended for use)
activity (float) : A regularizer placed on
the activation function
output (experimental in
this code, not
recommended for use)
bias (float) : A regularizer for the bias
(not recommended for use for
theoretical reasons, but
should be correct to use)
A value of zero for any of the lambdas will
that regularization type for that layers.
lp_norm (dict) : A dictionary containing the regularization
norm funcitons for each type of
regularization.
The options are:
Weight (int) : The lp-norm for the weight
or kernel regularizer
activity (int) : The lp-norm for the
activity regularizer
bias (int) : The lp-norm for the bias
regularizer
use_batch_norm (bool) : If true, perform batch normalization
on the current layer. For this
implementation, the batch norm layer
is placed before the activation
function and before dropout (if used
together)
eval_size : int, optional
The number of model evaluations to perform before printing
an output. It is recommended that this number be
`int(n/batch_size) + sum([n % batch_size != 0])` where n is the
number of observations
batch_size : int, optional
The number of observations used to update the model at each step.
The default is 64.
num_iterations : int, optional
The total number of full passes through the data to perform
(i.e. the number of epochs). The default is 500.
optimizer : str, optional
The type of optimizer to use for gradient descent.
The default is 'adam'.
optimizer_args : dict, optional
Optional arguments to send to the optimizer (learning rate, etc.).
The default is {'learning_rate': 0.001,
'beta1': 0.9,
'beta2': 0.999,
'eps': 1e-8}.
m_scale : float, optional
An optional scaling parameter to scale up or down the cost and
gradient values. For example, m_scale=2 will multiply the cost
function by 0.5. The default is 1.
bn_tol : float, optional
The tolerance used in the batch norm equations.
The default is 1e-9.
bn_momentum : float, optional
The momentum used in the exponential moving average for the mean
and variance of the batch norm process. The default is 0.
scorer : str or function, optional
The function used in the score method. If custom, it must accept
the true Y values and the predicted y values as the first two
arguments of the function.
The default is 'accuracy'.
shuffle : bool, optional
Shuffle the training set before training. The default is False.
print_cost : bool, optional
Print the cost (and possibly another metric) at each eval_step.
The default is True.
random_state : int, optional
The random state of the process (for reproducibility).
The default is 42.
Returns
-------
None.
"""
self.model = model
self.num_layers = len(model.keys())
self.eval_size = eval_size
self.batch_size = batch_size
self.num_iterations = num_iterations
self.optimizer_args = optimizer_args
self.m_scale = m_scale
self.bn_tol = bn_tol
self.bn_momentum = bn_momentum
self.print_cost = print_cost
self.random_state = random_state
self.shuffle = shuffle
self.scorer = scorer
self.activations = {'linear': linear,
'mse': mse,
'mse_linear_der': mse_linear_der,
'relu': relu,
'relu_der': relu_der,
'elu': elu,
'elu_der': elu_der,
'selu': selu,
'selu_der': selu_der,
                            'gelu': gelu,
'gelu_approx': gelu_approx,
'gelu_speedy': gelu_speedy,
'gelu_der': gelu_der,
'gelu_speedy_der': gelu_speedy_der,
'sigmoid': sigmoid,
'sigmoid_der': sigmoid_der,
'softmax': softmax,
'softmax_der': softmax_der,
'cross_entropy': cross_entropy,
'cross_entropy_der': cross_entropy_der,
'cross_entropy_binary': cross_entropy_binary,
'cross_entropy_binary_der': cross_entropy_binary_der,
'softmax_cross_entropy_der': softmax_cross_entropy_der,
'softmax_cross_entropy_der_fullmath': softmax_cross_entropy_der_fullmath,
'sigmoid_cross_entropy_binary_der': sigmoid_cross_entropy_binary_der
}
self.initializers = {'he_uniform': he_uniform,
'he_normal': he_normal,
'xavier_uniform': xavier_uniform,
'xavier_normal': xavier_normal,
'lecun_uniform': lecun_uniform,
'lecun_normal': lecun_normal,
'random': random
}
self.optimizers = {'adam': adam,
'gradient_descent': gradient_descent}
self.scorer = {'accuracy': accuracy,
'rpmse': rpmse,
'bias': bias
}
        self.optimizer = optimizer
self.update_parameters = self.optimizers[optimizer]
def initialize_wb(self, X):
"""
Initialize the network.
Parameters
----------
X : numpy array
The input data.
Returns
-------
wb_list : dict
A dictionary of the weights/biases for each layer.
"""
wb_list = {}
xrows = X.shape[0]
X = X.reshape(xrows, -1)
prev_col = X.shape[1]
for k, key in enumerate(self.model.keys()):
cur_row = prev_col
cur_col = self.model[key]['depth']
init_func = self.initializers[self.model[key]['initializer']]
# Weights cannot be initialized to zero, or the model can't train
W = init_func(cur_row, cur_col)
# Zero is a common bias initialization. Some argue that a small
# positive value like 0.01 should be used instead. Others argue
# that makes it worse. LSTMs typically initialize bias at 1
b = np.zeros(shape=(1, cur_col))
wb_list["Weight" + str(k)] = W
wb_list["bias" + str(k)] = b
if self.model[key]['use_batch_norm']:
wb_list['gamma' + str(k)] = np.ones(shape=(1, cur_col))
wb_list['beta' + str(k)] = np.zeros(shape=(1, cur_col))
prev_col = cur_col
return wb_list
def forward_prop(self, X, wb, batch_norm, train=True, sample=False):
"""
The forward propagation step
Parameters
----------
X : numpy array
The input data.
wb : dict
A dictionary of the weights/biases for each layer.
batch_norm : dict
A dictionary containing the initial (or previous) batch norm
results.
train : bool, optional
Whether the forward propagation method is being used to train or
calculate the network in it's current state.
The default is True.
sample : bool, optional
Whether or not the forward propagation is being used to generate
random smaples of the output from the distribution created using
dropout. The default is False.
Returns
-------
hz : numpy array
The final predicted output.
zs : dict
A dictionary of the linearly (affine transform) activated values
for each layer.
batch_norm : dict
A dictionary containing the initial (or previous) batch norm
results.
hzs : dict
A dictionary of the fully activated values for each layer
dropout : dict
A dictionary of the dropout status for each layer.
regularizers : dict
A dictionary of the regularization status for each layer.
"""
hz = X
zs = {}
hzs = {}
dropout = {}
regularizers = {}
for i, key in enumerate(self.model.keys()):
indx = str(i)
z = hz.dot(wb["Weight" + indx]) + wb["bias" + indx]
# It is a highly debated topic whether or not batch norm or dropout
# first, and on top of that, whether batch norm should occur before
# or after activation. It likely depends on specific situations on
# types of networks. Other sources say they shouldn't be used
# together anyway. Since this is a demonstration, I chose to put
# batch norm first, before acivation. Note: moving them around
# will affect the derivative, so if doing by hand and not using
# autograd etc., watch out for this.
if self.model[key]['use_batch_norm']:
if train:
batch_norm['mu' + indx] = np.mean(z, axis=0)
batch_norm['var' + indx] = np.var(z, axis=0)
batch_norm['z_mu' + indx] = z - batch_norm['mu' + indx]
batch_norm['std' + indx] = np.sqrt(batch_norm['var' + indx] + self.bn_tol)
batch_norm['xhat' + indx] = batch_norm['z_mu' + indx]/batch_norm['std' + indx]
batch_norm['norm_z' + indx] = wb['gamma' + indx]*batch_norm['xhat' + indx] + wb['beta' + indx]
# Exponential running mean for mu/var, if desired. Use
# momentum = 0 for regular batch norm process
batch_norm['mu_r' + indx] = self.bn_momentum*batch_norm['mu_r' + indx] + (1 - self.bn_momentum)*batch_norm['mu' + indx]
batch_norm['var_r' + indx] = self.bn_momentum*batch_norm['var_r' + indx] + (1 - self.bn_momentum)*batch_norm['var' + indx]
else:
batch_norm['xhat' + indx] = (z - batch_norm['mu_r' + indx])/np.sqrt(batch_norm['var_r' + indx] + self.bn_tol)
batch_norm['norm_z' + indx] = wb['gamma' + indx]*batch_norm['xhat' + indx] + wb['beta' + indx]
else:
batch_norm['norm_z' + indx] = z
actf = self.activations[self.model[key]['activation']]
hz = actf(batch_norm['norm_z' + indx], **self.model[key]['activation_args'])
if i != self.num_layers - 1:
if train or sample:
dropout_keep_prob = self.model[key]['dropout_keep_prob']
else:
dropout_keep_prob = 1
drop_mask = np.random.uniform(size=hz.shape)
dropout["Weight" + indx] = drop_mask <= dropout_keep_prob
hz = (hz*dropout["Weight" + indx])/dropout_keep_prob
lamda_w = self.model[key]['lambda']['Weight']
p_w = self.model[key]['lp_norm']['Weight']
lamda_a = self.model[key]['lambda']['activity']
p_a = self.model[key]['lp_norm']['activity']
lamda_b = self.model[key]['lambda']['bias']
p_b = self.model[key]['lp_norm']['bias']
# kernel/weight regularizer
# Note: information here is only recorded, it does not affect
# the forward propagation calculations at all
if train:
regularizers["Weight" + indx] = (1/p_w)*lamda_w*norm(wb["Weight" + indx], p_w)
regularizers["activity" + indx] = (1/p_a)*lamda_a*norm(hz, p_a)
regularizers["bias" + indx] = (1/p_b)*lamda_b*norm(wb["bias" + indx], p_b)
zs['z' + str(i)] = z
hzs['hz' + str(i)] = hz
return hz, zs, batch_norm, hzs, dropout, regularizers
def back_prop(self, X, Y, wb, zs, batch_norm, hzs, dropout):
"""
The backward propagation step.
Parameters
----------
X : numpy array
The input data.
Y : numpy array
The true Y values.
wb : dict
A dictionary of the weights/biases for each layer.
zs : dict
A dictionary of the linearly (affine transform) activated values
for each layer.
batch_norm : dict
A dictionary containing the initial (or previous) batch norm
results.
hzs : dict
A dictionary of the fully activated values for each layer
dropout : dict
A dictionary of the dropout status for each layer.
Returns
-------
dwdb : dict
A dictionary of the gradients with respect to the weights and
biases.
"""
dwdb = {}
batch_m = X.shape[0]
keys = list(self.model.keys())
for i in range(self.num_layers - 1, -1, -1):
lamda_w = self.model[keys[i]]['lambda']['Weight']
p_w = self.model[keys[i]]['lp_norm']['Weight']
lamda_a = self.model[keys[i]]['lambda']['activity']
p_a = self.model[keys[i]]['lp_norm']['activity']
lamda_b = self.model[keys[i]]['lambda']['bias']
p_b = self.model[keys[i]]['lp_norm']['bias']
if i == self.num_layers - 1:
dcostoutf = self.activations[self.model[keys[i]]['derivative']]
dZ = dcostoutf(Y, hzs["hz" + str(i)], zs["z" + str(i)],
**self.model[keys[i]]['derivative_args'])/(batch_m*self.m_scale)
dZ += lamda_a*norm_der(hzs["hz" + str(i)], p_a)/(batch_m*self.m_scale)
# Batchnorm step, if applicable
if self.model[keys[i]]['use_batch_norm']:
dxhat = dZ * wb["gamma" + str(i)]
dvar = -0.5*np.sum(dxhat*batch_norm["z_mu"+str(i)], axis=0)*(1/batch_norm["std"+str(i)]**3)
dxdstd = dxhat/batch_norm["std"+str(i)]
dmu = -np.sum(dxdstd, axis=0)
dmu -= 2*dvar*np.mean(batch_norm["z_mu"+str(i)], axis=0)
dgamma = np.sum(dZ*batch_norm["xhat"+str(i)], axis=0)
dbeta = np.sum(dZ, axis=0)
dwdb["gamma" + str(i)] = dgamma
dwdb["beta" + str(i)] = dbeta
dZ = dxdstd + 2*dvar*batch_norm["z_mu"+str(i)]/zs["z"+str(i)].shape[0]
dZ += dmu/zs["z"+str(i)].shape[0]
else:
dZ = dZ.dot(wb["Weight" + str(i + 1)].T)
dropout_keep_prob = self.model[keys[i]]['dropout_keep_prob']
dZ = dZ * dropout["Weight" + str(i)]/dropout_keep_prob
dactf = self.activations[self.model[keys[i]]['derivative']]
dZ = dZ * dactf(zs["z" + str(i)],
**self.model[keys[i]]['activation_args'])
dZ = dZ + lamda_a*norm_der(hzs["hz" + str(i)], p_a)/(batch_m*self.m_scale)
# Batchnorm step, if applicable
if self.model[keys[i]]['use_batch_norm']:
dxhat = dZ * wb["gamma" + str(i)]
dvar = -0.5*np.sum(dxhat*batch_norm["z_mu"+str(i)], axis=0)*(1/batch_norm["std"+str(i)]**3)
dxdstd = dxhat/batch_norm["std"+str(i)]
dmu = -np.sum(dxdstd, axis=0)
dmu -= 2*dvar*np.mean(batch_norm["z_mu"+str(i)], axis=0)
dgamma = np.sum(dZ*batch_norm["xhat"+str(i)], axis=0)
dbeta = np.sum(dZ, axis=0)
dwdb["gamma" + str(i)] = dgamma
dwdb["beta" + str(i)] = dbeta
dZ = dxdstd + 2*dvar*batch_norm["z_mu"+str(i)]/zs["z"+str(i)].shape[0]
dZ += dmu/zs["z"+str(i)].shape[0]
if i == 0:
A = X
else:
A = hzs["hz" + str(i - 1)]
# m doesn't come from the derivative. We are dividing by m because we
# technically calculated the gradient for each observation. Thus, we
# average them to get the overall impact. We could also just sum and
# not use the mean, but having a lower value is helpful to not run
# into errors with large numbers
dW = (1/(batch_m*self.m_scale))*(np.dot(A.T, dZ) + lamda_w*norm_der(wb["Weight" + str(i)], p_w)/p_w)
dB = (1/(batch_m*self.m_scale))*(np.sum(dZ, axis=0, keepdims=True) + lamda_b*norm_der(wb["bias" + str(i)], p_b)/p_b)
dwdb["Weight" + str(i)] = dW
dwdb["bias" + str(i)] = dB
return dwdb
def fit(self, X, Y):
"""
Fits the Neural Network.
Parameters
----------
X : numpy array
The input data.
Y : numpy array
The true Y values.
Returns
-------
self
The fitted model.
"""
# This is only here for debugging or reproducibility
if self.random_state is not None:
np.random.seed(self.random_state)
# Store total training observations
self.m = X.shape[0]
# Reshape Y so that it is a matrix, even in 1D
Y = Y.reshape(self.m, -1)
self.model['output']['depth'] = Y.shape[1]
# Check for an additional metric to evaluate other than cost
output_layer_components = list(self.model['output'].keys())
has_metric = False
if 'evaluation_metric' in output_layer_components:
if isinstance(self.model['output']['evaluation_metric'], str):
met_func = self.scorer[self.model['output']['evaluation_metric']]
else:
met_func = self.model['output']['evaluation_metric']
has_metric = True
# Initialize the graph
wb = self.initialize_wb(X)
# Total batches needed for minibatch
if self.batch_size > self.m:
total_batches = self.m
else:
total_batches = int(self.m/self.batch_size) + sum([self.m % self.batch_size != 0])
# Initialize result arrays and weights
costs = np.zeros(self.num_iterations*total_batches)
metric = np.zeros(self.num_iterations*total_batches)
batch_norm = {'mu_r'+str(i): 0 for i in range(self.num_layers)}
batch_norm.update({'var_r'+str(i): 1 for i in range(self.num_layers)})
batches = [(b*self.batch_size, (b+1)*self.batch_size) for b in range(total_batches)]
# TODO: Figure out if this (and below) is the proper way to do minibatch
if self.shuffle:
            rand_indx = np.random.permutation(X.shape[0])
X = X[rand_indx, :]
Y = Y[rand_indx, :]
# marker for counting total iterations in minibatch
count = 0
for i in range(self.num_iterations):
# TODO: figure out if each batch is meant to be random in minibatch
#batch_order = np.random.choice(range(total_batches), total_batches)
batch_order = range(total_batches)
for j in batch_order:
X_batch = X[batches[j][0]:batches[j][1], :]
Y_batch = Y[batches[j][0]:batches[j][1], :]
batch_m = X_batch.shape[0]
# Forward propagation.
(hz, zs, batch_norm, hzs, dropout, regularizers) = self.forward_prop(X_batch,
wb,
batch_norm,
train=True,
sample=False)
# Calculate cost without regularization
costf = self.activations[self.model['output']['cost']]
cost = costf(Y_batch, hz,
**self.model['output']['cost_args'])
# Get regularization information for the cost function,
# the activity, weight, and bias regularizers
norms_w = np.sum([regularizers["Weight"+str(l)] for l in range(self.num_layers)])
norms_a = np.sum([regularizers["activity"+str(l)] for l in range(self.num_layers)])
norms_b = np.sum([regularizers["bias"+str(l)] for l in range(self.num_layers)])
# Update Cost with regularization
cost = (cost + norms_w + norms_a + norms_b)/(batch_m*self.m_scale)
# Backpropagation.
dwdb = self.back_prop(X_batch, Y_batch, wb, zs,
batch_norm, hzs, dropout)
# Update parameters with optimizing function
if self.optimizer == 'adam':
if i == 0:
self.optimizer_args['mt'] = {}
self.optimizer_args['vt'] = {}
for key in wb.keys():
self.optimizer_args['mt'][key] = np.zeros(wb[key].shape)
                            self.optimizer_args['vt'][key] = np.zeros(wb[key].shape)
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
to_list = ak._v2.operations.convert.to_list
def test_EmptyArray():
v2a = ak._v2.contents.emptyarray.EmptyArray()
with pytest.raises(IndexError):
v2a[np.array([0, 1], np.int64)]
def test_NumpyArray():
v2a = ak._v2.contents.numpyarray.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3]))
resultv2 = v2a[np.array([0, 1, -2], np.int64)]
assert to_list(resultv2) == [0.0, 1.1, 2.2]
assert v2a.typetracer[np.array([0, 1, -2], np.int64)].form == resultv2.form
v2b = ak._v2.contents.numpyarray.NumpyArray(
np.arange(2 * 3 * 5, dtype=np.int64).reshape(2, 3, 5)
)
resultv2 = v2b[np.array([1, 1, 1], np.int64)]
assert to_list(resultv2) == [
[[15, 16, 17, 18, 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]],
[[15, 16, 17, 18, 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]],
[[15, 16, 17, 18, 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]],
]
assert v2b.typetracer[np.array([1, 1, 1], np.int64)].form == resultv2.form
def test_RegularArray_NumpyArray():
v2a = ak._v2.contents.regulararray.RegularArray(
ak._v2.contents.numpyarray.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5])),
3,
)
resultv2 = v2a[np.array([0, 1], np.int64)]
assert to_list(resultv2) == [[0.0, 1.1, 2.2], [3.3, 4.4, 5.5]]
assert v2a.typetracer[np.array([0, 1], np.int64)].form == resultv2.form
v2b = ak._v2.contents.regulararray.RegularArray(
ak._v2.contents.emptyarray.EmptyArray(), 0, zeros_length=10
)
resultv2 = v2b[np.array([0, 0, 0], np.int64)]
assert to_list(resultv2) == [[], [], []]
assert v2b.typetracer[np.array([0, 0, 0], np.int64)].form == resultv2.form
assert to_list(resultv2) == [[], [], []]
def test_ListArray_NumpyArray():
v2a = ak._v2.contents.listarray.ListArray(
ak._v2.index.Index(np.array([4, 100, 1], np.int64)),
ak._v2.index.Index(np.array([7, 100, 3, 200], np.int64)),
ak._v2.contents.numpyarray.NumpyArray(
np.array([6.6, 4.4, 5.5, 7.7, 1.1, 2.2, 3.3, 8.8])
),
)
resultv2 = v2a[np.array([1, -1], np.int64)]
assert to_list(resultv2) == [[], [4.4, 5.5]]
assert v2a.typetracer[np.array([1, -1], np.int64)].form == resultv2.form
def test_ListOffsetArray_NumpyArray():
v2a = ak._v2.contents.listoffsetarray.ListOffsetArray(
ak._v2.index.Index(np.array([1, 4, 4, 6, 7], np.int64)),
ak._v2.contents.numpyarray.NumpyArray([6.6, 1.1, 2.2, 3.3, 4.4, 5.5, 7.7]),
)
resultv2 = v2a[np.array([1, 2], np.int64)]
assert to_list(resultv2) == [[], [4.4, 5.5]]
assert v2a.typetracer[np.array([1, 2], np.int64)].form == resultv2.form
@pytest.mark.skipif(
ak._util.win,
reason="unstable dict order. -- on Windows",
)
def test_RecordArray_NumpyArray():
v2a = ak._v2.contents.recordarray.RecordArray(
[
ak._v2.contents.numpyarray.NumpyArray(np.array([0, 1, 2, 3, 4], np.int64)),
ak._v2.contents.numpyarray.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5])
),
],
["x", "y"],
)
resultv2 = v2a[np.array([1, 2], np.int64)]
assert to_list(resultv2) == [{"x": 1, "y": 1.1}, {"x": 2, "y": 2.2}]
assert v2a.typetracer[np.array([1, 2], np.int64)].form == resultv2.form
v2b = ak._v2.contents.recordarray.RecordArray(
[
ak._v2.contents.numpyarray.NumpyArray(np.array([0, 1, 2, 3, 4], np.int64)),
ak._v2.contents.numpyarray.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5])
),
],
None,
)
resultv2 = v2b[np.array([0, 1, 2, 3, -1], np.int64)]
assert to_list(resultv2) == [(0, 0.0), (1, 1.1), (2, 2.2), (3, 3.3), (4, 4.4)]
assert v2b.typetracer[np.array([0, 1, 2, 3, -1], np.int64)].form == resultv2.form
v2c = ak._v2.contents.recordarray.RecordArray([], [], 10)
resultv2 = v2c[np.array([0], np.int64)]
assert to_list(resultv2) == [{}]
assert v2c.typetracer[np.array([0], np.int64)].form == resultv2.form
v2d = ak._v2.contents.recordarray.RecordArray([], None, 10)
resultv2 = v2d[np.array([0], np.int64)]
assert to_list(resultv2) == [()]
assert v2d.typetracer[np.array([0], np.int64)].form == resultv2.form
def test_IndexedArray_NumpyArray():
v2a = ak._v2.contents.indexedarray.IndexedArray(
ak._v2.index.Index(np.array([2, 2, 0, 1, 4, 5, 4], np.int64)),
ak._v2.contents.numpyarray.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])),
)
resultv2 = v2a[np.array([0, 1, 4], np.int64)]
assert to_list(resultv2) == [3.3, 3.3, 5.5]
assert v2a.typetracer[np.array([0, 1, 4], np.int64)].form == resultv2.form
def test_IndexedOptionArray_NumpyArray():
v2a = ak._v2.contents.indexedoptionarray.IndexedOptionArray(
ak._v2.index.Index(np.array([2, 2, -1, 1, -1, 5, 4], np.int64)),
ak._v2.contents.numpyarray.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])),
)
resultv2 = v2a[np.array([0, 1, -1], np.int64)]
assert to_list(resultv2) == [3.3, 3.3, 5.5]
assert v2a.typetracer[np.array([0, 1, -1], np.int64)].form == resultv2.form
def test_ByteMaskedArray_NumpyArray():
v2a = ak._v2.contents.bytemaskedarray.ByteMaskedArray(
ak._v2.index.Index(np.array([1, 0, 1, 0, 1], np.int8)),
ak._v2.contents.numpyarray.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])),
valid_when=True,
)
resultv2 = v2a[np.array([0, 1, 2], np.int64)]
assert to_list(resultv2) == [1.1, None, 3.3]
assert v2a.typetracer[np.array([0, 1, 2], np.int64)].form == resultv2.form
v2b = ak._v2.contents.bytemaskedarray.ByteMaskedArray(
ak._v2.index.Index(np.array([0, 1, 0, 1, 0], np.int8)),
ak._v2.contents.numpyarray.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])),
valid_when=False,
)
resultv2 = v2b[np.array([0, 1, 2], np.int64)]
assert to_list(resultv2) == [1.1, None, 3.3]
assert v2b.typetracer[np.array([0, 1, 2], np.int64)].form == resultv2.form
def test_BitMaskedArray_NumpyArray():
v2a = ak._v2.contents.bitmaskedarray.BitMaskedArray(
ak._v2.index.Index(
np.packbits(
np.array(
[
1,
1,
1,
1,
0,
0,
0,
0,
1,
0,
1,
0,
1,
],
np.uint8,
)
)
),
ak._v2.contents.numpyarray.NumpyArray(
np.array(
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6]
)
),
valid_when=True,
length=13,
lsb_order=False,
)
resultv2 = v2a[np.array([0, 1, 4], np.int64)]
assert to_list(resultv2) == [0.0, 1.0, None]
assert v2a.typetracer[np.array([0, 1, 4], np.int64)].form == resultv2.form
v2b = ak._v2.contents.bitmaskedarray.BitMaskedArray(
ak._v2.index.Index(
np.packbits(
np.array(
[
0,
0,
0,
0,
1,
1,
1,
1,
0,
1,
0,
1,
0,
],
np.uint8,
)
)
),
ak._v2.contents.numpyarray.NumpyArray(
np.array(
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6]
)
),
valid_when=False,
length=13,
lsb_order=False,
)
    resultv2 = v2b[np.array([0, 1, 4], np.int64)]
# SPDX-FileCopyrightText: (c) 2021 <NAME> <github.com/rtmigo>
# SPDX-License-Identifier: MIT
from math import floor
from pathlib import Path
from typing import Tuple
import cv2
import numpy as np
import random
from PIL import Image, ImageDraw
import extcolors
from PIL import Image # importing with tweaked options
# todo Find a way to add dithering noise to 8-bit grading
#
# It looks like in 2021 Pillow cannot alpha-blend 16-bit or 32-bit images.
# So we need to keep our gradient mask in 8 bit.
#
# To avoid banding, we may want to create 16 or 32 bit gradient, and then
# convert it to dithered 8-bit version. But it seems, Pillow cannot do such
# conversion either (https://github.com/python-pillow/Pillow/issues/3011)
#
# So all colors are 8 bit now. Maybe we should find a way to add some random
# noise to out gradient. But Pillow will not create noise, we need to generate
# it pixel-by-pixel, and probably not in native Python
def horizontal_gradient_256_scaled(size: Tuple[int, int],
reverse=True) -> Image:
gradient = Image.new('L', (256, 1), color=None)
for x in range(256):
if reverse:
gradient.putpixel((x, 0), x)
else:
gradient.putpixel((x, 0), 255 - x)
return gradient.resize(size)
def vertical_gradient_256_scaled(size: Tuple[int, int], reverse=True) -> Image:
gradient = Image.new('L', (1, 256), color=None)
for x in range(256):
if reverse:
gradient.putpixel((0, x), x)
else:
gradient.putpixel((0, x), 255 - x)
return gradient.resize(size)
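# A minimal sketch (not part of the original pipeline) of the dithering idea from
# the TODO block above: build the ramp at float precision, add a little uniform
# noise, then quantise down to 8 bit. `noise_amplitude` is an assumed tuning knob.
def horizontal_gradient_256_dithered(size: Tuple[int, int],
                                     noise_amplitude: float = 0.5,
                                     reverse: bool = True) -> Image:
    width, height = size
    ramp = np.linspace(0.0, 255.0, width, dtype=np.float32)
    if not reverse:
        ramp = 255.0 - ramp
    grid = np.tile(ramp, (height, 1))
    # A touch of random noise breaks up the banding a plain 8-bit ramp would show.
    grid += np.random.uniform(-noise_amplitude, noise_amplitude, size=grid.shape)
    grid = np.clip(np.round(grid), 0, 255).astype(np.uint8)
    return Image.fromarray(grid, mode='L')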
def stripe_size(full_size: int, pct: float) -> int:
if not 0 <= pct <= 0.5:
raise ValueError(pct)
result = floor(full_size * pct)
assert result * 2 <= full_size
return result
class Mixer:
def __init__(self, source: Image, pct=1.0 / 3):
self.source = source
self.pct = pct
@property
def src_width(self) -> int:
return self.source.size[0]
@property
def src_height(self) -> int:
return self.source.size[1]
@property
def horizontal_stripe_width(self) -> int:
return stripe_size(self.src_width, self.pct)
@property
def vertical_stripe_height(self) -> int:
return stripe_size(self.src_height, self.pct)
def _left_stripe_image(self):
return self.source.crop(
(0, 0, self.horizontal_stripe_width, self.src_height))
def _right_stripe_image(self):
return self.source.crop(
(self.src_width - self.horizontal_stripe_width, 0,
self.src_width, self.src_height))
def _bottom_stripe_image(self):
return self.source.crop(
(0, self.src_height - self.vertical_stripe_height,
self.src_width, self.src_height))
def _to_rgba(self, image: Image) -> Image:
if image.mode != 'RGBA':
converted = image.convert('RGBA')
assert converted is not None
return converted
return image
def make_seamless_h(self) -> Image:
stripe = self._to_rgba(self._right_stripe_image())
stripe.putalpha(
horizontal_gradient_256_scaled(stripe.size, reverse=False))
overlay = Image.new('RGBA', size=self.source.size, color=0x00)
overlay.paste(stripe, box=(0, 0))
comp = Image.alpha_composite(self._to_rgba(self.source),
overlay)
comp = comp.crop((0,
0,
comp.size[0] - self.horizontal_stripe_width,
comp.size[1]))
return comp
def make_seamless_v(self) -> Image:
stripe = self._to_rgba(self._bottom_stripe_image())
stripe.putalpha(
vertical_gradient_256_scaled(stripe.size, reverse=False))
overlay = Image.new('RGBA', size=self.source.size, color=0x00)
overlay.paste(stripe, box=(0, 0))
comp = Image.alpha_composite(self._to_rgba(self.source),
overlay)
comp = comp.crop((0,
0,
comp.size[0],
comp.size[1] - self.vertical_stripe_height))
return comp
def img2tex(src: Image, pct=0.25):
mixer1 = Mixer(src, pct=pct)
result = mixer1.make_seamless_h()
mixer2 = Mixer(result, pct=pct)
result = mixer2.make_seamless_v()
if result.mode != "RGB":
result = result.convert("RGB")
return result
def tile(source: Image,
         horizontal: int = 3, vertical: int = 3) -> Image:
    """Tiles `horizontal` x `vertical` copies of `source` side by side and
    returns the merged image."""
image = source
w, h = image.size
total_width = w * horizontal
total_height = h * vertical
new_im = Image.new('RGB', (total_width, total_height))
for x in range(horizontal):
for y in range(vertical):
new_im.paste(image, (w * x, h * y))
return new_im
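# Example usage (file names are placeholders): turn a photo into a tileable
# texture with img2tex() and preview a 3x3 tiling of the result.
#   src = Image.open('pattern_photo.jpg')
#   seamless = img2tex(src, pct=0.25)
#   preview = tile(seamless, horizontal=3, vertical=3)
#   preview.save('pattern_tiled.jpg')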
def apparel_generation(pattern: Image, templatePath: str, black = True):
pattern = cv2.cvtColor(np.array(pattern), cv2.COLOR_RGB2BGR)
masked = cv2.imread(templatePath)
pattern=cv2.resize(pattern, (256,256),interpolation = cv2.INTER_AREA)
masked=cv2.resize(masked, (256,256),interpolation = cv2.INTER_AREA)
if black:
thresh = cv2.threshold(masked,255, 0, cv2.THRESH_TRUNC)[1]
result = pattern.copy()
result= np.where(thresh==255, pattern, thresh)
result[result==[36,28,237]]=255
else:
gray = cv2.cvtColor(masked, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU)[1]
result = pattern.copy()
result[thresh==0] = (255,255,255)
result = cv2.cvtColor(result, cv2.COLOR_BGR2RGB)
result = Image.fromarray(result)
return result
def complementary_designs(img: Image, direction: str):
img = img.resize((256,256))
colors,_ = extcolors.extract_from_image(img)
final_img =Image.new('RGB',(256,256),colors[0][0])
draw_final_img = ImageDraw.Draw(final_img)
dimensions=256
    # Choose the stripe spacing up front so that 'Vertical'/'Horizontal' patterns
    # (which skip the branch below) also get a valid integer spacing.
    spacing = random.choice([2, 4])
    if direction=='Checked' or direction=='Diagonal-Left' or direction=='Diagonal-Right':
        dimensions = 256*2
for i in range(0, dimensions, 15):
if i%spacing==0:
color=colors[1]
else:
color=colors[2]
if direction=='Vertical':
draw_final_img.line([(i, 0),(i,256)], width=3,
fill=color[0])
elif direction=='Horizontal':
draw_final_img.line([(0, i),(256,i)], width=3,
fill=color[0])
elif direction=='Diagonal-Left':
draw_final_img.line([(i, 0),(i-final_img.size[0],256)], width=2,
fill=color[0])
elif direction=='Diagonal-Right':
draw_final_img.line([(0, i-final_img.size[0]),(256,i)], width=2,
fill=color[0])
elif direction=='Checked':
draw_final_img.line([(0, i-final_img.size[0]),(256,i)], width=2,
fill=color[0])
draw_final_img.line([(i, 0),(i-final_img.size[0],256)], width=2,
fill=color[0])
elif direction=="Zig-Zag":
for i in range(0,256,10):
for j in range(0,256,10):
# draw_final_img.line([(0, 0), (10,10),(20,0),(30,10),(40,0)], width=2, fill="green",joint="curve")
draw_final_img.line([(j+10,i),(j+30,i)], width=2, fill="green",joint="curve")
final_img.show()
break
return final_img
def post_processing(img: Image, choose):
if choose == 1:
        # Colour-conversion code assumed here, mirroring apparel_generation above.
        pattern = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np, h5py
import scipy.io as sio
import sys
import random
import kNN_cosine
import re
from numpy import *
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def compute_accuracy(test_word, test_visual, test_id, test_label):
global left_w1
word_pre = sess.run(left_w1, feed_dict={word_features: test_word})
test_id = np.squeeze(np.asarray(test_id))
outpre = [0]*6180
test_label = np.squeeze(np.asarray(test_label))
test_label = test_label.astype("float32")
for i in range(6180):
outputLabel = kNN_cosine.kNNClassify(test_visual[i,:], word_pre, test_id, 1)
outpre[i] = outputLabel
correct_prediction = tf.equal(outpre, test_label)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
result = sess.run(accuracy, feed_dict={
word_features: test_word, visual_features: test_visual})
return result
# # data
f=sio.loadmat('./data/AwA_data/wordvector/train_word.mat')
word=np.array(f['train_word'])
word.shape
f=sio.loadmat('./data/AwA_data/train_googlenet_bn.mat')
x=np.array(f['train_googlenet_bn'])
x.shape
f=sio.loadmat('./data/AwA_data/test_googlenet_bn.mat')
x_test=np.array(f['test_googlenet_bn'])
x_test.shape
f=sio.loadmat('./data/AwA_data/test_labels.mat')
test_label=np.array(f['test_labels'])
test_label.shape
f=sio.loadmat('./data/AwA_data/testclasses_id.mat')
test_id=np.array(f['testclasses_id'])
test_id.shape
f=sio.loadmat('./data/AwA_data/wordvector/test_vectors.mat')
word_pro=np.array(f['test_vectors'])
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
# For reproducibility
np.random.seed(1000)
#!/usr/bin/env python
"""
This file contains the functions for calculating the reionization data used to
make plots with ``ReionPlots.py``. It is called from ``paper_plots.py`` with a
dictionary containing which exact plots we require.
You should not need to touch this file. Please refer to the ``paper_plots.py``
documentation for full information on how to use this plotting pipeline.
Author: <NAME>
Version: 0.2
"""
from __future__ import print_function
import numpy as np
from numpy.fft import fftn, ifftn
import scipy.integrate as integrate
import os
import time
import random
import AllVars as av
import ReadScripts as rs
import PlotScripts as ps
import CollectiveStats as collective
import GalaxyData as gd
import ReionPlots as reionplot
def calc_duration(z_array_reion_allmodels, lookback_array_reion_allmodels,
mass_frac_allmodels, duration_definition):
"""
Determines the duration of reionization.
Parameters
----------
z_array_reion_allmodels : 2D nested list of floats. Outer length is number
of models, inner is number of snapshots.
The redshift at each snapshot for each model.
lookback_array_reion_allmodels : 2D nested list of floats. Dimensions
identical to ``z_array_reion_allmodels``.
The lookback time at each snapshot for each model. Units are Myr.
mass_frac_allmodels : 2D nested list of floats. Dimensions equal to
``z_array_reion_allmodels``.
The mass weighted neutral fraction at each snapshot for each model.
duration_definition : List of floats with length 3.
The neutral fractions that define reionization. The first element is
the start, second is the mid-point and third is the end of
reionization.
Returns
---------
duration_z, duration_t : 2D nested list of floats. Outer length is number
of models, inner is 3.
The redshift and lookback time corresponding to each element of the
``duration_definition`` list.
reion_completed : List of integers.
        Flag to denote whether reionization has been completed by the final
snapshot.
"""
duration_z = []
duration_t = []
reion_completed = []
# We need to be careful here. For low values of fesc, reionization
# won't actually complete. Hence we need to check `duration_z` and see
# those models in which reionization is 'completed' at the last snapshot.
for model_number in range(len(mass_frac_allmodels)):
mass_frac_thismodel = mass_frac_allmodels[model_number]
duration_z.append([])
duration_t.append([])
for val in duration_definition:
idx = (np.abs(mass_frac_thismodel - val)).argmin()
duration_z[model_number].append(z_array_reion_allmodels[model_number][idx])
duration_t[model_number].append(lookback_array_reion_allmodels[model_number][idx])
if (val == duration_definition[-1]) and \
(idx == len(mass_frac_thismodel)-1):
reion_completed.append(0)
elif(val == duration_definition[-1]):
reion_completed.append(1)
return duration_z, duration_t, reion_completed
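# For example, with duration_definition = [0.90, 0.50, 0.01], duration_z[m] holds
# the redshifts at which model m is closest to 90%, 50% and 1% neutral fraction
# (nearest-snapshot match), duration_t[m] the corresponding lookback times, and
# reion_completed[m] flags whether the 1% point was reached before the last snapshot.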
def T_naught(z, h, OM, OB):
"""
Calculates the 21cm brightness temperature for specified cosmology +
redshift.
Parameters
---------
z : Float
Redshift we're calculating at.
h : Float
The Hubble h parameter defined as H = 100*h.
OM : Float
Critical matter density.
OB : Float
Critical Baryon density.
Returns
-------
T0: Float
21cm brightness temperature in units of mK.
Units
-----
All input parameters are unitless.
Output brightness temperature has units of mK.
"""
T0 = 28.5 * ((1.0+z)/10.0)**(0.5) * OB/0.042 * h/0.73 * (0.24/OM)**(0.5)
return T0
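# Rough sanity check (illustrative numbers, not taken from any particular paper):
# for z = 9 with h = 0.678, OM = 0.308, OB = 0.0484 this evaluates to
#   T_naught(9.0, 0.678, 0.308, 0.0484) ~ 27 mK,
# consistent with the ~20-30 mK scale usually quoted for the EoR 21cm signal.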
def calc_ps(XHII, density, boxsize):
"""
Calculates the 21cm and XHI power spectrum.
Parameters
---------
XHII: 3-Dimensional Array of floats. Required.
Grid that contains the fraction of ionized hydrogen (XHII) in each
cell.
density: 3-Dimensional Array of floats. Required.
Grid that contains the overdensity (rho/<rho>) of dark matter in each
cell.
Returns
-------
kmid_bins: 1-Dimensional array of floats.
The middle of each wavenumber bin.
powerspec: 1-Dimensional array of floats.
The 21cm power spectrum in each k bin.
    p_err: 1-Dimensional array of floats.
The error on the 21cm power spectrum in each k bin.
The same again except for just the HII power spectrum.
Units
-----
XHII is unitless.
density is the overdensity, rho/<rho>.
The wavenumber bins (kmid_bins) is in units of h/Mpc.
The 21cm power spectrum (and associated error) is in units of Mpc^3/h^3/(2*pi).
"""
GridSize = np.shape(XHII)[0]
meanion = np.mean(XHII)
Tb = (1.0 - XHII)*density
modes = ifftn(Tb)
kmid_bins, powerspec, p_err = av.modes_to_pspec(modes,
boxsize=boxsize)
kmid_bins_XHII, pspec_XHII, p_err_XHII = av.modes_to_pspec(ifftn(XHII),
boxsize=boxsize)
return (kmid_bins, powerspec, p_err,
kmid_bins_XHII, pspec_XHII, p_err_XHII)
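# Note: the spectra returned here are the raw binned values; downstream (see
# determine_ps_fixed_XHI below) the 21cm spectrum is scaled by T0^2 * k^3 * 4*pi
# and the HII spectrum by k^3 * 4*pi to form band powers.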
def determine_ps_fixed_XHI(rank, size, comm,
z_array_reion_allmodels, cosmology_allmodels,
mass_frac_allmodels, XHII_fbase_allmodels,
XHII_precision_allmodels, density_fbase_allmodels,
density_precision_allmodels, GridSize_allmodels,
boxsize_allmodels, first_snap_allmodels,
fixed_XHI_values):
"""
Calculates the 21cm and HII power spectra at fixed HI fractions.
Parameters
----------
rank : Integer
This processor rank.
size : Integer
The total number of processors executing the pipeline.
comm : Class ``mpi4py.MPI.Intracomm``
The ``mpi4py`` communicator.
z_array_reion_allmodels : 2D nested list of floats. Outer length is number
of models, inner is number of snapshots.
The redshift at each snapshot for each model.
cosmology_allmodels : List of class ``astropy.cosmology``. Length is number
of models.
``Astropy`` class containing the cosmology for each model.
mass_frac_allmodels : 2D nested list of floats. Dimensions equal to
``z_array_reion_allmodels``.
The mass weighted neutral fraction at each snapshot for each model.
XHII_fbase_allmodels : List of strings. Length is number of models.
The base filename for the ionization fields for each model.
XHII_precision_allmodels : List of integers. Length is number of models.
The precision of the ionization fields for each model.
1 : Float, 2 : Double.
density_fbase_allmodels : List of strings. Length is number of models.
The base filename for the density fields for each model.
density_precision_allmodels : List of integers. Length is number of models.
The precision of the density fields for each model.
1 : Float, 2 : Double.
GridSize_allmodels : List of integers. Length is number of models.
The number of grid cells (along a box size) for each model.
boxsize_allmodels : List of integers. Length is number of models.
The simulation box size for each model (units are Mpc/h).
first_snap_allmodels : List of integers. Length is number of models.
The snapshot where ``cifog`` starts calculations for each model.
fixed_XHI_values : List of floats.
The neutral hydrogen fractions we're calculating the power spectra at.
Defined by the user in ``paper_plots.py``.
Returns
---------
For all non-zero ranks, the returns are:
None, None, None.
k_master, P21_master, PHII_master : 3D nested lists of floats. Outer length
is number of models, next is number of
snapshots in the model and final is the
number of wavenumber bins.
The wavenumber bins, 21cm and HII power spectra for each model for the
neutral hydrogen fractions `fixed_XHI_values`.
"""
num_models = len(mass_frac_allmodels)
num_fractions = len(fixed_XHI_values)
# Determine which mass_frac indices correspond to the requested XHI values.
snap_idx_target = []
for model_number in range(num_models):
snap_idx_target.append([])
for val in fixed_XHI_values:
idx = (np.abs(mass_frac_allmodels[model_number] - val)).argmin()
snap_idx_target[model_number].append(idx)
if rank == 0:
print("Model {0}\tFrac {1}\tSnap {2}".format(model_number,
val, idx+28))
flat_snap_idx_target = [item for sublist in snap_idx_target for item in sublist]
# Now every rank knows what snapshot indices correspond to the fixed HI
# values. We run 'classic' MPI loop.
k = []
P21 = []
PHII = []
for idx in range(rank, num_models*num_fractions, size):
# Determine what model this corresponds to.
model_number = int(idx / num_fractions)
model_cosmo = cosmology_allmodels[model_number]
model_boxsize = boxsize_allmodels[model_number] # Mpc/h.
# The `snap_idx_target` value will be the relative snapshot number.
# Hence need to add the `first_snap` for this model to get absolute.
snapnum = flat_snap_idx_target[idx]
redshift = z_array_reion_allmodels[model_number][snapnum]
snapnum += first_snap_allmodels[model_number]
cifog_snapnum = snapnum + 1
# Load the XHII and density fields and calculate!
XHII_path = "{0}_{1:03d}".format(XHII_fbase_allmodels[model_number],
cifog_snapnum)
XHII = rs.read_binary_grid(XHII_path, GridSize_allmodels[model_number],
XHII_precision_allmodels[model_number])
density_path = "{0}{1:03d}.dens.dat".format(density_fbase_allmodels[model_number],
snapnum)
density = rs.read_binary_grid(density_path,
GridSize_allmodels[model_number],
density_precision_allmodels[model_number])
T0 = T_naught(redshift, model_cosmo.H(0).value/100.0,
model_cosmo.Om0, model_cosmo.Ob0)
# Be careful, passing the boxsize at Mpc/h.
tmp_k, tmp_PowSpec, tmp_Error, \
tmp_k_XHII, tmp_Pspec_HII, tmp_Error_XHII = calc_ps(XHII, density,
model_boxsize)
k.append(tmp_k)
P21.append(tmp_PowSpec * T0*T0 * tmp_k**3 * 4.0*np.pi)
PHII.append(tmp_Pspec_HII * tmp_k**3 * 4.0*np.pi)
comm.Barrier()
# Now at this point each rank has a subset of the power spectra.
# What we want to do is go through each index again and pass all of these
# back onto the master rank.
if rank == 0:
k_master = []
P21_master = []
PHII_master = []
rank_count = 0
model_count = -1
for idx in range(0, num_models*num_fractions):
# This is the idx within each rank.
ps_array_idx = int(idx / size)
# If we've reached the end of the number of fractions, go to next
# model.
if idx % num_fractions == 0:
model_count += 1
k_master.append([])
P21_master.append([])
PHII_master.append([])
# For every non-zero rank, we need to wait to receive the data from
# the other processers.
if rank_count == 0:
k_master[model_count].append(k[ps_array_idx])
P21_master[model_count].append(P21[ps_array_idx])
PHII_master[model_count].append(PHII[ps_array_idx])
else:
tag = int(rank_count*100 + ps_array_idx)
k_this_idx = comm.recv(source = rank_count,
tag = tag)
P21_this_idx = comm.recv(source = rank_count,
tag = tag+1)
PHII_this_idx = comm.recv(source = rank_count,
tag = tag+2)
k_master[model_count].append(k_this_idx)
P21_master[model_count].append(P21_this_idx)
PHII_master[model_count].append(PHII_this_idx)
rank_count += 1
if rank_count == size:
rank_count = 0
return k_master, P21_master, PHII_master
else:
# We generate a unique tag for each rank + idx combination and then
# send this processor's data to the master rank.
for idx in range(len(P21)):
tag = int(rank*100 + idx)
k_this_idx = k[idx]
P21_this_idx = P21[idx]
PHII_this_idx = PHII[idx]
comm.send(k_this_idx, dest = 0, tag = tag)
comm.send(P21_this_idx, dest = 0, tag = tag+1)
comm.send(PHII_this_idx, dest = 0, tag = tag+2)
return None, None, None
def calc_tau(z_array_reion_allmodels, cosmology_allmodels, helium_allmodels,
mass_frac_allmodels):
"""
Calculates the Thomson integrated optical depth.
Parameters
----------
z_array_reion_allmodels : 2D nested list of floats. Outer length is number
of models, inner is number of snapshots.
The redshift at each snapshot for each model.
cosmology_allmodels : List of class ``astropy.cosmology``. Length is number
of models.
``Astropy`` class containing the cosmology for each model.
helium_allmodels : Nested list of floats. Length is number of models,
The helium fraction for each model.
mass_frac_allmodels : 2D nested list of floats. Dimensions equal to
``z_array_reion_allmodels``.
The mass weighted neutral fraction at each snapshot for each model.
Returns
---------
tau : 2D nested list of floats. Dimensions equal to
``z_array_reion_allmodels``.
The Thomson optical depth at each snapshot for each model.
"""
def integrand(z, h, OM):
H = av.Hubble_Param(z, h, OM) / (av.pc_to_m * 1.0e6 / 1.0e3)
return (((1 + z)**2) / H)
tau = []
for model_number in range(len(mass_frac_allmodels)):
# Set up some things for the model cosmology etc.
model_mass_frac = mass_frac_allmodels[model_number]
model_helium = helium_allmodels[model_number]
model_h = cosmology_allmodels[model_number].H(0).value/100.0
model_OM = cosmology_allmodels[model_number].Om0
model_OB = cosmology_allmodels[model_number].Ob0
model_z = z_array_reion_allmodels[model_number]
model_tau = np.zeros(len(model_mass_frac))
# First determine optical depth for redshift 0 to 4.
tau_04 = integrate.quad(integrand, 0, 4, args=(model_h, model_OM,))[0]
tau_04 *= (1 + 2*model_helium/(4 * (1-model_helium)))
# Then determine optical depth from z = 4 to lowest z of model.
tau_46 = integrate.quad(integrand, 4, model_z[-1], args=(model_h, model_OM,))[0]
tau_46 *= (1 + model_helium/(4* (1-model_helium)))
tau_06 = tau_04 + tau_46
model_tau[-1] = tau_06
# Then loop down through snapshots (low z to high z) and calculate tau.
for snapnum in np.arange(len(model_mass_frac) - 2, -1, -1):
this_z = model_z[snapnum]
prev_z = model_z[snapnum + 1]
# Hubble Parameter in Mpc/s/Mpc.
H = av.Hubble_Param(this_z, model_h, model_OM) / (av.pc_to_m * 1.0e6 / 1.0e3)
numerator = ((1 + this_z) **2) * (1.0 - model_mass_frac[snapnum])
model_tau[snapnum] = model_tau[snapnum+1] + (( numerator / H) * (this_z - prev_z) * (1 + model_helium/(4 * (1-model_helium))))
model_tau *= av.n_HI(0, model_h, model_OB, model_helium) * av.c_in_ms * av.Sigmat
tau.append(model_tau)
return tau
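# In equation form, the quantity assembled above is the Thomson optical depth
#   tau(z) = sigma_T * c * n_H(0) * int_0^z (1 + z')^2 x_e(z') / H(z') dz',
# with the helium correction applied piecewise: doubly ionised He is assumed
# below z = 4 and singly ionised He above.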
def gather_ps(rank, size, comm, k_allmodels, P21_allmodels, PHII_allmodels,
first_snap_allmodels, last_snap_allmodels):
"""
Gathers the power spectra calculated on each processor onto the root rank.
Each rank calculates the spectra of only a subset of snapshots so here we
gather the spectra of all models and snapshots onto root rank.
Parameters
----------
rank : Integer
This processor rank.
size : Integer
The total number of processors executing the pipeline.
comm : Class ``mpi4py.MPI.Intracomm``
The ``mpi4py`` communicator.
k_allmodels : 3D nested list of floats. Outer length is number of models,
next is number of snapshots processed by this processor and
final is the number of wavenumber bins.
        Wavenumber bins the spectra are binned on (units of h/Mpc). Processor
        unique.
P21_allmodels, PHII_allmodels : 3D nested lists of floats. Dimensions are
identical to ``k_allmodels``.
The 21cm and HII power spectra for each model at each snapshot.
Processor unique.
first_snap_allmodels, last_snap_allmodels : List of integers. Length is
number of models.
The first and last snapshot that defines the snapshot range that
``cifog`` was run on.
Returns
---------
For all non-zero ranks, the returns are:
None, None, None.
k_master, P21_master, PHII_master : 3D nested lists of floats. Dimensions
are identical to `k_allmodels` except
the 2nd axis length is the snapshot
range for that particular model (not a
subset).
The wavenumber bins, 21cm and HII power spectra for each model for all
snapshots.
"""
def generate_tag(rank):
tag = int(rank*100)
return tag
# Rank 0 will gather the wavenumber bins/power spectra from all other
# ranks.
if rank == 0:
k_master = []
P21_master = []
PHII_master = []
# Go through each model.
for model_number in range(len(k_allmodels)):
k_master.append([])
P21_master.append([])
PHII_master.append([])
model_k = k_allmodels[model_number]
model_P21 = P21_allmodels[model_number]
model_PHII = PHII_allmodels[model_number]
num_snaps = last_snap_allmodels[model_number] - \
first_snap_allmodels[model_number]
rank_count = 0
my_count = 0
# Then go through each snapshot.
# In the main data loop (``generate_data()``) the snapshots are
            # scattered sequentially. Hence when we gather, we get snap0 from
# rank 0, snap1 from rank 1 etc. So we increase rank_count for each
# snapshot and then reset it when we reach `size`.
for snap_idx in range(num_snaps):
if rank_count == 0:
this_k = model_k[my_count]
this_P21 = model_P21[my_count]
this_PHII = model_PHII[my_count]
my_count += 1
else:
# Each rank will use a unique tag.
tag = generate_tag(rank_count)
# Then the tag is offset for each data array.
this_k = comm.recv(source = rank_count,
tag = tag)
this_P21 = comm.recv(source = rank_count,
tag = tag+1)
this_PHII = comm.recv(source = rank_count,
tag = tag+2)
# Now we have the data, append it to the master.
k_master[model_number].append(this_k)
P21_master[model_number].append(this_P21)
PHII_master[model_number].append(this_PHII)
rank_count += 1
if rank_count == size:
rank_count = 0
# Snapshot Loop.
# Model Loop.
return k_master, P21_master, PHII_master
else:
# For all other ranks, go through the power spectra it calculated and
# send it back to the root rank.
for model_number in range(len(k_allmodels)):
for idx in range(len(P21_allmodels[model_number])):
tag = generate_tag(rank)
k_this_idx = k_allmodels[model_number][idx]
P21_this_idx = P21_allmodels[model_number][idx]
PHII_this_idx = PHII_allmodels[model_number][idx]
comm.send(k_this_idx, dest = 0, tag = tag)
comm.send(P21_this_idx, dest = 0, tag = tag+1)
comm.send(PHII_this_idx, dest = 0, tag = tag+2)
# Non-zero ranks return junk.
return None, None, None
def calculate_bubble_MC(XHII, output_file, N=1e5):
"""
Determines the size of ionized regions using MC walks.
Parameters
----------
XHII : 3D nested list of floats. Lengths are equal and given by the grid
size of the model.
        Grid containing the fraction of ionized hydrogen (XHII) in each cell.
output_file : String.
Path to where the results of the MC walks will be saved.
N : Integer, optional.
The number of walks performed.
Returns
---------
None.
"""
def MC_walk(cells, indices, phase, N, Ncell, output_file):
"""
Performs the MC walk.
Parameters
----------
cells : 3D nested list of floats. Lengths are equal and given
``Ncell``.
            Grid containing the fraction of ionized hydrogen (XHII) in each cell.
indices : List of integers.
Indices corresponding to the ionized/neutral grid cells.
phase : Integer.
Flag denoting whether ``indices`` correspond to neutral (=0) or
ionized (=1) cells.
N : Integer.
The number of walks performed.
Ncell : Integer.
The number of grid cells on a side of ``cells``.
output_file : String.
Path to where the results will be saved.
Returns
---------
None. The results of the walk are saved as a ``.txt`` file.
"""
radii = []
for j in range(N):
# Select a random direction to walk through.
direction = random.randint(1,6)
if direction == 1:
x = 1
y = 0
z = 0
elif direction == 2:
x = -1
y = 0
z = 0
elif direction == 3:
x = 0
y = 1
z = 0
elif direction == 4:
x = 0
y = -1
z = 0
elif direction == 5:
x = 0
y = 0
z = 1
else:
x = 0
y = 0
z = -1
# Pick the x,y,z coordinates of a random ionized/neutral cell
random_index = random.randint(0,len(indices[0])-1)
walk_x = indices[0][random_index]
walk_y = indices[1][random_index]
walk_z = indices[2][random_index]
R = 0
phase_transition = phase
# Then keep walking in that direction until we reach a
# neutral/ionized cell.
while (phase_transition == phase):
R += 1
phase_transition = cells[(walk_x + R*x) % Ncell, (walk_y + R*y) % Ncell, (walk_z + R*z) % Ncell]
if (phase_transition > 0.8):
phase_transition = 1
else:
phase_transition = 0
if (R >= Ncell): # If the radius has gone beyond the number of
phase_transition = (phase + 1) % 2 # available cells,
# force the change.
radii.append(R)
np.savetxt(output_file, radii, delimiter = ',')
Ncell = XHII.shape[0]
# First determine where the ionized cells are.
    XHII_indices = np.where(XHII > 0.8)
import warnings
warnings.filterwarnings("ignore")
import torch
import cv2
import numpy as np
import os
import json
from model import Net
# Training configuration
# **********************************************************
network = Net()
# netHg = nn.DataParallel(network, devices = [0, 1, 2])  # parallel training
network.load_state_dict(torch.load('model/cifar10_20.pt'))
# torch.backends.cudnn.benchmark = True
device = torch.device('cuda:0')
network.to(device)
network.eval()  # switch to evaluation mode for testing
# ---------------------------------------------------------
if __name__ == '__main__':
imageSize = [32, 32]
    imgsNp = np.zeros((1, 3, imageSize[0], imageSize[1]))
import os
import unittest
import numpy as np
import config
from mos.dataset_reader import RawDatasetReader, MissingDataRawDatasetReader, \
SyntheticRawDatasetReader, CorruptSubjectRawDatasetReader
from mos.subjective_model import MosModel, DmosModel, \
MaximumLikelihoodEstimationModelReduced, MaximumLikelihoodEstimationModel, \
LiveDmosModel, MaximumLikelihoodEstimationDmosModel, LeastSquaresModel, \
SubjrejMosModel, ZscoringSubjrejMosModel, SubjrejDmosModel, \
ZscoringSubjrejDmosModel, PerSubjectModel
from tools.misc import import_python_file
__copyright__ = "Copyright 2016, Netflix, Inc."
__license__ = "Apache, Version 2.0"
class SubjectiveModelTest(unittest.TestCase):
def setUp(self):
self.dataset_filepath = config.ROOT + '/python/test/resource/NFLX_dataset_public_raw.py'
self.output_dataset_filepath = config.ROOT + '/workspace/workdir/NFLX_dataset_public_test.py'
self.output_dataset_pyc_filepath = config.ROOT + '/workspace/workdir/NFLX_dataset_public_test.pyc'
def tearDown(self):
if os.path.exists(self.output_dataset_filepath):
os.remove(self.output_dataset_filepath)
if os.path.exists(self.output_dataset_pyc_filepath):
os.remove(self.output_dataset_pyc_filepath)
def test_mos_subjective_model(self):
dataset = import_python_file(self.dataset_filepath)
dataset_reader = RawDatasetReader(dataset)
subjective_model = MosModel(dataset_reader)
result = subjective_model.run_modeling()
scores = result['quality_scores']
self.assertAlmostEquals(scores[0], 4.884615384615385, places=4)
self.assertAlmostEquals(scores[10], 2.0769230769230771, places=4)
self.assertAlmostEquals(np.mean(scores), 3.544790652385589, places=4)
def test_mos_subjective_model_output(self):
dataset = import_python_file(self.dataset_filepath)
dataset_reader = RawDatasetReader(dataset)
subjective_model = MosModel(dataset_reader)
subjective_model.run_modeling()
subjective_model.to_aggregated_dataset_file(self.output_dataset_filepath)
self.assertTrue(os.path.exists(self.output_dataset_filepath))
dataset2 = import_python_file(self.output_dataset_filepath)
dis_video = dataset2.dis_videos[0]
self.assertTrue('groundtruth' in dis_video)
self.assertTrue('os' not in dis_video)
self.assertAlmostEquals(dis_video['groundtruth'], 4.884615384615385, places=4)
def test_mos_subjective_model_output_custom_resampling(self):
dataset = import_python_file(self.dataset_filepath)
dataset_reader = RawDatasetReader(dataset)
subjective_model = MosModel(dataset_reader)
subjective_model.run_modeling()
subjective_model.to_aggregated_dataset_file(self.output_dataset_filepath, resampling_type='lanczos')
self.assertTrue(os.path.exists(self.output_dataset_filepath))
dataset2 = import_python_file(self.output_dataset_filepath)
self.assertFalse(hasattr(dataset2, 'quality_height'))
self.assertFalse(hasattr(dataset2, 'quality_width'))
self.assertEquals(dataset2.resampling_type, 'lanczos')
dis_video = dataset2.dis_videos[0]
self.assertTrue('groundtruth' in dis_video)
self.assertTrue('os' not in dis_video)
self.assertAlmostEquals(dis_video['groundtruth'], 4.884615384615385, places=4)
def test_mos_subjective_model_output2(self):
dataset = import_python_file(self.dataset_filepath)
dataset_reader = RawDatasetReader(dataset)
subjective_model = MosModel(dataset_reader)
subjective_model.run_modeling()
dataset2 = subjective_model.to_aggregated_dataset()
dis_video = dataset2.dis_videos[0]
self.assertTrue('groundtruth' in dis_video)
self.assertTrue('os' not in dis_video)
self.assertAlmostEquals(dis_video['groundtruth'], 4.884615384615385, places=4)
def test_mos_subjective_model_normalize_final(self):
dataset = import_python_file(self.dataset_filepath)
dataset_reader = RawDatasetReader(dataset)
subjective_model = MosModel(dataset_reader)
result = subjective_model.run_modeling(normalize_final=True)
scores = result['quality_scores']
self.assertAlmostEquals(scores[0], 1.1318646945818083, places=4)
self.assertAlmostEquals(scores[10], -1.2400334499143002, places=4)
self.assertAlmostEquals(np.mean(scores), 0.0, places=4)
def test_mos_subjective_model_transform_final(self):
dataset = import_python_file(self.dataset_filepath)
dataset_reader = RawDatasetReader(dataset)
subjective_model = MosModel(dataset_reader)
result = subjective_model.run_modeling(transform_final={'p1': 10, 'p0': 1})
scores = result['quality_scores']
self.assertAlmostEquals(scores[0], 49.84615384615385, places=4)
self.assertAlmostEquals(scores[10], 21.769230769230771, places=4)
self.assertAlmostEquals(np.mean(scores), 36.44790652385589, places=4)
def test_from_dataset_file(self):
subjective_model = MosModel.from_dataset_file(self.dataset_filepath)
result = subjective_model.run_modeling()
scores = result['quality_scores']
self.assertAlmostEquals(scores[0], 4.884615384615385, places=4)
self.assertAlmostEquals(scores[10], 2.0769230769230771, places=4)
self.assertAlmostEquals(np.mean(scores), 3.544790652385589, places=4)
def test_dmos_subjective_model(self):
subjective_model = DmosModel.from_dataset_file(self.dataset_filepath)
result = subjective_model.run_modeling()
scores = result['quality_scores']
self.assertAlmostEquals(scores[0], 5.0, places=4)
self.assertAlmostEquals(scores[10], 2.1923076923076921, places=4)
self.assertAlmostEquals(np.mean(scores), 3.7731256085686473, places=4)
def test_dmos_subjective_model_normalize_final(self):
subjective_model = DmosModel.from_dataset_file(self.dataset_filepath)
result = subjective_model.run_modeling(normalize_final=True)
scores = result['quality_scores']
self.assertAlmostEquals(scores[0], 1.0440613892053001, places=4)
self.assertAlmostEquals(scores[10], -1.3452648137895296, places=4)
self.assertAlmostEquals(np.mean(scores), 0.0, places=4)
def test_dmos_subjective_model_dscore_mode_same(self):
subjective_model = DmosModel.from_dataset_file(self.dataset_filepath)
result = subjective_model.run_modeling(normalize_final=True)
scores = result['quality_scores']
self.assertAlmostEquals(scores[0], 1.0440613892053001, places=4)
self.assertAlmostEquals(scores[10], -1.3452648137895296, places=4)
self.assertAlmostEquals(np.mean(scores), 0.0, places=4)
def test_observer_aware_subjective_model_with_dscoring(self):
subjective_model = MaximumLikelihoodEstimationModelReduced.from_dataset_file(
self.dataset_filepath)
result = subjective_model.run_modeling(dscore_mode=True)
self.assertAlmostEquals(np.sum(result['observer_bias']), -0.090840910829083799, places=4)
self.assertAlmostEquals(np.var(result['observer_bias']), 0.089032585621095089, places=4)
self.assertAlmostEquals(np.sum(result['observer_inconsistency']), 15.681766163430936, places=4)
self.assertAlmostEquals(np.var(result['observer_inconsistency']), 0.012565584832977776, places=4)
self.assertAlmostEquals(np.sum(result['quality_scores']), 298.35293969059796, places=4)
self.assertAlmostEquals(np.var(result['quality_scores']), 1.4163670233392607, places=4)
def test_observer_aware_subjective_model_with_zscoring(self):
subjective_model = MaximumLikelihoodEstimationModelReduced.from_dataset_file(
self.dataset_filepath)
result = subjective_model.run_modeling(zscore_mode=True)
self.assertAlmostEquals(np.sum(result['observer_bias']), 0.0, places=4)
self.assertAlmostEquals(np.var(result['observer_bias']), 0.0, places=4)
self.assertAlmostEquals(np.sum(result['observer_inconsistency']), 11.568205661696393, places=4)
self.assertAlmostEquals(np.var(result['observer_inconsistency']), 0.0079989301785523791, places=4)
self.assertAlmostEquals(np.sum(result['quality_scores']), 0.0, places=4)
self.assertAlmostEquals(np.var(result['quality_scores']), 0.80942484781493518, places=4)
def test_observer_aware_subjective_model_with_dscoring_and_zscoring(self):
subjective_model = MaximumLikelihoodEstimationModelReduced.from_dataset_file(
self.dataset_filepath)
result = subjective_model.run_modeling(dscore_mode=True, zscore_mode=True)
self.assertAlmostEquals(np.sum(result['observer_bias']), 0.0, places=4)
self.assertAlmostEquals(np.var(result['observer_bias']), 0.0, places=4)
self.assertAlmostEquals(np.sum(result['observer_inconsistency']), 11.628499078069273, places=4)
self.assertAlmostEquals(np.var(result['observer_inconsistency']), 0.0082089371266301642, places=4)
self.assertAlmostEquals(np.sum(result['quality_scores']), 0.0, places=4)
self.assertAlmostEquals(np.var(result['quality_scores']), 0.80806512456121071, places=4)
def test_observer_aware_subjective_model_use_log(self):
subjective_model = MaximumLikelihoodEstimationModelReduced.from_dataset_file(
self.dataset_filepath)
result = subjective_model.run_modeling(use_log=True)
self.assertAlmostEquals(np.sum(result['observer_bias']), -0.082429594509296211, places=4)
self.assertAlmostEquals(np.var(result['observer_bias']), 0.089032585621095089, places=4)
self.assertAlmostEquals(np.sum(result['observer_inconsistency']), 15.681766163430936, places=4)
self.assertAlmostEquals(np.var(result['observer_inconsistency']), 0.012565584832977776, places=4)
self.assertAlmostEquals(np.sum(result['quality_scores']), 280.2889206910113, places=4)
self.assertAlmostEquals(np.var(result['quality_scores']), 1.4355485462027884, places=4)
def test_observer_content_aware_subjective_model(self):
subjective_model = MaximumLikelihoodEstimationModel.from_dataset_file(
self.dataset_filepath)
result = subjective_model.run_modeling()
self.assertAlmostEquals(np.sum(result['content_bias']), 0, places=4)
self.assertAlmostEquals(np.var(result['content_bias']), 0, places=4)
self.assertAlmostEquals(np.sum(result['content_ambiguity']), 3.8972884776604402, places=4)
self.assertAlmostEquals(np.var(result['content_ambiguity']), 0.0041122094732031289, places=4)
self.assertAlmostEquals(np.sum(result['observer_bias']), -0.055712761348815837, places=4)
self.assertAlmostEquals(np.var(result['observer_bias']), 0.085842891905121704, places=4)
self.assertAlmostEquals(np.sum(result['observer_inconsistency']), 10.164665557559516, places=4)
self.assertAlmostEquals(np.var(result['observer_inconsistency']), 0.028749990587721687, places=4)
self.assertAlmostEquals(np.sum(result['quality_scores']), 280.20774261173619, places=4)
self.assertAlmostEquals(np.var(result['quality_scores']), 1.4351342153719635, places=4)
def test_observer_content_aware_subjective_model_missingdata(self):
dataset = import_python_file(self.dataset_filepath)
np.random.seed(0)
info_dict = {
'missing_probability': 0.1,
}
dataset_reader = MissingDataRawDatasetReader(dataset, input_dict=info_dict)
subjective_model = MaximumLikelihoodEstimationModel(dataset_reader)
result = subjective_model.run_modeling()
self.assertAlmostEquals(np.sum(result['content_bias']), 0, places=4)
self.assertAlmostEquals(np.var(result['content_bias']), 0, places=4)
self.assertAlmostEquals(np.sum(result['content_ambiguity']), 3.9104244772977128, places=4)
self.assertAlmostEquals(np.var(result['content_ambiguity']), 0.0037713583509767193, places=4)
self.assertAlmostEquals(np.sum(result['observer_bias']), -0.21903272050455846, places=4)
self.assertAlmostEquals(np.var(result['observer_bias']), 0.084353684687185043, places=4)
self.assertAlmostEquals(np.sum(result['observer_inconsistency']), 9.8168943054654481, places=4)
self.assertAlmostEquals(np.var(result['observer_inconsistency']), 0.028159236075789944, places=4)
self.assertAlmostEquals(np.sum(result['quality_scores']), 280.05548186797336, places=4)
self.assertAlmostEquals(np.var(result['quality_scores']), 1.4339487982797514, places=4)
np.random.seed(0)
info_dict = {
'missing_probability': 0.5,
}
dataset_reader = MissingDataRawDatasetReader(dataset, input_dict=info_dict)
subjective_model = MaximumLikelihoodEstimationModel(dataset_reader)
result = subjective_model.run_modeling()
self.assertAlmostEquals(np.sum(result['content_bias']), 0, places=4)
self.assertAlmostEquals(np.var(result['content_bias']), 0, places=4)
self.assertAlmostEquals(np.sum(result['content_ambiguity']), 2.63184284168883, places=4)
self.assertAlmostEquals(np.var(result['content_ambiguity']), 0.019164097909450246, places=4)
self.assertAlmostEquals(np.sum(result['observer_bias']), 0.2263148440748638, places=4)
self.assertAlmostEquals(np.var(result['observer_bias']), 0.070613033112114504, places=4)
self.assertAlmostEquals(np.sum(result['observer_inconsistency']), 12.317917502439435, places=4)
self.assertAlmostEquals(np.var(result['observer_inconsistency']), 0.029455722248727296, places=4)
self.assertAlmostEquals(np.sum(result['quality_scores']), 280.29962156788139, places=4)
self.assertAlmostEquals(np.var(result['quality_scores']), 1.4717366222424826, places=4)
def test_observer_content_aware_subjective_model_nocontent(self):
subjective_model = MaximumLikelihoodEstimationModel.from_dataset_file(
self.dataset_filepath)
result = subjective_model.run_modeling(mode='NO_CONTENT')
self.assertAlmostEquals(np.sum(result['observer_bias']), -0.090840910829083799, places=4)
self.assertAlmostEquals(np.var(result['observer_bias']), 0.089032585621095089, places=4)
self.assertAlmostEquals(np.sum(result['observer_inconsistency']), 15.681766163430936, places=4)
self.assertAlmostEquals(np.var(result['observer_inconsistency']), 0.012565584832977776, places=4)
self.assertAlmostEquals(np.sum(result['quality_scores']), 280.31447815213642, places=4)
self.assertAlmostEquals(np.var(result['quality_scores']), 1.4355485462027884, places=4)
self.assertAlmostEquals(np.sum(result['content_bias']), 0.0, places=4)
self.assertAlmostEquals(np.var(result['content_bias']), 0.0, places=4)
self.assertAlmostEquals(np.sum(result['content_ambiguity']), 0.0, places=4)
self.assertAlmostEquals(np.var(result['content_ambiguity']), 0.0, places=4)
def test_observer_content_aware_subjective_model_nosubject(self):
subjective_model = MaximumLikelihoodEstimationModel.from_dataset_file(
self.dataset_filepath)
result = subjective_model.run_modeling(mode='NO_SUBJECT')
self.assertAlmostEquals(np.sum(result['observer_bias']), 0.0, places=4)
self.assertAlmostEquals(np.var(result['observer_bias']), 0.0, places=4)
self.assertAlmostEquals(np.sum(result['observer_inconsistency']), 0.0, places=4)
self.assertAlmostEquals(np.var(result['observer_inconsistency']), 0.0, places=4)
self.assertAlmostEquals(np.sum(result['quality_scores']), 280.0384615384616, places=4)
self.assertAlmostEquals(np.var(result['quality_scores']), 1.4012220200639218, places=4)
self.assertAlmostEquals(np.sum(result['content_bias']), 0.0, places=4)
self.assertAlmostEquals(np.var(result['content_bias']), 0.0, places=4)
self.assertAlmostEquals(np.sum(result['content_ambiguity']), 6.06982228334157, places=4)
self.assertAlmostEquals(np.var(result['content_ambiguity']), 0.0045809756997836721, places=4)
def test_observer_aware_subjective_model_synthetic(self):
np.random.seed(0)
dataset = import_python_file(self.dataset_filepath)
info_dict = {
'quality_scores': np.random.uniform(1, 5, 79),
'observer_bias': np.random.normal(0, 1, 26),
'observer_inconsistency': np.abs(np.random.uniform(0.4, 0.6, 26)),
'content_bias': np.zeros(9),
'content_ambiguity': np.zeros(9),
}
dataset_reader = SyntheticRawDatasetReader(dataset, input_dict=info_dict)
subjective_model = MaximumLikelihoodEstimationModelReduced(dataset_reader)
result = subjective_model.run_modeling()
self.assertAlmostEquals(np.sum(result['observer_bias']), -0.90138622499935517, places=4)
self.assertAlmostEquals(np.var(result['observer_bias']), 0.84819162765420342, places=4)
self.assertAlmostEquals(np.sum(result['observer_inconsistency']), 12.742288471632817, places=4)
self.assertAlmostEquals(np.var(result['observer_inconsistency']), 0.0047638169604076975, places=4)
self.assertAlmostEquals(np.sum(result['quality_scores']), 236.78529213581052, places=4)
self.assertAlmostEquals(np.var(result['quality_scores']), 1.3059726132293354, places=4)
def test_observer_aware_subjective_model(self):
subjective_model = MaximumLikelihoodEstimationModelReduced.from_dataset_file(
self.dataset_filepath)
result = subjective_model.run_modeling()
self.assertAlmostEquals(np.sum(result['observer_bias']), -0.090840910829083799, places=4)
self.assertAlmostEquals(np.var(result['observer_bias']), 0.089032585621095089, places=4)
self.assertAlmostEquals(np.sum(result['observer_inconsistency']), 15.681766163430936, places=4)
self.assertAlmostEquals(np.var(result['observer_inconsistency']), 0.012565584832977776, places=4)
self.assertAlmostEquals(np.sum(result['quality_scores']), 280.31447815213642, places=4)
self.assertAlmostEquals(np.var(result['quality_scores']), 1.4355485462027884, places=4)
def test_observer_aware_subjective_model_missingdata(self):
dataset = import_python_file(self.dataset_filepath)
np.random.seed(0)
info_dict = {
'missing_probability': 0.1,
}
dataset_reader = MissingDataRawDatasetReader(dataset, input_dict=info_dict)
subjective_model = MaximumLikelihoodEstimationModelReduced(dataset_reader)
result = subjective_model.run_modeling()
self.assertAlmostEquals(np.sum(result['observer_bias']), -0.18504017984241944, places=4)
self.assertAlmostEquals(np.var(result['observer_bias']), 0.087350553292201705, places=4)
self.assertAlmostEquals(np.sum(result['observer_inconsistency']), 15.520738471447299, places=4)
self.assertAlmostEquals(np.var(result['observer_inconsistency']), 0.010940587327083341, places=4)
self.assertAlmostEquals(np.sum(result['quality_scores']), 279.94975274863879, places=4)
self.assertAlmostEquals(np.var(result['quality_scores']), 1.4325574378911554, places=4)
np.random.seed(0)
info_dict = {
'missing_probability': 0.5,
}
dataset_reader = MissingDataRawDatasetReader(dataset, input_dict=info_dict)
subjective_model = MaximumLikelihoodEstimationModelReduced(dataset_reader)
result = subjective_model.run_modeling()
self.assertAlmostEquals(np.sum(result['observer_bias']), 0.057731868199093525, places=4)
self.assertAlmostEquals(np.var(result['observer_bias']), 0.081341845650928557, places=4)
self.assertAlmostEquals(np.sum(result['observer_inconsistency']), 14.996238224489693, places=4)
self.assertAlmostEquals(np.var(result['observer_inconsistency']), 0.013666025579465165, places=4)
self.assertAlmostEquals(np.sum(result['quality_scores']), 280.67100837103203, places=4)
self.assertAlmostEquals(np.var(result['quality_scores']), 1.4637917512768972, places=4)
def test_livedmos_subjective_model(self):
subjective_model = LiveDmosModel.from_dataset_file(self.dataset_filepath)
result = subjective_model.run_modeling()
scores = result['quality_scores']
self.assertAlmostEquals(scores[0], 65.307711974116913, places=4)
self.assertAlmostEquals(scores[10], 30.204773267864258, places=4)
self.assertAlmostEquals(np.mean(scores), 50.0, places=4)
def test_livedmos_subjective_model_normalize_final(self):
subjective_model = LiveDmosModel.from_dataset_file(self.dataset_filepath)
result = subjective_model.run_modeling(normalize_final=True)
scores = result['quality_scores']
self.assertAlmostEquals(scores[0], 1.0392964273048528, places=4)
self.assertAlmostEquals(scores[10], -1.3439701802061783, places=4)
self.assertAlmostEquals(np.mean(scores), 0.0, places=4)
def test_livedmos_subjective_model_dscore_mode_bad(self):
subjective_model = LiveDmosModel.from_dataset_file(self.dataset_filepath)
with self.assertRaises(AssertionError):
subjective_model.run_modeling(dscore_mode=True)
def test_observer_aware_subjective_model_corruptdata(self):
dataset = import_python_file(self.dataset_filepath)
np.random.seed(0)
info_dict = {
'selected_subjects': range(5),
}
dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
subjective_model = MaximumLikelihoodEstimationModelReduced(dataset_reader)
result = subjective_model.run_modeling()
self.assertAlmostEquals(np.mean(result['quality_scores']), 3.5573073781669944, places=4) # 3.5482845335713469
self.assertAlmostEquals(np.var(result['quality_scores']), 1.3559834438740614, places=4) # 1.4355485462027884
def test_mos_subjective_model_corruptdata(self):
dataset = import_python_file(self.dataset_filepath)
np.random.seed(0)
info_dict = {
'selected_subjects': range(5),
}
dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
subjective_model = MosModel(dataset_reader)
result = subjective_model.run_modeling()
scores = result['quality_scores']
self.assertAlmostEquals(np.mean(scores), 3.5447906523855899, places=4)
self.assertAlmostEquals(np.var(scores), 0.95893305294535369, places=4) # 1.4012220200639218
def test_mos_subjective_model_corruptdata_subjreject(self):
dataset = import_python_file(self.dataset_filepath)
np.random.seed(0)
info_dict = {
'selected_subjects': range(5),
}
dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
subjective_model = MosModel(dataset_reader)
result = subjective_model.run_modeling(subject_rejection=True)
scores = result['quality_scores']
self.assertAlmostEquals(np.mean(scores), 3.5611814345991566, places=4)
self.assertAlmostEquals(np.var(scores), 1.1049505732699529, places=4) # 1.4012220200639218
def test_zscore_mos_subjective_model_corruptdata_subjreject(self):
dataset = import_python_file(self.dataset_filepath)
np.random.seed(0)
info_dict = {
'selected_subjects': range(5),
}
dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
subjective_model = MosModel(dataset_reader)
result = subjective_model.run_modeling(zscore_mode=True, subject_rejection=True)
scores = result['quality_scores']
self.assertAlmostEquals(np.mean(scores), 0.0, places=4)
self.assertAlmostEquals(np.var(scores), 0.66670826882879042, places=4)
def test_observer_aware_subjective_model_subjreject(self):
dataset = import_python_file(self.dataset_filepath)
np.random.seed(0)
info_dict = {
'selected_subjects': range(5),
}
dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
subjective_model = MaximumLikelihoodEstimationModelReduced(dataset_reader)
with self.assertRaises(AssertionError):
result = subjective_model.run_modeling(subject_rejection=True)
def test_observer_content_aware_subjective_model_subjreject(self):
dataset = import_python_file(self.dataset_filepath)
np.random.seed(0)
info_dict = {
'selected_subjects': range(5),
}
dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
subjective_model = MaximumLikelihoodEstimationModel(dataset_reader)
with self.assertRaises(AssertionError):
result = subjective_model.run_modeling(subject_rejection=True)
def test_observer_content_aware_subjective_dmos_model(self):
subjective_model = MaximumLikelihoodEstimationDmosModel.from_dataset_file(
self.dataset_filepath)
result = subjective_model.run_modeling()
self.assertAlmostEquals(np.sum(result['quality_scores']), 288.56842946051466, places=4)
self.assertAlmostEquals(np.var(result['quality_scores']), 1.4166132275824235, places=4)
self.assertAlmostEquals(np.sum(result['content_bias']), 0, places=4)
self.assertAlmostEquals(np.var(result['content_bias']), 0, places=4)
self.assertAlmostEquals(np.sum(result['content_ambiguity']), 3.8972884776604402, places=4)
self.assertAlmostEquals(np.var(result['content_ambiguity']), 0.0041122094732031289, places=4)
self.assertAlmostEquals(np.sum(result['observer_bias']), 3.1293776428507774, places=4)
self.assertAlmostEquals(np.var(result['observer_bias']), 0.085842891905121704, places=4)
self.assertAlmostEquals(np.sum(result['observer_inconsistency']), 10.164665557559516, places=4)
self.assertAlmostEquals(np.var(result['observer_inconsistency']), 0.028749990587721687, places=4)
def test_least_squares_model(self):
subjective_model = LeastSquaresModel.from_dataset_file(
self.dataset_filepath)
result = subjective_model.run_modeling()
self.assertAlmostEquals(np.sum(result['quality_scores']), 280.03846153847428, places=4)
self.assertAlmostEquals(np.var(result['quality_scores']), 1.4012220200638821, places=4)
self.assertAlmostEquals(np.sum(result['observer_bias']), 0, places=4)
self.assertAlmostEquals(np.var(result['observer_bias']), 0.089032585621522581, places=4)
def test_subjrejmos_subjective_model_corruptdata_subjreject(self):
dataset = import_python_file(self.dataset_filepath)
np.random.seed(0)
info_dict = {
'selected_subjects': range(5),
}
dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
subjective_model = SubjrejMosModel(dataset_reader)
result = subjective_model.run_modeling()
scores = result['quality_scores']
self.assertAlmostEquals(np.mean(scores), 3.5611814345991566, places=4)
self.assertAlmostEquals(np.var(scores), 1.1049505732699529, places=4) # 1.4012220200639218
def test_zscoresubjrejmos_subjective_model_corruptdata_subjreject(self):
dataset = import_python_file(self.dataset_filepath)
np.random.seed(0)
info_dict = {
'selected_subjects': range(5),
}
dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
subjective_model = ZscoringSubjrejMosModel(dataset_reader)
result = subjective_model.run_modeling()
scores = result['quality_scores']
self.assertAlmostEquals(np.mean(scores), 0, places=4)
        self.assertAlmostEquals(np.var(scores)
#! /usr/bin/env python
"""
Functions involving masked arrays
Some functions are general array operations, others involve geospatial information
"""
import sys
import os
import glob
import numpy as np
from osgeo import gdal
from pygeotools.lib import iolib
#Notes on geoma
#Note: Need better init overloading
#http://stackoverflow.com/questions/141545/overloading-init-in-python
#Might make more sense to create ma subclass, and add gdal ds as new object
#http://stackoverflow.com/questions/12597827/how-to-subclass-numpy-ma-core-masked-array
#http://docs.scipy.org/doc/numpy/user/basics.subclassing.html
#Want to implement basic array indexing with map coordinates, display in plt
#See pyresample
#https://github.com/talltom/PyRaster/blob/master/rasterIO.py
#http://www2-pcmdi.llnl.gov/cdat/tutorials/training/cdat_2004/06-arrays-variables-etc.pdf
#http://docs.scipy.org/doc/numpy/user/basics.subclassing.html
#=======================
#Masked array stack
#=======================
#Want to add error attributes
#Want to make consistent with stack_count vs count keywords/attributes
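# Sketch of typical usage for the DEMStack class below (paths and keyword values
# are illustrative; only attributes/methods defined in this module are used):
#   fn_list = sorted(glob.glob('/path/to/dems/*_warp.tif'))
#   s = DEMStack(fn_list, outdir='/path/to/stack_out', trend=True, save=True)
#   s.ma_stack      # (n_dem, rows, cols) masked array of the input rasters
#   ds = s.get_ds() # in-memory GDAL dataset matching the stack grid and projection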
class DEMStack:
def __init__(self, fn_list=[], stack_fn=None, outdir=None, res=None, extent=None, srs=None, trend=True, robust=False, med=False, stats=True, save=True, sort=True, datestack=True, mask_geom=None, min_dt_ptp=np.nan, n_thresh=2, n_cpu=None):
self.sort = sort
if self.sort:
#This sorts filenames, should probably sort by datetime to be safe
fn_list = sorted(fn_list, key=lambda x: os.path.split(x)[-1])
self.fn_list = list(fn_list)
self.stack_fn = stack_fn
if not self.fn_list and stack_fn is None:
raise ValueError('Must specify input filename list or existing stack filename')
self.res = res
self.extent = extent
self.srs = srs
self.trend = trend
self.robust = robust
self.med = med
self.stats = stats
self.save = save
self.datestack = datestack
self.mask_geom = mask_geom
#This is the minimum number of arrays in stack to compute trend
self.n_thresh = n_thresh
#This is the minimum number of days between first and last timestamp to compute trend
self.min_dt_ptp = min_dt_ptp
self.n_cpu = n_cpu
#Use this to limit memory use and filesizes
#self.dtype = 'float32'
self.dtype = np.float32
#Want to do this before filenames, etc. are determined
#self.get_date_list()
#if sort:
# idx = np.argsort(self.date_list_o)
# self.date_list = self.date_list[idx]
# self.date_list_o = self.date_list_o[idx]
# self.fn_list = (np.array(self.fn_list)[idx]).tolist()
#Determine appropriate stack filename
if outdir is None:
#Use directory of first filename
if self.fn_list:
self.outdir = os.path.abspath(os.path.split(self.fn_list[0])[0])
else:
self.outdir = os.getcwd()
else:
if os.path.exists(outdir):
self.outdir = outdir
else:
os.makedirs(outdir)
self.outdir = outdir
#raise IOError('Specified output directory does not exist')
#If we're on Pleiades, make sure striping is set up properly on output directory
#This now has check for lustre filesystem
iolib.setstripe(self.outdir, self.n_cpu)
#Flag specifying whether user has specified output stack filename
#Hack to prevent new stack_fn generation if files are missing or sorted
self.user_stack_fn = False
if self.stack_fn is not None:
self.user_stack_fn = True
if not self.user_stack_fn:
if self.fn_list:
self.get_stack_fn()
else:
raise ValueError('Must specify input filename list or existing stack filename')
if os.path.exists(self.stack_fn):
self.loadstack()
#This was an attempt to ensure that input fn/res/extent is consistent with saved npz
#Should really check to see if new extent falls within existing extent
#Only regenerate if new extent is larger
#Res check is not working correctly
#print extent, self.extent
#print res, self.res
#if (self.extent != extent) or (self.res != res):
#self.res = res
#self.extent = extent
#self.makestack()
#if self.stats:
#self.compute_stats()
#self.write_stats()
#self.savestack()
else:
self.ma_stack = None
self.makestack()
if self.ma_stack is not None:
if self.mask_geom is not None:
from pygeotools.lib import geolib
mask = geolib.geom2mask(self.mask_geom, self.get_ds())
self.ma_stack = np.ma.array(self.ma_stack, mask=np.broadcast_to(mask, self.ma_stack.shape))
#Initialize source and error lists
self.source = ['None' for i in self.fn_list]
#TODO: This needs to be fixed, source_dict moved to stack_view.py
#self.get_source()
self.error_dict_list = [None for i in self.fn_list]
#TODO: This needs to be fixed, source_dict moved to stack_view.py
#self.get_error_dict_list()
self.error = np.ma.zeros(len(self.fn_list))
#TODO: This needs to be fixed, source_dict moved to stack_view.py
#self.get_error()
self.get_date_list()
if sort:
sort_idx = self.get_sortorder()
if np.any(self.date_list_o != self.date_list_o[sort_idx]):
self.sort_in_place(sort_idx)
self.finish()
def finish(self):
if self.datestack:
#Only do this if we have valid dates
if self.date_list_o.count() > 1:
#self.make_datestack()
#self.write_datestack()
self.compute_dt_stats()
self.write_datestack()
if self.stats:
self.compute_stats()
if self.save:
self.write_stats()
if self.trend:
if not self.datestack:
self.make_datestack()
self.compute_trend()
if self.save:
self.write_trend()
if self.save:
self.savestack()
def get_stack_fn(self):
if not self.user_stack_fn:
self.stack_fn = os.path.splitext(os.path.split(self.fn_list[0])[-1])[0] + '_' \
+ os.path.splitext(os.path.split(self.fn_list[-1])[1])[0] \
+ '_stack_%i' % len(self.fn_list) + '.npz'
self.stack_fn = os.path.join(self.outdir, self.stack_fn)
#p = os.path.join(topdir, d, '*_warp.tif'))
def get_fn_list(p):
fn_list = glob.glob(p)
return fn_list
#This stores float
def get_res(self):
#Should check to make sure gt is defined
self.res = np.abs([self.gt[1], self.gt[5]]).mean()
#This stores list
def get_extent(self):
from pygeotools.lib import geolib
#Should check to make sure gt is defined
self.extent = geolib.gt_extent(self.gt, self.ma_stack.shape[2], self.ma_stack.shape[1])
#This returns a dummy dataset for the stack
#Useful for some applications
def get_ds(self):
nl = self.ma_stack.shape[1]
ns = self.ma_stack.shape[2]
gdal_dtype = iolib.np2gdal_dtype(np.dtype(self.dtype))
m_ds = gdal.GetDriverByName('MEM').Create('', ns, nl, 1, gdal_dtype)
m_gt = [self.extent[0], self.res, 0, self.extent[3], 0, -self.res]
m_ds.SetGeoTransform(m_gt)
#this should already be WKT
m_ds.SetProjection(self.proj)
return m_ds
"""
#TODO: Need to clean up the source_dict and error_dict code below
#This is pretty clunky
def get_source(self):
for i, fn in enumerate(self.fn_list):
for key, d in source_dict.items():
if d['fn_pattern'] in fn:
self.source[i] = key
break
#Should probably just preserve the error dictionary here
def get_error_dict_list(self):
import error_analysis
for i, fn in enumerate(self.fn_list):
error_log = error_analysis.parse_pc_align_log(fn)
if error_log is not None:
self.error_dict_list[i] = error_log
def get_error(self):
for i, fn in enumerate(self.fn_list):
if self.error_dict_list[i] is not None:
self.error[i] = self.error_dict_list[i]['Output Sampled Median Error']
            elif self.source[i] != 'None':
if 'error_perc' in source_dict[self.source[i]]:
istat = fast_median(self.ma_stack[i])
#Probably want to avoid using max, as could have bogus values
#istat = calcperc(self.ma_stack[i], clim=(2,98))[1]
self.error[i] = source_dict[self.source[i]]['error_perc'] * istat
else:
self.error[i] = source_dict[self.source[i]]['error']
"""
def makestack(self):
from pygeotools.lib import warplib
print("Creating stack of %i files" % len(self.fn_list))
#Jako front
#res = 16
res = 'min'
if self.res is not None:
res=self.res
#extent = '-195705.297256 -2286746.61662 -170642.601955 -2256442.61662'
#extent = 'intersection'
extent = 'union'
if self.extent is not None:
extent = self.extent
srs='first'
if self.srs is not None:
srs = self.srs
ds_list = warplib.memwarp_multi_fn(self.fn_list, res=res, extent=extent, t_srs=srs)
#Check to eliminate empty datasets
from pygeotools.lib import geolib
#Returns True if empty
bad_ds_idx = np.array([geolib.ds_IsEmpty(ds) for ds in ds_list])
if np.all(bad_ds_idx):
print("\nNo valid ds remain")
else:
if np.any(bad_ds_idx):
print("\n%i empty ds removed:" % len(bad_ds_idx.nonzero()[0]))
print(np.array(self.fn_list)[bad_ds_idx])
self.fn_list = np.array(self.fn_list)[~bad_ds_idx].tolist()
print("%i valid input ds\n" % len(self.fn_list))
#Only create a stack if we have more than one valid input
if len(self.fn_list) > 1:
self.get_stack_fn()
print("Creating ma_stack")
#Note: might not need ma here in the 0 axis - shouldn't be any missing data
#self.ma_stack = np.ma.array([iolib.ds_getma(ds) for ds in ds_list], dtype=self.dtype)
self.ma_stack = np.ma.array([iolib.ds_getma(ds) for ds in np.array(ds_list)[~bad_ds_idx]], dtype=self.dtype)
#Might want to convert to proj4
self.proj = ds_list[0].GetProjectionRef()
self.gt = ds_list[0].GetGeoTransform()
#Now set these for stack, regardless of input
self.get_res()
self.get_extent()
def get_sortorder(self):
sort_idx = np.argsort(self.date_list)
return sort_idx
def sort_in_place(self, sort_idx):
self.fn_list = (np.array(self.fn_list)[sort_idx]).tolist()
self.get_stack_fn()
self.ma_stack = self.ma_stack[sort_idx]
self.date_list = self.date_list[sort_idx]
self.date_list_o = self.date_list_o[sort_idx]
self.source = (np.array(self.source)[sort_idx]).tolist()
self.error = self.error[sort_idx]
self.error_dict_list = (np.array(self.error_dict_list)[sort_idx]).tolist()
    #This is deprecated, but is useful for computing mean, median or std
#Create separate array of datetime objects
def make_datestack(self):
self.datestack = True
print("Creating datestack")
self.dt_stack = np.ma.copy(self.ma_stack).astype(self.dtype)
for n, dt_o in enumerate(self.date_list_o):
self.dt_stack[n].data[:] = dt_o
self.dt_stack_min = np.ma.min(self.dt_stack, axis=0)
self.dt_stack_max = np.ma.max(self.dt_stack, axis=0)
self.dt_stack_ptp = np.ma.masked_equal((self.dt_stack_max - self.dt_stack_min), 0)
self.dt_stack_center = self.dt_stack_min + self.dt_stack_ptp/2.0
#self.dt_stack_mean = np.ma.mean(self.dt_stack, axis=0)
def compute_dt_stats(self):
self.datestack = True
print("Computing date stats")
allmask = np.ma.getmaskarray(self.ma_stack).all(axis=0)
minidx = np.argmin(np.ma.getmaskarray(self.ma_stack), axis=0)
maxidx = np.argmin(np.ma.getmaskarray(self.ma_stack[::-1]), axis=0)
dt_stack_min = np.zeros(minidx.shape, dtype=self.dtype)
dt_stack_max = np.zeros(maxidx.shape, dtype=self.dtype)
for n, dt_o in enumerate(self.date_list_o):
dt_stack_min[minidx == n] = dt_o
dt_stack_max[maxidx == (len(self.date_list_o)-1 - n)] = dt_o
self.dt_stack_min = np.ma.array(dt_stack_min, mask=allmask)
self.dt_stack_max = np.ma.array(dt_stack_max, mask=allmask)
self.dt_stack_ptp = np.ma.masked_equal((self.dt_stack_max - self.dt_stack_min), 0)
self.dt_stack_center = self.dt_stack_min + self.dt_stack_ptp.filled(0)/2.0
#Should pull out unmasked indices at each pixel along axis 0
#Take min index along axis 0
#Then create grids by pulling out corresponding value from date_list_o
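        #Worked example of the index trick above: for a pixel whose mask along
        #axis 0 is [True, False, True, False] (4 dates), argmin on the mask
        #returns 1 (first unmasked date) and argmin on the reversed mask
        #returns 0, i.e. original index 4-1-0 = 3 (last unmasked date), so
        #dt_stack_min/max pick the first and last valid dates at that pixel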
def write_datestack(self):
#stat_list = ['dt_stack_ptp', 'dt_stack_mean', 'dt_stack_min', 'dt_stack_max', 'dt_stack_center']
stat_list = ['dt_stack_ptp', 'dt_stack_min', 'dt_stack_max', 'dt_stack_center']
if any([not hasattr(self, i) for i in stat_list]):
#self.make_datestack()
self.compute_dt_stats()
print("Writing out datestack stats")
#Create dummy ds - might want to use vrt here instead
driver = gdal.GetDriverByName("MEM")
ds = driver.Create('', self.dt_stack_ptp.shape[1], self.dt_stack_ptp.shape[0], 1, gdal.GDT_Float32)
ds.SetGeoTransform(self.gt)
ds.SetProjection(self.proj)
#Write out with malib, should preserve ma type
out_prefix = os.path.splitext(self.stack_fn)[0]
iolib.writeGTiff(self.dt_stack_ptp, out_prefix+'_dt_ptp.tif', ds)
self.dt_stack_ptp.set_fill_value(-9999)
#iolib.writeGTiff(self.dt_stack_mean, out_prefix+'_dt_mean.tif', ds)
#self.dt_stack_mean.set_fill_value(-9999)
iolib.writeGTiff(self.dt_stack_min, out_prefix+'_dt_min.tif', ds)
self.dt_stack_min.set_fill_value(-9999)
iolib.writeGTiff(self.dt_stack_max, out_prefix+'_dt_max.tif', ds)
self.dt_stack_max.set_fill_value(-9999)
iolib.writeGTiff(self.dt_stack_center, out_prefix+'_dt_center.tif', ds)
self.dt_stack_center.set_fill_value(-9999)
    #Note: want to change the variable names min/max here
#Might be better to save out as multiband GTiff here
def savestack(self):
print("Saving stack to: %s" % self.stack_fn)
out_args = {}
out_args['ma_stack_full'] = self.ma_stack.filled(np.nan)
out_args['proj'] = str(self.proj)
out_args['gt'] = self.gt
out_args['res'] = self.res
out_args['extent'] = self.extent
out_args['n_thresh'] = self.n_thresh
out_args['min_dt_ptp'] = self.min_dt_ptp
out_args['fn_list'] = np.array(self.fn_list)
out_args['source'] = np.array(self.source)
out_args['error'] = self.error.filled(np.nan)
out_args['error_dict_list'] = self.error_dict_list
out_args['date_list'] = self.date_list.astype('str').filled('None')
out_args['date_list_o'] = self.date_list_o.filled(np.nan)
#Should really write out flags used for stack creation
#out_args['flags']={'datestack':self.datestack, 'stats':self.stats, 'med':self.med, 'trend':self.trend, 'sort':self.sort, 'save':self.save}
if self.datestack:
#out_args['dt_stack'] = self.dt_stack.filled(np.nan)
#out_args['dt_mean'] = self.dt_stack_mean.filled(np.nan)
out_args['dt_ptp'] = self.dt_stack_ptp.filled(np.nan)
out_args['dt_min'] = self.dt_stack_min.filled(np.nan)
out_args['dt_max'] = self.dt_stack_max.filled(np.nan)
out_args['dt_center'] = self.dt_stack_center.filled(np.nan)
if self.stats:
out_args['count'] = self.stack_count.filled(0)
out_args['mean'] = self.stack_mean.filled(np.nan)
out_args['min'] = self.stack_min.filled(np.nan)
out_args['max'] = self.stack_max.filled(np.nan)
out_args['std'] = self.stack_std.filled(np.nan)
if self.med:
out_args['med'] = self.stack_med.filled(np.nan)
out_args['nmad'] = self.stack_nmad.filled(np.nan)
if self.trend:
out_args['robust'] = self.robust
out_args['trend'] = self.stack_trend.filled(np.nan)
out_args['intercept'] = self.stack_intercept.filled(np.nan)
out_args['detrended_std'] = self.stack_detrended_std.filled(np.nan)
#out_args['rsquared'] = self.stack_rsquared.filled(np.nan)
np.savez_compressed(self.stack_fn, **out_args)
#Now write out a filename list for reference
#Could also add metadata like extent, res, etc.
#Might be best to dump as json
list_fn = os.path.splitext(self.stack_fn)[0]+'_fn_list.txt'
f = open(list_fn,'w')
for i in self.fn_list:
f.write('%s\n' % i)
f.close()
def loadstack(self):
print("Loading stack from: %s" % self.stack_fn)
data = np.load(self.stack_fn, encoding='latin1')
#self.fn_list = list([i.decode("utf-8") for i in data['fn_list']])
self.fn_list = data['fn_list']
#Load flags originally used for stack creation
#self.flags = data['flags']
#{'datestack':self.datestack, 'stats':self.stats, 'med':self.med, 'trend':self.trend, 'sort':self.sort, 'save':self.save}
if 'source' in data:
self.source = list(data['source'])
else:
self.source = ['None' for i in self.fn_list]
if 'error' in data:
self.error = np.ma.fix_invalid(data['error'], fill_value=-9999)
else:
self.error = np.ma.zeros(len(self.fn_list))
#if 'error_dict_list' in data:
# self.error_dict_list = data['error_dict_list'][()]
#else:
self.error_dict_list = [None for i in self.fn_list]
#This is a shortcut, should load from the data['date_list'] arrays
if 'date_list_o' in data:
from pygeotools.lib import timelib
from datetime import datetime
self.date_list_o = np.ma.fix_invalid(data['date_list_o'], fill_value=1.0)
#This is a hack - need universal timelib time zone support or stripping
self.date_list = np.ma.masked_equal([i.replace(tzinfo=None) for i in timelib.o2dt(self.date_list_o)], datetime(1,1,1))
else:
self.get_date_list()
print("Loading ma stack")
self.ma_stack = np.ma.fix_invalid(data['ma_stack_full']).astype(self.dtype)
#Note: the str is an intermediate fix - all new stacks should have str written
self.proj = str(data['proj'])
#If we don't have gt, we're in trouble - can't recompute res/extent
if 'gt' in data:
self.gt = data['gt']
else:
print("No geotransform found in stack")
#Check if res and extent are defined - can reconstruct
#Should throw error
#Note: Once we have gt, could just run get_res() and get_extent() to avoid the following
#Or could check to make sure consistent
#Some stacks in Oct 2015 and Nov 2015 did not have res/extent saved properly
"""
if 'res' in data:
if data['res'] != 'None':
#self.res = float(data['res'])
self.res = float(np.atleast_1d(data['res'])[0])
else:
self.get_res()
else:
self.get_res()
if 'extent' in data:
if data['extent'] != 'None':
#self.extent = list(data['extent'])
#self.extent = list(np.atleast_1d(data['extent'])[0])
extent = np.atleast_1d(data['extent'])[0]
if isinstance(extent, str):
self.extent = [float(x) for x in extent.split()]
else:
self.extent = list(extent)
else:
self.get_extent()
else:
self.get_extent()
"""
#Just do this to be safe, if gt is bad, no point in proceeding
self.get_res()
self.get_extent()
saveflag=False
if self.datestack:
#statlist = ['dt_stack', 'dt_mean', 'dt_ptp', 'dt_min', 'dt_max', 'dt_center']
statlist = ['dt_ptp', 'dt_min', 'dt_max', 'dt_center']
if all([s in data for s in statlist]):
print("Loading datestack")
#self.dt_stack = np.ma.fix_invalid(data['dt_stack']).astype(self.dtype)
#self.dt_stack_mean = np.ma.fix_invalid(data['dt_mean'], fill_value=-9999).astype(self.dtype)
self.dt_stack_ptp = np.ma.fix_invalid(data['dt_ptp'], fill_value=-9999).astype(self.dtype)
self.dt_stack_min = np.ma.fix_invalid(data['dt_min'], fill_value=-9999).astype(self.dtype)
self.dt_stack_max = np.ma.fix_invalid(data['dt_max'], fill_value=-9999).astype(self.dtype)
self.dt_stack_center = np.ma.fix_invalid(data['dt_center'], fill_value=-9999).astype(self.dtype)
else:
if self.date_list_o.count() > 1:
#self.make_datestack()
self.compute_dt_stats()
self.write_datestack()
saveflag=True
if self.stats:
#Could do this individually to save time
statlist = ['count', 'mean', 'std', 'min', 'max']
if self.med:
statlist.append('med')
statlist.append('nmad')
if all([s in data for s in statlist]):
print("Loading stats")
self.stack_count = np.ma.masked_equal(data['count'], 0).astype(np.uint16)
self.stack_mean = np.ma.fix_invalid(data['mean'], fill_value=-9999).astype(self.dtype)
self.stack_std = np.ma.fix_invalid(data['std'], fill_value=-9999).astype(self.dtype)
self.stack_min = np.ma.fix_invalid(data['min'], fill_value=-9999).astype(self.dtype)
self.stack_max = np.ma.fix_invalid(data['max'], fill_value=-9999).astype(self.dtype)
if self.med:
self.stack_med = np.ma.fix_invalid(data['med'], fill_value=-9999).astype(self.dtype)
                self.stack_nmad = np.ma.fix_invalid(data['nmad'], fill_value=-9999).astype(self.dtype)
from collections import defaultdict
import logging
import numpy as np
import pandas as pd
from config import paths
from descr import dihedrals, contacts, hbonds, params
from pdb_component import pdb_interface
import traceback
import os
def calculate_single_tolerant(pdb_id, cid, seq_marker):
seq_marker = int(seq_marker)
pdb_data = pdb_interface.get_info_for(pdb_id)
if pdb_data is None:
raise Exception(f"PDB file download fail for {pdb_id}.")
ATOM, HETATM, hb = pdb_data
try:
dsr_snos = _get_sno_range(ATOM, cid, seq_marker)
# if dsr_snos is None or len(dsr_snos) != 30 or dsr_snos[0] != seq_marker:
# msg = f"ATOM lines not found in range({seq_marker}, "
# f"{seq_marker + 30}) for {pdb_id}:{cid}.<br>"
# raise Exception(msg)
res, C, CA, N = _from_considered_elements_single(ATOM, dsr_snos, cid)
pept_bonds = _get_pept_bonds(CA, dsr_snos)
# For filling descr df
res_CA = _get_res_CA(res, CA, dsr_snos)
angles, CA = dihedrals.get_descr_dihedrals(C, CA, N, dsr_snos)
hbond_descr = hbonds.get_descr_hb(hb, ATOM, HETATM, dsr_snos)
heavy_atom_contacts, hetatom_contacts, hetatom_covalent = \
contacts.get_contacts(ATOM, HETATM, cid, dsr_snos)
descr = _assemble_descr(hetatom_contacts, hetatom_covalent,
heavy_atom_contacts, angles, hbond_descr,
res_CA, pept_bonds)
full_descr = _add_columns(descr, pdb_id, seq_marker, cid)
except Exception as e:
msg = f"Exception caught in descriptor calculation. Traceback: " \
f"<{traceback.format_exc()}>. Error: <{e}>"
raise Exception(msg)
return full_descr
def calculate_single(pdb_id, cid, seq_marker):
seq_marker = int(seq_marker)
pdb_data = pdb_interface.get_info_for(pdb_id)
if pdb_data is None:
raise Exception(f"PDB file download fail for {pdb_id}.")
ATOM, HETATM, hb = pdb_data
try:
dsr_snos = _get_sno_range(ATOM, cid, seq_marker)
if dsr_snos is None or len(dsr_snos) != 30 or dsr_snos[0] != seq_marker:
            msg = (f"ATOM lines not found in range({seq_marker}, "
                   f"{seq_marker + 30}) for {pdb_id}:{cid}.<br>")
raise Exception(msg)
res, C, CA, N = _from_considered_elements_single(ATOM, dsr_snos, cid)
pept_bonds = _get_pept_bonds(CA, dsr_snos)
# For filling descr df
res_CA = _get_res_CA(res, CA, dsr_snos)
angles, CA = dihedrals.get_descr_dihedrals(C, CA, N, dsr_snos)
hbond_descr = hbonds.get_descr_hb(hb, ATOM, HETATM, dsr_snos)
heavy_atom_contacts, hetatom_contacts, hetatom_covalent = \
contacts.get_contacts(ATOM, HETATM, cid, dsr_snos)
descr = _assemble_descr(hetatom_contacts, hetatom_covalent,
heavy_atom_contacts, angles, hbond_descr,
res_CA, pept_bonds)
full_descr = _add_columns(descr, pdb_id, seq_marker, cid)
except Exception as e:
msg = f"Exception caught in descriptor calculation. Traceback: " \
f"<{traceback.format_exc()}>. Error: <{e}>"
raise Exception(msg)
return full_descr
def calculate(motif_pos_map):
descrs = pd.DataFrame()
print(f"Total length: {len(motif_pos_map)}.")
print(len(motif_pos_map))
for i, (pdb_id, motif_cid_map) in enumerate(motif_pos_map.items()):
if not (i % 10):
print(i)
print(f"{len(motif_pos_map) - i}: {pdb_id}")
motif_pos_s = motif_cid_map['sno_markers']
cids = motif_cid_map['cid']
pdb_data = pdb_interface.get_info_for(pdb_id)
if pdb_data is None:
continue
ATOM, HETATM, hb = pdb_data
if not isinstance(motif_pos_s, list):
motif_pos_s = [motif_pos_s]
cids = [cids]
for motif_pos, cid in zip(motif_pos_s, cids):
try:
dsr_snos = _get_sno_range(ATOM, cid, motif_pos)
if dsr_snos is None:
continue
res, C, CA, N = _from_considered_elements(ATOM, dsr_snos, cid)
pept_bonds = _get_pept_bonds(CA, dsr_snos)
# For filling descr df
res_CA = _get_res_CA(res, CA, dsr_snos)
angles, CA = dihedrals.get_descr_dihedrals(C, CA, N, dsr_snos)
hbond_descr = hbonds.get_descr_hb(hb, ATOM, HETATM, dsr_snos)
heavy_atom_contacts, hetatom_contacts, hetatom_covalent = \
contacts.get_contacts(ATOM, HETATM, cid, dsr_snos)
descr = _assemble_descr(hetatom_contacts, hetatom_covalent,
heavy_atom_contacts, angles, hbond_descr,
res_CA, pept_bonds)
full_descr = _add_columns(descr, pdb_id, motif_pos, cid)
descrs = descrs.append(full_descr, ignore_index=True)
except Exception as e:
print(e)
print(f"Calc_descr failed for {pdb_id}:{cid}")
pdb_suffix = pdb_id.lower().strip()
if pdb_suffix+".pkl" in paths.PDB_PARSED_SET:
os.remove(os.path.join(paths.PDB_PARSED,
pdb_suffix + ".pkl"))
# raise
continue
return descrs
def _get_param_to_consider(ATOM, marker, cids):
param_to_consider = []
for cid in cids:
dsr_snos = _get_sno_range(ATOM, cid, marker)
if dsr_snos is None:
continue
param_to_consider.append((cid, dsr_snos, marker))
return param_to_consider
def _get_sno_range(ATOM, cid, seq_marker):
start_sno = seq_marker + params.OFFSETS[0]
end_sno = seq_marker + params.OFFSETS[1]
ATOM_cid = ATOM[ATOM.cid.isin([cid])]
while start_sno not in ATOM_cid.sno.values:
start_sno += 1
if start_sno == end_sno:
msg = (f"No ATOM lines found in dsr_snos range "
f"{start_sno}-{end_sno}.")
print(msg)
return None
while end_sno not in ATOM_cid.sno.values:
end_sno -= 1
if start_sno == end_sno:
return None
dsr_snos = range(start_sno, end_sno)
return dsr_snos
def _add_columns(descr, filename, seq_marker, cid):
length = len(descr['sno'])
descr['filename'] = [filename for _ in range(length)]
descr['seq_marker'] = [seq_marker for _ in range(length)]
descr['cid'] = [cid for _ in range(length)]
descr['relative_sno'] = descr.sno.values - descr.seq_marker.values
descr = descr.reindex(sorted(descr.columns), axis=1)
return descr
def _get_pept_bonds(CA, dsr_snos):
"""
    This assumes that a peptide bond exists between two residues if they share
    the same cid, their aname is CA, their resi is in resi_list, and they are
    adjacent to each other in CA at a distance of less than 4 in x/y/z coord
    units.
    Returns a dict with 'sno' and a boolean 'pept_bonds' adjacency matrix in
    which entry (i, i+1) marks a linked pair, relative to position along
    dsr_snos.
"""
peptide_pairs = set()
for i in range(len(CA) - 1):
a = CA[i]
b = CA[i + 1]
c = a - b
if np.sqrt(np.einsum('i,i', c, c)) < 4:
peptide_pairs.add((i, i + 1))
# bonds_matrix = True if (i, j) in dsr_snos else False
bonds_matrix = np.zeros((len(dsr_snos), len(dsr_snos)), dtype=bool)
peptide_pairs = np.array(list(peptide_pairs)).T
bonds_matrix[peptide_pairs[0], peptide_pairs[1]] = True
bonds_matrix = list(bonds_matrix)
peptide_bonds = dict()
peptide_bonds['sno'] = dsr_snos
peptide_bonds['pept_bonds'] = bonds_matrix
return peptide_bonds
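# Illustrative sketch (not part of the original module): demonstrates the
# adjacency criterion used by _get_pept_bonds on made-up CA coordinates.
def _example_pept_bonds():
    toy_CA = np.array([
        [0.0, 0.0, 0.0],
        [3.8, 0.0, 0.0],  # ~3.8 units from the previous CA -> counted as bonded
        [9.0, 0.0, 0.0],  # >4 units away -> no peptide bond inferred
    ])
    toy_snos = range(10, 13)
    bonds = _get_pept_bonds(toy_CA, toy_snos)
    # bonds['pept_bonds'][0][1] is True, bonds['pept_bonds'][1][2] is False
    return bonds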
def _get_res_CA(ress, CAs, dsr_snos):
res_CA = defaultdict(list)
for sno, res, ca in zip(dsr_snos, ress, CAs):
res_CA['sno'].append(sno)
res_CA['res'].append(res)
res_CA['CA'].append(ca)
return res_CA
def _from_considered_elements(ATOM, dsr_snos, cid):
ATOM = ATOM.filter(['cid', 'sno', 'aname', 'coord', 'res'])
ATOM = ATOM[(ATOM.sno.isin(dsr_snos)) &
(ATOM.aname.isin(("N", "C", "CA"))) &
(ATOM.cid == cid)]
coords = []
res = []
for sno in dsr_snos:
desired_row = ATOM[ATOM.sno == sno]
for term in ('C', 'CA', 'N'):
selected = desired_row[desired_row.aname == term]
res.append(selected.res.values[0])
coords.append(selected.coord.values[0])
res = np.array([i for i in res[::3]])
C = np.array([i for i in coords[::3]])
CA = np.array([i for i in coords[1::3]])
N = np.array([i for i in coords[2::3]])
assert len(res) == len(dsr_snos), len(res)
assert len(C) == len(dsr_snos)
return res, C, CA, N
# def _from_considered_elements(ATOM, dsr_snos, cid):
# print(ATOM.columns)
# ATOM = ATOM.filter(['cid', 'sno', 'aname', 'coord', 'res'])
# ATOM = ATOM[
# (ATOM.sno.isin(dsr_snos)) & (ATOM.aname.isin(("N", "C", "CA"))) & (
# ATOM.cid == cid)]
#
# res = np.array([i for i in ATOM.res.values[::3]])
# coords = ATOM.coord.values
# C = np.array([i for i in coords[::3]])
# CA = np.array([i for i in coords[1::3]])
# N = np.array([i for i in coords[2::3]])
# res = []
# for sno in dsr_snos:
# desired_row = ATOM[ATOM.sno == sno]
#
# coords = []
# for term in ('C', 'CA', 'N'):
# selected = desired_row[desired_row.aname == term][0]
# res.append(selected.res)
# coords.append(selected.coord) # res.append(selected.res[0])
# C = np.array([i for i in coords[::3]])
# CA = np.array([i for i in coords[1::3]])
# N = np.array([i for i in coords[2::3]])
# # print()
#
# # ATOM = ATOM.set_index(['sno'], drop=False)
# # for row in ATOM.iterrows():
# # print(row)
# import sys
# sys.exit()
# print(ATOM)
# ATOM = ATOM.sort_values(['sno', 'aname'])
# # ATOM = ATOM.sort(['sno', 'aname'])
# print(ATOM)
# np.testing.assert_array_equal(ATOM.aname.values[:3],
# np.array(['C', 'CA', 'N']))
# np.testing.assert_array_equal(ATOM.aname.values[3:6],
# np.array(['C', 'CA', 'N']))
#
# res = np.array([i for i in ATOM.res.values[::3]])
# coords = ATOM.coord.values
# C = np.array([i for i in coords[::3]])
# CA = np.array([i for i in coords[1::3]])
# N = np.array([i for i in coords[2::3]])
#
# assert len(res) == len(dsr_snos), len(res)
# assert len(C) == len(dsr_snos)
#
# return res, C, CA, N
def _from_considered_elements_single(ATOM, dsr_snos, cid):
ATOM = ATOM.filter(['cid', 'sno', 'aname', 'coord', 'res'])
ATOM = ATOM[(ATOM.sno.isin(dsr_snos))
& (ATOM.aname.isin(("N", "C", "CA")))
& (ATOM.cid.isin([cid]))]
ATOM = ATOM.set_index(['sno', 'aname'], drop=False)
ATOM = ATOM.sort_index(axis=0, sort_remaining=True)
np.testing.assert_array_equal(ATOM.aname.values[:3],
np.array(['C', 'CA', 'N']))
np.testing.assert_array_equal(ATOM.aname.values[3:6],
np.array(['C', 'CA', 'N']))
    res = np.array([i for i in ATOM.res.values[::3]])
#!/usr/bin/env python
"""Basic implementation of CHOMP trajectory optimization algorithm.
Optimize over q1...qn, with q0 and qn+1 the fixed end points.
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy import sparse
import IPython
from mm2d import models
class CircleField:
def __init__(self, c, r):
self.c = c
self.r = r
def signed_dist(self, x):
return np.linalg.norm(x - self.c) - self.r
def signed_dist_grad(self, x):
return (x - self.c) / np.linalg.norm(x - self.c)
def cost(self, x, eps):
d = self.signed_dist(x)
if d <= 0:
return -d + 0.5 * eps
elif d <= eps:
return (d - eps) ** 2 / (2 * eps)
return 0
def cost_grad(self, x, eps):
d = self.signed_dist(x)
dg = self.signed_dist_grad(x)
if d <= 0:
return -dg
elif d <= eps:
return -(d - eps) * dg / eps
return np.zeros(dg.shape)
class FloorField:
def __init__(self, y):
self.y = y
def signed_dist(self, p):
return p[1] - self.y
def signed_dist_grad(self, p):
return np.sign([0, p[1]])
def cost(self, p, eps):
d = self.signed_dist(p)
if d <= 0:
return d ** 2
return 0
def cost_grad(self, x, eps):
d = self.signed_dist(x)
dg = self.signed_dist_grad(x)
if d <= 0:
return 2 * d * dg
return np.zeros(dg.shape)
class ObstacleField:
def __init__(self, obstacles):
self.obstacles = obstacles
def cost(self, p, eps):
cost = np.sum([obs.cost(p, eps) for obs in self.obstacles])
return cost
def cost_grad(self, p, eps):
grad = np.sum([obs.cost_grad(p, eps) for obs in self.obstacles], axis=0)
return grad
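# Illustrative sketch (not in the original file): build a couple of cost fields
# and query the combined obstacle cost and gradient at a point; the obstacle
# centre, radius, query point and eps below are arbitrary example values.
def _example_obstacle_cost():
    circle = CircleField(c=np.array([1.0, 1.0]), r=0.5)
    floor = FloorField(y=0.0)
    field = ObstacleField([circle, floor])
    p = np.array([1.2, 0.9])
    eps = 0.1
    return field.cost(p, eps), field.cost_grad(p, eps)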
def fd1(N, n, q0, qf):
"""First-order finite differencing matrix."""
# construct the finite differencing matrix
d1 = np.ones(N + 1)
d2 = -np.ones(N)
# K0 is N+1 x N
K0 = sparse.diags((d1, d2), [0, -1]).toarray()[:, :-1]
# kron to make it work for n-dimensional inputs
K = np.kron(K0, np.eye(n))
e = np.zeros((N + 1) * n)
e[:n] = -q0
e[-n:] = qf
return K, e
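# Illustrative sketch (not in the original file): for a 1-D trajectory, K @ q + e
# reproduces the first differences of the padded trajectory, including the fixed
# end points q0 and qf. The waypoint values below are arbitrary.
def _example_fd1():
    N, n = 3, 1
    q0 = np.array([0.0])
    qf = np.array([4.0])
    K, e = fd1(N, n, q0, qf)
    q = np.array([1.0, 2.0, 3.0])  # interior waypoints q1..qN
    return K.dot(q) + e  # [q1-q0, q2-q1, q3-q2, qf-q3] -> [1., 1., 1., 1.]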
def fd2(N, n, q0, qf):
"""Second-order finite differencing matrix."""
# construct the finite differencing matrix
    d1 = -2 * np.ones(N)
import glob
import json
import logging
import os
import random
import shutil
import sys
from typing import Any, Dict, List, Optional, Tuple, Union
import h5py
import nibabel as nib
import nrrd
import numpy as np
import yaml
from filelock import FileLock
from nibabel.nifti1 import Nifti1Image
from nibabel.nifti2 import Nifti2Image
from PIL import Image
NiftiImage = Union[Nifti1Image, Nifti2Image]
# define suffixes for created files
annot_suffix_labelmap = "_annot_labelmap.nrrd"
annot_suffix_segments = "_annot.seg.nrrd"
data_suffix = "_data.nii.gz"
default_seg_file = "Segmentation_empty.seg.nrrd"
piece_overlap = (5, 5, 5)
completed_value = 1
overview_bound_size = (1024, 1024, 512)
annotation_field_map = {
"space": "string",
"kinds": "string list",
"space directions": "double matrix",
"space origin": "double vector",
}
segmentation_field_map = {
"space": "string",
"kinds": "string list",
"space directions": "double matrix",
"space origin": "double vector",
"measurement frame": "double matrix",
}
segment_properties = [
"Color",
"ColorAutoGenerated",
"Extent",
"ID",
"Name",
"NameAutoGenerated",
"Tags",
]
PieceIndex = Tuple[int, int, int]
def find_path_pieces(path: str) -> List[PieceIndex]:
"""
Find pieces in the given path, for example which pieces have been completed or excluded
:param path: Path to find pieces in
:return: List of piece indices found in the path
"""
piece_glob = os.path.join(path, "*_*_*" + annot_suffix_segments)
found_pieces = glob.glob(piece_glob)
found_indices = []
for f in found_pieces:
file = os.path.split(f)[-1]
filebase, _ = os.path.splitext(file)
index_strings = filebase.split("_")[:3]
index: PieceIndex = tuple([int(x) for x in index_strings])
found_indices.append(index)
logging.info("Finding %d pieces in %s" % (len(found_indices), path))
return found_indices
def get_completed_map(
config: Dict[str, Any],
subdir_num: int,
annot_pieces_dim: Optional[Tuple[int, int, int]] = None,
find_in_progress: bool = False,
) -> np.ndarray:
"""
Get map of pieces that have been annotated and recorded as completed (ie the pieces
in the completed directory)
:param Dict[str, Any] config: Configuration dictionary
:param int subdir_num: Subdirectory number
:param Optional[Tuple[int, int, int]] annot_pieces_dim: Dimensions of full annotation pieces map
:param bool find_in_progress: Find pieces from the in-progress folder instead of completed folder
:return: Array of annotation pieces, of same size as annotation map, representing which pieces
have been completed
"""
if annot_pieces_dim is None:
# get annotation piece map from file
annot_map, annot_header, annotation_scale = get_annot_map(
config, subdir_num
)
annot_pieces_dim = annot_map.shape
piece_path_name = "completed_piece_path"
if find_in_progress:
piece_path_name = "inprogress_piece_path"
completed_piece_path = get_full_path(config, subdir_num, piece_path_name)
found_indices = find_path_pieces(completed_piece_path)
# create occupancy grid of found completed annotations
completed_map = np.zeros(annot_pieces_dim, dtype="int")
if len(found_indices) > 0:
i = np.array(found_indices)
completed_map[i[:, 0], i[:, 1], i[:, 2]] = completed_value
return completed_map
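# Illustrative sketch (not in the original module): the occupancy-grid indexing
# used above, shown stand-alone with a few made-up piece indices.
def _example_occupancy_grid():
    found_indices = [(0, 1, 2), (3, 0, 1)]  # hypothetical piece indices
    grid = np.zeros((4, 4, 4), dtype="int")
    i = np.array(found_indices)
    grid[i[:, 0], i[:, 1], i[:, 2]] = completed_value
    return grid  # exactly two cells are set to completed_value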
def get_annot_map(
config: Dict[str, Any], subdir_num: int
) -> Tuple[np.ndarray, Dict[str, Any], np.ndarray]:
"""
Get map of annotation pieces from file, representing which pieces are within the
covered region.
:param Dict[str, Any] config: Configuration dictionary
:param int subdir_num: Subdirectory number
:return: Tuple of array of annotation pieces, shape (x, y, z), with values indicating if each
piece is within the covered region, dictionary of header recorded in annotation file,
and array with values=3, representing scale of annotation map compared to source data.
"""
annotation_piece_file = get_full_path(
config, subdir_num, "pieces_overview"
)
excluded_path = get_full_path(config, subdir_num, "excluded_piece_path")
annot_map, annot_header = nrrd.read(
annotation_piece_file, custom_field_map=annotation_field_map
)
annotation_scale_matrix = annot_header["space directions"]
annotation_scale = np.abs(
np.round(np.diagonal(annotation_scale_matrix)).astype("int")
) # should be same as annotation_size
all_annotated = annot_map.sum()
# find pieces in the excluded folder and remove from the annotation map
excluded_indices = find_path_pieces(excluded_path)
# create occupancy grid of found completed annotations
if len(excluded_indices) > 0:
i = np.array(excluded_indices)
annot_map[i[:, 0], i[:, 1], i[:, 2]] = 0
logging.info(
"%d annotation pieces (%d total, %d excluded) from %s"
% (
annot_map.sum(),
all_annotated,
len(excluded_indices),
annot_map.shape,
)
)
return annot_map, annot_header, annotation_scale
def get_project_data(project_data_file: str, lock: Optional[FileLock] = None):
"""
Read project data fields from given project data file
:param str project_data_file: Path to project data file
:param Optional[FileLock] lock: Lock to use if already created. This allows a write to be performed under
the same lock
:return: Dict of fields read from data file
"""
if lock is None:
lock = FileLock(project_data_file + ".lock")
stored_data = {}
with lock:
# read existing records from file
if os.path.exists(project_data_file):
try:
with open(project_data_file, "r") as assoc_file:
stored_data = json.load(assoc_file)
except FileNotFoundError:
logging.info(
"Piece association file %s not found" % project_data_file
)
except IOError:
logging.info(
"Piece association file %s not found" % project_data_file
)
if stored_data == {}:
raise RuntimeError(
"Could not read project data from file %s"
% project_data_file
)
return stored_data
def init_logging():
""" Initialise logging """
root = logging.getLogger()
root.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
handler.setFormatter(formatter)
root.addHandler(handler)
# todo: add output to log file in project dir as well
def get_annotation(
piece_index: np.ndarray, data_path: str, segment_format=False
) -> Tuple[np.ndarray, Dict[str, Any], np.ndarray]:
"""
Get annotation data for the given piece index
:param np.ndarray piece_index: Array with values=3, of index of piece to read
:param str data_path: Full path of directory containing given piece
:param bool segment_format: Read resulting annotation as segment format, with shape shape (nsegs, x, y, z),
with one layer per segment, if true. Otherwise return as layer format (nlayers, x, y, z)
:return: Tuple of annotation piece data in either segment format with shape (nsegs, x, y, z)
or layer format with shape (nlayers, x, y, z), dictionary of header information read from annotation file, and
offset of data array within the space of the tile
"""
indices_str = [str(x) for x in piece_index.tolist()]
filename = "_".join(indices_str) + annot_suffix_segments
neighbour_file = os.path.join(data_path, filename)
input_data, header = nrrd.read(neighbour_file)
data_suboffset = header["space origin"]
if segment_format:
seg_data = make_seg_format(input_data)
else:
seg_data, layer_header = make_layer_format(input_data)
# update header to set layers produced in make_layer_format
header.update(layer_header)
return seg_data, header, data_suboffset
def write_annot_file(
index_name: Optional[str],
write_path: Optional[str],
annot_data: Optional[np.ndarray],
annot_fields: Optional[Dict[str, Any]],
scales: np.ndarray,
annot_write_path: Optional[str] = None,
) -> str:
"""
Write data to annotation file
:param str index_name: Name representing index of annotation piece
:param str write_path: Path to write to
:param np.ndarray annot_data: Annotation data to write, in segment format with shape (nsegs, x, y, z)
:param Dict[str, Any] annot_fields: Header information to write to file
:param np.ndarray scales: Array of scale/direction information written in header
:param Optional[str] annot_write_path: Full path to write to (used instead of index_name and write_path)
:return: Full path of file written to
"""
if (
annot_write_path is None
and index_name is not None
and write_path is not None
):
annot_file = index_name + annot_suffix_segments
annot_write_path = os.path.join(write_path, annot_file)
os.makedirs(write_path, exist_ok=True)
if annot_data is None or annot_fields is None:
# copy default segment file
script_path = os.path.dirname(os.path.abspath(__file__))
default_file = os.path.join(script_path, os.pardir, default_seg_file)
shutil.copyfile(default_file, annot_write_path)
logging.info(
"Copying initial annotation from %s to %s"
% (default_file, annot_write_path)
)
else:
space_array = np.concatenate(
[np.full((1, 3), np.nan, dtype="float"), scales], axis=0
) # (4,3)
header = {
"space": "right-anterior-superior", # copied from Slicer example
}
header.update(annot_fields)
# set space values last to ensure that segmentation properties are defined,
# as the source data may be in labelmap format
header.update(
{
"kinds": [
"list",
"domain",
"domain",
"domain",
], # copied from Slicer example
"space directions": space_array,
"space origin": np.zeros((3,), dtype="float"),
}
)
logging.info("Writing annotation output to %s" % annot_write_path)
# produce layered format representation
layer_data, segment_layer_header = make_layer_format(annot_data)
# update header information to show layers for each segment
header.update(segment_layer_header)
nrrd.write(
annot_write_path,
layer_data,
header,
custom_field_map=segmentation_field_map,
)
return annot_write_path
def get_data_piece(
piece_index: Union[np.ndarray, List[int]], piece_path: str
) -> NiftiImage:
"""
Given a piece index, read the data from file in the given path and return as a
Nifti data object
:param Union[np.ndarray, List[int]] piece_index: List/array of piece index
:param str piece_path: Path containing data file
:return: Nifti data object for piece
"""
index_name = "_".join([str(x) for x in piece_index])
piece_filename = index_name + data_suffix
piece_filepath = os.path.join(piece_path, piece_filename)
piece_data = nib.load(piece_filepath)
return piece_data
def assign_cropped_region(
source_array: np.ndarray,
source_offset: np.ndarray,
dest_array: np.ndarray,
dest_offset: np.ndarray,
copy_dim: np.ndarray,
) -> None:
"""
Given a source and destination arrays, copy data from source to destination for a given
region, specified by an offset within each array, and a dimension representing the size
of the region to be copied. Only data within the region on both source and dest will be
copied. This assumes each array is 3-dimensional
:param source_array: Source array, shape (x, y, z)
:param source_offset: Offset from source array origin to copy from (3,)
:param dest_array: Destination array, shape (x, y, z)
:param dest_offset: Offset from destination array origin to copy to (3,)
:param copy_dim: Size of region to copy, shape (x, y, z)
"""
source_size = np.array(source_array.shape[-3:])
dest_size = np.array(dest_array.shape[-3:])
copy_size = np.array(copy_dim)
source_crop_min = np.clip(source_offset, 0, source_size - 1)
source_crop_max = np.clip(source_offset + copy_size, 0, source_size)
dest_crop_min = np.clip(dest_offset, 0, dest_size - 1)
dest_crop_max = np.clip(dest_offset + copy_size, 0, dest_size)
# find common copy region and update copy range
common_dim = np.minimum(
source_crop_max - source_crop_min, dest_crop_max - dest_crop_min
)
source_crop_max = source_crop_min + common_dim
dest_crop_max = dest_crop_min + common_dim
# create slice definition for defined dimensions in offset/dim, and cover all preceding
# dimensions
source_full_dims = [slice(None)] * (
source_array.ndim - len(source_crop_min)
)
source_slices = source_full_dims + [
slice(x, y) for x, y in zip(source_crop_min, source_crop_max)
]
dest_full_dims = [slice(None)] * (dest_array.ndim - len(dest_crop_min))
dest_slices = dest_full_dims + [
slice(x, y) for x, y in zip(dest_crop_min, dest_crop_max)
]
dest_array[tuple(dest_slices)] = source_array[tuple(source_slices)]
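# Illustrative sketch (not in the original module): copy a 2x2x2 block from a
# source volume into a destination volume at a different offset; the shapes and
# offsets are arbitrary examples, with out-of-range parts clipped as documented.
def _example_assign_cropped_region():
    src = np.arange(4 * 4 * 4).reshape(4, 4, 4)
    dst = np.zeros((5, 5, 5), dtype=src.dtype)
    assign_cropped_region(
        src, np.array([1, 1, 1]), dst, np.array([0, 0, 0]), np.array([2, 2, 2])
    )
    return dst  # dst[0:2, 0:2, 0:2] now holds src[1:3, 1:3, 1:3]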
def get_file_list(data_dir: str, extension: str = "tiff") -> List[str]:
""" Get list of files in given directory with given extension """
glob_pattern = os.path.join(data_dir, "*.%s" % extension)
glob_list = glob.glob(glob_pattern)
sorted_files = sorted(glob_list)
return sorted_files
def load_config(config_file: str) -> Dict[str, Any]:
"""
Given a config file, read and return values as a dict
:param str config_file: Config file path
:return: Configuration dict
"""
# todo: add checks that each required field is defined?
config = yaml.safe_load(open(config_file, "r"))
return config
def get_overview_scale(config: Dict[str, Any], subdir_num: int):
"""
Find the overview scale by reading from the overview data file
"""
overview_volume_path = get_full_path(
config, subdir_num, "overview_reduced_data"
)
overview_data = nib.load(overview_volume_path)
return np.diag(overview_data.affine)[:3]
def read_segment_file(
in_file: str, format: str = "segmentation"
) -> Tuple[np.ndarray, np.ndarray]:
"""
Read the given segmentation from file
:param str in_file: Source file path to read from
:param str format: Format that
:return: Tuple of array of segmentation data, and array of scales of segmentation data
"""
logging.info("Reading overview segmentation from %s" % in_file)
seg_map, seg_header = nrrd.read(in_file)
# todo: change overview segmentation file from labelmap to .seg.nrrd
# todo: check orientation of segment map, and use of values from the following array.
# they appear to be negated in examples, is that important?
seg_scales = np.abs(np.diagonal(seg_header["space directions"]))
if format == "labelmap":
# labelmap format, which is a binary map of the full space. this is simpler to read in
# however is a bit more awkward for the user to create in Slicer so not preferred
pass
elif format == "segmentation":
# read coverage data from .seg.nrrd file
if seg_map.ndim > 3:
# reduce if multiple segments present (take sum)
seg_map = np.clip(seg_map.sum(axis=0), 0, completed_value)
# pad array so it covers the full region (from origin) without an offset. this may
# crop larger values, ie the tensor may start at 0,0,0 and have size smaller than the
# original space, but this shouldn't cause problems
space_origin = seg_header["space origin"]
grid_origin = np.round(space_origin / seg_scales).astype("int")
padding_before = list(zip(grid_origin, [0, 0, 0]))
seg_map = np.pad(seg_map, padding_before, mode="constant")
else:
raise RuntimeError("Unknown format %s" % format)
return seg_map, seg_scales
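# Illustrative sketch (not in the original module): the origin-padding step used
# above, shown on a small array whose space origin maps to grid position (2, 1, 0).
def _example_pad_to_origin():
    seg_map = np.ones((3, 3, 3), dtype="int")
    seg_scales = np.array([1.0, 1.0, 1.0])
    space_origin = np.array([2.0, 1.0, 0.0])
    grid_origin = np.round(space_origin / seg_scales).astype("int")
    padding_before = list(zip(grid_origin, [0, 0, 0]))
    return np.pad(seg_map, padding_before, mode="constant")  # shape (5, 4, 3)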
def get_cropped_source_data(
stack_list: List[str], crop_origin: np.ndarray, crop_max: np.ndarray
) -> np.ndarray:
"""
Read data from the given image files in an image stack
:param List[str] stack_list: List of filenames representing images in a stack
:param np.ndarray crop_origin: Origin of region to crop, array of shape (3,)
:param np.ndarray crop_max: Max position of region to crop, array of shape (3,)
:return: Cropped source data as an array of shape (x,y,z)
"""
stack_files = stack_list[crop_origin[2] : crop_max[2]]
img_slices = []
for f in stack_files:
img = Image.open(f)
img_arr = np.array(img)
# crop from image
img_crop = img_arr[
crop_origin[0] : crop_max[0],
crop_origin[1] : crop_max[1],
]
img_slices.append(img_crop)
return np.stack(img_slices, axis=2)
def get_source_tile_stored(
config: Dict[str, Any],
subdir_num: int,
tile_index: np.ndarray,
check_only=False,
) -> Union[bool, np.ndarray]:
"""
Read source tile from stored data folder and return data, or check if present
:param Dict[str, Any] config: Config dictionary
:param int subdir_num: Subdir data number
:param np.ndarray tile_index: Tile index number, array of shape (3,)
:param bool check_only: Only check if data is present or not
:return: Either array containing data from source data,
or bool value representing if data is present
"""
# try and read from file
indices_str = [str(x) for x in tile_index.tolist()]
data_path = get_full_path(config, subdir_num, "source_piece_path")
filename = "_".join(indices_str) + data_suffix
source_file = os.path.join(data_path, filename)
if check_only:
return os.path.exists(source_file)
nii_data = nib.load(source_file)
return np.array(nii_data.dataobj)
def get_source_data_stored(
config: Dict[str, Any],
subdir_num: int,
data_crop_origin: np.ndarray,
crop_size: np.ndarray,
check_only=False,
) -> Union[bool, np.ndarray]:
"""
Get source data from the given crop position from locally stored tile data, or check
if it is present
:param Dict[str, Any] config: Config dictionary
:param int subdir_num: Subdir data number
:param np.ndarray data_crop_origin: Origin in source units of section to crop, array of shape (3,)
:param np.ndarray crop_size: Size of section to crop, array of shape (3,)
:param bool check_only: Only check if data is present or not
:return: Either array containing specified cropped region from source data,
or bool value representing if data is present
"""
# check if the crop region is covered by stored source tiles
tile_size = np.array(config["annotation_size"])
min_tile = data_crop_origin // tile_size
max_tile_inclusive = (data_crop_origin + crop_size) // tile_size
section_data = None
for x in range(min_tile[0], max_tile_inclusive[0] + 1):
for y in range(min_tile[1], max_tile_inclusive[1] + 1):
for z in range(min_tile[2], max_tile_inclusive[2] + 1):
this_tile_index = np.array([x, y, z])
# try and read file
source_tile = get_source_tile_stored(
config, subdir_num, this_tile_index, check_only
)
if check_only:
if not source_tile:
return False
else:
if section_data is None:
section_data = np.zeros(
crop_size, dtype=source_tile.dtype
)
tile_offset = this_tile_index * tile_size
tile_section_offset = tile_offset - data_crop_origin
assign_cropped_region(
source_tile,
np.array([0, 0, 0]),
section_data,
tile_section_offset,
tile_size,
)
if check_only:
return True
return section_data
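# Illustrative sketch (not in the original module): which tiles does a crop touch?
# This mirrors the min/max tile computation above with made-up numbers
# (64^3 tiles, a 100^3 crop starting at voxel (10, 70, 0)).
def _example_tile_range():
    tile_size = np.array([64, 64, 64])
    data_crop_origin = np.array([10, 70, 0])
    crop_size = np.array([100, 100, 100])
    min_tile = data_crop_origin // tile_size
    max_tile_inclusive = (data_crop_origin + crop_size) // tile_size
    return min_tile, max_tile_inclusive  # -> [0, 1, 0] and [1, 2, 1]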
def get_source_data(
config: Dict[str, Any],
subdir_num: int,
data_crop_origin: np.ndarray,
crop_size: np.ndarray,
) -> np.ndarray:
"""
Get source data from the given crop position in the given subdir, using
a crop size from the specified section dimensions
:param Dict[str, Any] config: Config dictionary
:param int subdir_num: Subdir data number
:param np.ndarray data_crop_origin: Origin in source units of section to crop, array of shape (3,)
:param np.ndarray crop_size: Size of section to crop, array of shape (3,)
:return: Array containing specified cropped region from source data
"""
if get_source_data_stored(
config, subdir_num, data_crop_origin, crop_size, check_only=True
):
logging.info(
"Reading source data with origin %s, size %s from stored data"
% (data_crop_origin, crop_size)
)
return get_source_data_stored(
config, subdir_num, data_crop_origin, crop_size, check_only=False
)
else:
format = config["source_data_format"][subdir_num]
logging.info(
"Reading source data with origin %s, size %s with source format %s"
% (data_crop_origin, crop_size, format)
)
if format == "tiff-stack":
return get_source_data_stack(
config, subdir_num, data_crop_origin, crop_size
)
else:
return get_source_data_hdf5(
config, subdir_num, data_crop_origin, crop_size
)
def get_source_data_stack(
config: Dict[str, Any],
subdir_num: int,
data_crop_origin: np.ndarray,
crop_size: np.ndarray,
) -> np.ndarray:
"""
Get source data from the given crop position from image stack data, using
a crop size from the specified section dimensions
:param Dict[str, Any] config: Config dictionary
:param int subdir_num: Subdir data number
:param np.ndarray data_crop_origin: Origin in source units of section to crop, array of shape (3,)
:param np.ndarray crop_size: Size of section to crop, array of shape (3,)
:return: Array containing specified cropped region from source data
"""
source_data_path = get_source_data_path(config, subdir_num)
section_dims = np.asarray(crop_size)
# read data for given piece from source data
stack_list = get_file_list(source_data_path)
if len(stack_list) == 0:
raise RuntimeError(
"Could not find any source files at path %s" % source_data_path
)
data_crop_max = data_crop_origin + section_dims
logging.info(
"get source data, crop origin %s max %s"
% (data_crop_origin, data_crop_max)
)
return get_cropped_source_data(stack_list, data_crop_origin, data_crop_max)
def get_source_data_hdf5(
config: Dict[str, Any],
subdir_num: int,
data_crop_origin: np.ndarray,
crop_size: np.ndarray,
) -> np.ndarray:
"""
Get source data from the given crop position from HDF5 format source data, using
the specified crop size.
:param Dict[str, Any] config: Config dictionary
:param int subdir_num: Subdir data number
:param np.ndarray data_crop_origin: Origin in source units of section to crop, array of shape (3,)
:param np.ndarray crop_size: Size of section to crop, array of shape (3,)
:return: Array containing specified cropped region from source data
"""
source_path = config["source_data_paths"][subdir_num]
h5_source = h5py.File(source_path, "r")
h5_data_name = config["source_hdf5_dataset_name"]
source_dataset = h5_source[h5_data_name]
logging.info(
"Reading from source H5 file %s, source data size %s"
% (h5_source, source_dataset.shape)
)
if source_dataset.ndim != 3:
raise RuntimeError(
"H5 source data has %d dims, expected 3" % source_dataset.ndim
)
cropped_data = source_dataset[
data_crop_origin[0] : data_crop_origin[0] + crop_size[0],
data_crop_origin[1] : data_crop_origin[1] + crop_size[1],
data_crop_origin[2] : data_crop_origin[2] + crop_size[2],
]
return cropped_data
def get_source_tile_data(
config: Dict[str, Any],
subdir_num: int,
tile_index: np.ndarray,
pad_to_size=True,
) -> np.ndarray:
"""
Get source data for the given tile in the given subdir
:param Dict[str, Any] config: Config dictionary
:param int subdir_num: Subdir number to read from
:param np.ndarray tile_index: Index of tile to read, as array of 3 values
:param bool pad_to_size: If read data is smaller than tile size, pad with zeros to fill tile size
:return: Array of tile data, with dims=3 and shape (x, y, z)
"""
tile_size = np.array(config["annotation_size"])
data_crop_origin = tile_index * tile_size
read_data = get_source_data(
config, subdir_num, data_crop_origin, tile_size
)
read_shape = np.array(read_data.shape)
if np.any(read_shape < tile_size) and pad_to_size:
# add padding to fill to size. this should be added to larger index edge as tiles
# should have origin matching data source origin
pad_size = tile_size - read_shape
read_data = np.pad(
read_data, list(zip([0, 0, 0], pad_size)), mode="constant"
)
return read_data
def flat_to_indexed(
flat_pos: int, config: Dict[str, Any]
) -> Tuple[int, np.ndarray]:
"""
Convert a position from flat format (single number) to indexed format (subdir number and array of 3 numbers)
:param int flat_pos: Position in flat format
:param Dict[str, Any] config: Config dictionary
:return: Tuple of the resulting subdir number and position in indexed format represented as an array
"""
# find subdir number first
num_subdirs = len(config["subdir_paths"])
remainder = flat_pos // num_subdirs
subdir_num = flat_pos - remainder * num_subdirs
# get annotation map dims for this subdir
annot_map, _, _ = get_annot_map(config, subdir_num)
position_index = np.unravel_index(remainder, annot_map.shape)
    return subdir_num, np.array(position_index)
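# Illustrative sketch (not in the original module): the flat -> indexed arithmetic
# above, with a hypothetical 2-subdir project and a 4x3x2 annotation map
# (get_annot_map is bypassed here so the example stays self-contained).
def _example_flat_to_indexed():
    num_subdirs = 2
    annot_shape = (4, 3, 2)
    flat_pos = 13
    remainder = flat_pos // num_subdirs  # 6
    subdir_num = flat_pos - remainder * num_subdirs  # 1
    position_index = np.unravel_index(remainder, annot_shape)  # (1, 0, 0)
    return subdir_num, np.array(position_index)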
from ai_ct_scans import data_loading
import torch
from ai_ct_scans import models
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
from cv2 import blur
from ai_ct_scans import phase_correlation_image_processing
from ai_ct_scans.data_writing import ndarray_to_memmap
plt.ion()
if torch.cuda.is_available():
dev = torch.device("cuda")
else:
dev = "cpu"
def det(tensor):
"""Detach a torch Tensor to a cpu numpy version
Args:
tensor (torch.Tensor): A tensor to be detached and turned into an ndarray
Returns:
(ndarray): The same data as an ndarray
"""
return tensor.cpu().detach().numpy()
def debug_plot(model_out, batch, index=0):
"""Plot the original image, the masked image, and the infilled version. Useful during debugging.
Args:
model_out: The infilled image stack from an Infiller model
batch (dict of tensors): A dictionary with torch Tensors 'labels' and 'input_images'
index (int): The index of the model's output to compare to the original image and masked version, [0-batch size]
"""
out = det(model_out)
ims = det(batch["labels"])
inputs = det(batch["input_images"])
f, axes = plt.subplots(1, 3)
axes = np.ravel(axes)
axes[0].imshow(ims[index, 0, :, :])
axes[0].set_title("original image")
axes[1].imshow(inputs[index, 0, :, :])
axes[1].set_title("masked inputs")
axes[2].imshow(out[index, 0, :, :])
axes[2].set_title("output")
class InfillTrainer:
"""A class for training an ai_ct_scans.models.Infiller network"""
def __init__(
self,
axial_width=256,
coronal_width=256,
sagittal_width=256,
batch_size=8,
batch_width=256,
batch_height=256,
blank_width=64,
num_encoder_convs=3,
encoder_filts_per_layer=10,
neurons_per_dense=512,
num_dense_layers=3,
decoder_filts_per_layer=10,
num_decoder_convs=3,
kernel_size=3,
learning_rate=1e-5,
save_dir=None,
clear_previous_memmaps=False,
save_freq=200,
blur_kernel=None,
show_outline=False,
):
"""Initialises the network and dataset handling, gets the trainer ready for run self.train_for_iterations()
Args:
axial_width (int): How wide the model will expect views taken from the axial plane to be in pixels
coronal_width (int): How wide the model will expect views taken from the coronal plane to be in pixels
sagittal_width (int):How wide the model will expect views taken from the sagittal plane to be in pixels
batch_size (int): How many random views to take for a single training iteration (typically 1-8 trialled)
batch_width (int): How wide the views should be at the point of input to the model in pixels
batch_height (int): How high the views should be at the point of input to the model in pixels
blank_width (int): Square size of the centre masked region to be applied in the middle of each view before
input to network
num_encoder_convs (int): How many convolution-maxpool steps to build into the model in the encoder
encoder_filts_per_layer (int): How many filters to include in the first convolution layer (to be doubled at
each subsequent layer Unet style)
neurons_per_dense (int): (currently disconnected) How many neurons in each dense layer that connects the
convolutional layers in the encoder to the convolutional layers in the decoder
num_dense_layers (int): (currently disconnected) How many layers of dense neurons to use to connect the
convolutional encoder and decoder layers
decoder_filts_per_layer (int): (currently must be same as encoder filts_per_layer)
num_decoder_convs (int): How many upsample-convolutional layers to include in the decoder, currently
throws an error if not equal to num_encoder_convs to fit Unet style of the network
kernel_size (int or tuple of two ints): 2D size of kernels used in Conv2D layers
learning_rate (float): parameter to control the rate at which the model learns, typically <1e-4
save_dir (pathlib Path): A directory in which to save the model during training
clear_previous_memmaps (bool): Whether to re-initialise the dataset (i.e. rebuild memmaps off of original
DICOM data)
save_freq (int): How often to save the model, every save_freq iterations
blur_kernel (None or tuple of ints): If not None, apply a blur to the input views before masking and feeding
                into the network. This is theorised to prevent the model getting stuck due to attempting to recreate high
frequency random noise
show_outline (bool): Whether to perform an edge detection and expose these edges in the masked region. This
helps the model to get the correct shapes at output, without showing it much about the intensity/texture it
should aim for.
"""
self.multi_patient_loader = data_loading.MultiPatientLoader()
# Included just abdo due to simplicity of focusing on one body part
# Mentioned twice in the array to preserve the index for testing for multiple body parts
# self.body_parts = ['abdo', 'thorax']
self.body_parts = ["abdo", "abdo"]
self.scan_nums = ["scan_1", "scan_2"]
for patient in self.multi_patient_loader.patients:
for body_part in self.body_parts:
for scan_num in self.scan_nums:
if clear_previous_memmaps is True:
patient.__getattribute__(body_part).__getattribute__(
scan_num
).delete_memmap()
patient.__getattribute__(body_part).__getattribute__(
scan_num
).load_memmap_and_clear_scan()
self.blur_kernel = blur_kernel
self.show_outline = show_outline
self.axial_width = axial_width
self.coronal_width = coronal_width
self.sagittal_width = sagittal_width
self.batch_width = batch_width
self.batch_height = batch_height
self.batch_size = batch_size
self.blank_width = blank_width
self.loss_weighting_width = int(self.blank_width * 1.5)
self.slicers = [
self.random_axial_slicer,
self.random_coronal_slicer,
self.random_sagittal_slicer,
]
self.plane_masks = self.plane_mask_builder()
self.edge_detection_pad = 3
self.edge_window_width = 2 * self.edge_detection_pad + self.blank_width
self.edge_detection_mask = np.logical_not(
self.plane_mask_builder(blank_width=self.edge_window_width)[0]
)
self.inv_plane_mask = np.logical_not(self.plane_masks[0])
self.loss_masks = self.plane_mask_builder(self.loss_weighting_width)
self.loss_masks = self._convert_loss_masks_to_tensor()
self.label_masks = [
np.logical_not(plane_mask) for plane_mask in self.plane_masks
]
self.patient_indices = list(range(len(self.multi_patient_loader.patients)))
self.model = models.Infiller(
input_height=self.batch_height,
input_width=self.batch_width,
output_height=self.blank_width,
output_width=self.blank_width,
num_encoder_convs=num_encoder_convs,
encoder_filts_per_layer=encoder_filts_per_layer,
neurons_per_dense=neurons_per_dense,
num_dense_layers=num_dense_layers,
decoder_filts_per_layer=decoder_filts_per_layer,
num_decoder_convs=num_decoder_convs,
kernel_size=kernel_size,
)
self.optimiser = torch.optim.AdamW(self.model.parameters(), lr=learning_rate)
if save_dir is None:
save_dir = data_loading.data_root_directory().parent / "infiller"
save_dir.mkdir(exist_ok=True, parents=True)
self.save_dir = save_dir
self.iteration = 0
self.last_n_losses = []
self.loss_num_to_ave_over = 100
self.latest_loss = np.inf
self.save_freq = save_freq
self.input_stack = np.zeros(
[self.batch_size, 1, self.batch_height, self.batch_width], dtype="float64"
)
self.plane_mask_stack = np.zeros_like(self.input_stack)
self.error_weighting = (
self.loss_weighting_width ** 2 / self.axial_width ** 2 + 1
)
self.best_loss = np.inf
def _convert_loss_masks_to_tensor(self):
"""Convert existing loss masks, used to reweight the central masked region in the loss function, to tensors
Returns:
            (tensor): A stack of tensors that are 0 in the central masked region (plus its border) and 1 elsewhere
"""
return torch.Tensor(self.loss_masks).to(dev)
def loss(self, model_out, batch):
"""Defines a custom loss function for the network. Weights the loss such that reproduction of the masked region
(and a small border area around it) contributes to the overall loss on the same order of magnitude as all other
pixels that were predicted
Args:
model_out (torch Tensor): Stack of images that the model has predicted
batch (dict as built by self.build_batch): The batch that was used for the iteration, which should include
at least a 'labels' stack of Tensor images of the same shape as model_out
Returns:
(torch Tensor): the MSE error in the output prediction after reweighting masked region of prediction
"""
error = model_out - batch["labels"]
squared_error = error ** 2
weighted_error = squared_error * (self.error_weighting - self.loss_masks[0])
mse = torch.mean(weighted_error)
return mse
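        # Weighting sketch (follows from error_weighting and loss_masks above, assuming the mask
        # broadcasts over the model output): pixels outside the masked centre get weight
        # (loss_weighting_width / axial_width) ** 2, pixels inside get that plus 1, so the masked
        # region's total contribution is on the same order as the rest of the predicted image.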
def train_step(self):
"""Build a single batch, do a single forward and backward pass."""
batch = self.build_batch()
self.optimiser.zero_grad()
out = self.model(batch)
loss = self.loss(out, batch)
loss.backward()
self.optimiser.step()
detached_loss = loss.cpu().detach().numpy()
        if len(self.last_n_losses) == self.loss_num_to_ave_over:
            self.last_n_losses.pop(0)
        self.last_n_losses.append(detached_loss)
self.iteration += 1
if self.iteration % 5000 == 0:
print(f"{self.iteration} iterations complete")
def train_for_iterations(self, iterations):
"""Train the model for a set number of iterations
Args:
iterations (int): Number of iterations to train for
"""
self.model.train()
progress_bar = tqdm(range(iterations))
for _ in progress_bar:
self.train_step()
progress_bar.set_description(f"Average loss {np.mean(self.last_n_losses)}")
if (self.iteration % self.save_freq) == 0:
self.save_model(self.save_dir)
def save_model(self, directory, bypass_loss_check=False):
"""Save the model. If it has achieved the best loss, save to 'model.pth' within directory, otherwise save to
'latest_model.pth'
Args:
            directory (pathlib Path): A directory in which to save the model
            bypass_loss_check (bool): If True, save to 'model.pth' even if the running loss has not improved
        """
directory.mkdir(exist_ok=True, parents=True)
curr_loss = np.mean(self.last_n_losses)
if curr_loss < self.best_loss or bypass_loss_check:
torch.save(
{
"iteration": self.iteration,
"model_state_dict": self.model.state_dict(),
"optimiser_state_dict": self.optimiser.state_dict(),
"loss": curr_loss,
"running_loss": self.last_n_losses,
},
str(directory / "model.pth"),
)
self.best_loss = curr_loss
else:
torch.save(
{
"iteration": self.iteration,
"model_state_dict": self.model.state_dict(),
"optimiser_state_dict": self.optimiser.state_dict(),
"loss": curr_loss,
"running_loss": self.last_n_losses,
},
str(directory / "latest_model.pth"),
)
            # best_loss is only updated when the loss improves, so 'model.pth' keeps tracking the best checkpoint
def load_model(self, directory, model="model.pth"):
"""Load a pretrained model, optimiser state, loss at time of saving, iteration at time of saving
Args:
directory (pathlib Path): Directory in which the model is saved
model (str): Model filename, defaults to 'model.pth'
"""
checkpoint = torch.load(str(directory / model))
self.model.load_state_dict(checkpoint["model_state_dict"])
self.optimiser.load_state_dict(checkpoint["optimiser_state_dict"])
self.latest_loss = checkpoint["loss"]
self.iteration = checkpoint["iteration"]
self.last_n_losses = checkpoint["running_loss"]
self.best_loss = checkpoint["loss"]
def plane_mask_builder(self, blank_width=None):
"""Get a list of logical ndarrays that can be used to mask out the central region of an input image, and
extract that local region for a 'label' array at output of the model
Returns:
(list of 2D ndarrays): A set of masks to apply to the axial, coronal and sagittal views taken during
building of a batch
"""
if blank_width is None:
blank_width = self.blank_width
axial_mask = np.ones([self.coronal_width, self.sagittal_width], dtype=bool)
coronal_mask = np.ones([self.axial_width, self.sagittal_width], dtype=bool)
sagittal_mask = np.ones([self.axial_width, self.coronal_width], dtype=bool)
for mask in [axial_mask, coronal_mask, sagittal_mask]:
row_start = int(np.floor(mask.shape[0] / 2) - blank_width / 2)
col_start = int(np.floor(mask.shape[1] / 2) - blank_width / 2)
mask[
row_start : row_start + blank_width, col_start : col_start + blank_width
] = False
# for mask, border_mask in zip([axial_mask, coronal_mask, sagittal_mask], self.border_masks):
# mask *= border_mask
return [axial_mask, coronal_mask, sagittal_mask]
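    # Illustrative example (assumed small sizes): for a 6x6 view with blank_width=2 the returned
    # mask is True everywhere except a central 2x2 block of False:
    #   [[T T T T T T],
    #    [T T T T T T],
    #    [T T F F T T],
    #    [T T F F T T],
    #    [T T T T T T],
    #    [T T T T T T]]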
def border_mask_builder(self):
"""Get a list of logical ndarrays that can be used to mask out the border region of an input image, and
extract that local region for a 'label' array at output of the model. Applying these should help with aliasing
effects at the edges of cnn output
Returns:
(list of 2D ndarrays): A set of masks to apply to the axial, coronal and sagittal views taken during
building of a batch
"""
axial_mask = np.ones([self.coronal_width, self.sagittal_width], dtype=bool)
coronal_mask = np.ones([self.axial_width, self.sagittal_width], dtype=bool)
sagittal_mask = np.ones([self.axial_width, self.coronal_width], dtype=bool)
for mask in [axial_mask, coronal_mask, sagittal_mask]:
mask[: self.border_width] = False
mask[-self.border_width :] = False
mask[:, : self.border_width] = False
mask[:, -self.border_width :] = False
return [axial_mask, coronal_mask, sagittal_mask]
@staticmethod
def _rand_nd_ints(high, low=0):
return np.random.randint(low, high)
def random_coronal_slicer(self, arr, indices=None, allow_off_edge=False):
"""Takes a random crop from a random coronal plane of 3D array arr
Args:
allow_off_edge (bool): optional, defaults to False. Whether to allow indices which will take the view
off the edges of arr
indices (list of 3 ints): Coordinates at which to take the slice from. 0th and 2nd indices define a top
left corner of the view, 1st index defines the coronal slice
arr (ndarray): 3D volume
Returns:
(ndarray): 2D image
"""
if indices is None:
indices = self._rand_nd_ints(
high=[
arr.shape[0] - self.axial_width,
arr.shape[1],
arr.shape[2] - self.sagittal_width,
]
)
if allow_off_edge:
out_arr = np.zeros([self.axial_width, self.sagittal_width])
new_out_start_inds = []
new_out_end_inds = []
new_arr_start_inds = []
new_arr_end_inds = []
non_coronal_inds = [indices[0], indices[2]]
for ind, width, arr_width in zip(
non_coronal_inds,
[self.axial_width, self.sagittal_width],
[arr.shape[0], arr.shape[2]],
):
if ind < 0:
new_out_start_inds.append(-ind)
else:
new_out_start_inds.append(0)
new_arr_start_inds.append(max(ind, 0))
remaining_width = width - new_out_start_inds[-1]
if new_arr_start_inds[-1] + remaining_width > arr_width:
new_arr_end_inds.append(arr_width)
else:
new_arr_end_inds.append(new_arr_start_inds[-1] + remaining_width)
curr_width = new_arr_end_inds[-1] - new_arr_start_inds[-1]
new_out_end_inds.append(new_out_start_inds[-1] + curr_width)
out_arr[
new_out_start_inds[0] : new_out_end_inds[0],
new_out_start_inds[1] : new_out_end_inds[1],
] = arr[
new_arr_start_inds[0] : new_arr_end_inds[0],
indices[1],
new_arr_start_inds[1] : new_arr_end_inds[1],
]
else:
out_arr = arr[
indices[0] : indices[0] + self.axial_width,
indices[1],
indices[2] : indices[2] + self.sagittal_width,
]
return out_arr, indices
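    # Note on allow_off_edge (this applies to all three slicers): when a start index is negative or
    # the crop runs past the volume, the out-of-volume part of out_arr is simply left as zeros and
    # only the overlapping region is copied in.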
def random_sagittal_slicer(self, arr, indices=None, allow_off_edge=False):
"""Takes a random crop from a random sagittal plane of 3D array arr
Args:
allow_off_edge (bool): optional, defaults to False. Whether to allow indices which will take the view
off the edges of arr
indices (list of 3 ints): Coordinates at which to take the slice from. 0th and 1st indices define a top
                left corner of the view, 2nd index defines the sagittal slice
arr (ndarray): 3D volume
Returns:
(ndarray): 2D image
"""
if indices is None:
indices = self._rand_nd_ints(
high=[
arr.shape[0] - self.axial_width,
arr.shape[1] - self.coronal_width,
arr.shape[2],
]
)
if allow_off_edge:
out_arr = np.zeros([self.axial_width, self.coronal_width])
new_out_start_inds = []
new_out_end_inds = []
new_arr_start_inds = []
new_arr_end_inds = []
for ind, width, arr_width in zip(
indices[:2],
[self.axial_width, self.coronal_width],
[arr.shape[0], arr.shape[1]],
):
if ind < 0:
new_out_start_inds.append(-ind)
else:
new_out_start_inds.append(0)
new_arr_start_inds.append(max(ind, 0))
remaining_width = width - new_out_start_inds[-1]
if new_arr_start_inds[-1] + remaining_width > arr_width:
new_arr_end_inds.append(arr_width)
else:
new_arr_end_inds.append(new_arr_start_inds[-1] + remaining_width)
curr_width = new_arr_end_inds[-1] - new_arr_start_inds[-1]
new_out_end_inds.append(new_out_start_inds[-1] + curr_width)
out_arr[
new_out_start_inds[0] : new_out_end_inds[0],
new_out_start_inds[1] : new_out_end_inds[1],
] = arr[
new_arr_start_inds[0] : new_arr_end_inds[0],
new_arr_start_inds[1] : new_arr_end_inds[1],
indices[2],
]
else:
out_arr = arr[
indices[0] : indices[0] + self.axial_width,
indices[1] : indices[1] + self.coronal_width,
indices[2],
]
return out_arr, indices
def random_axial_slicer(self, arr, indices=None, allow_off_edge=False):
"""Takes a random crop from a random axial plane of 3D array arr
Args:
allow_off_edge (bool): optional, defaults to False. Whether to allow indices which will take the view
off the edges of arr
indices (list of 3 ints): Coordinates at which to take the slice from. 1st and 2nd indices define a top
left corner of the view, 0th index defines the axial slice
arr (ndarray): 3D volume
Returns:
(ndarray): 2D image
"""
if indices is None:
indices = self._rand_nd_ints(
high=[
arr.shape[0],
arr.shape[1] - self.coronal_width,
arr.shape[2] - self.sagittal_width,
]
)
if allow_off_edge:
out_arr = np.zeros([self.coronal_width, self.sagittal_width])
new_out_start_inds = []
new_out_end_inds = []
new_arr_start_inds = []
new_arr_end_inds = []
for ind, width, arr_width in zip(
indices[1:],
[self.coronal_width, self.sagittal_width],
[arr.shape[1], arr.shape[2]],
):
if ind < 0:
new_out_start_inds.append(-ind)
else:
new_out_start_inds.append(0)
new_arr_start_inds.append(max(ind, 0))
remaining_width = width - new_out_start_inds[-1]
if new_arr_start_inds[-1] + remaining_width > arr_width:
new_arr_end_inds.append(arr_width)
else:
new_arr_end_inds.append(new_arr_start_inds[-1] + remaining_width)
curr_width = new_arr_end_inds[-1] - new_arr_start_inds[-1]
new_out_end_inds.append(new_out_start_inds[-1] + curr_width)
out_arr[
new_out_start_inds[0] : new_out_end_inds[0],
new_out_start_inds[1] : new_out_end_inds[1],
] = arr[
indices[0],
new_arr_start_inds[0] : new_arr_end_inds[0],
new_arr_start_inds[1] : new_arr_end_inds[1],
]
else:
out_arr = arr[
indices[0],
indices[1] : indices[1] + self.coronal_width,
indices[2] : indices[2] + self.sagittal_width,
]
return out_arr, indices
def build_batch(
self,
patient_indices=None,
body_part_indices=None,
plane_indices=None,
scan_num_indices=None,
coords_input_array=None,
batch_size=None,
require_above_thresh=True,
allow_off_edge=False,
):
"""Get a batch of inputs and labels for a ai_ct_scans.models.
Args:
patient_indices (optional, ndarray of type int): The indices of patients to access for each batch element,
with zero-indexing (i.e. patient 1 will be at 0). Should be length equal to batch_size, if batch_size used
body_part_indices (optional, ndarray of type int): Indices of body parts to use to build each batch element,
0 for abdomen, 1 for thorax
plane_indices (optional, ndarray of type int): Indices of plane to view the batch element from, 0 for axial,
1 for coronal, 2 for sagittal
scan_num_indices (optional, ndarray of type int): Indices of which sequential scan from each patient to use,
0 for first scan, 1 for second scan
            coords_input_array (optional, list of length 3 1D ndarrays of type int): The coordinates to use when
building each batch element. The coordinate corresponding to the plane_index will be the slice along that
index, while the other two coordinates will define the top left coordinate of the rectangle extracted from
that plane
batch_size (optional, int): How many slices to return for a batch
require_above_thresh (bool): Whether to reject random slices that do not have any elements above
self.threshold and seek out new slices until one is found
allow_off_edge (bool): Whether to allow coords_input_array to cause the output slice to overlap the edges of
the original scans - useful to ensure that it is possible for every part of a scan to occur at the central
masked region
Returns:
(dict of torch.Tensors): 'input_images': a stack of 2d axial, coronal and sagittal slices
'input_planes': a stack of one hot vectors, that correspond to which view the slice
was taken from. Shape [batch size, 3]
'input_body_part': a stack of one hot vectors, that correspond to the body part the
slice was taken from. Shape [batch size, 2]
'input_coords': a stack of 1D vectors describing the original xyz location of the
slice taken
'labels': a stack of 2D axial, coronal and sagittal slices, representing the data
that was masked at the centre of each element of input_images
"""
coords_sets = []
labels = []
if batch_size is None:
batch_size = self.batch_size
if batch_size != self.batch_size:
# if batch size for evaluation is different to what was used originally for the trainer, resize the input
# stack and reset batch size
self.input_stack = np.zeros(
[batch_size, 1, self.batch_height, self.batch_width]
)
self.batch_size = batch_size
if patient_indices is None:
patient_indices = np.random.choice(self.patient_indices, batch_size)
if plane_indices is None:
plane_indices = np.random.randint(0, 3, batch_size)
plane_one_hots = []
if body_part_indices is None:
body_part_indices = np.random.randint(0, 2, batch_size)
body_part_one_hots = []
if scan_num_indices is None:
            scan_num_indices = np.random.randint(0, 2, batch_size)
import math
import os
import numpy as np
import cv2
import skimage.transform
from scipy.io import loadmat
import scipy.spatial as spatial
import matplotlib.pyplot as plt
import torchvision
from math import cos, sin, atan2, asin
import scipy.misc
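# NOTE: `resolution`, `face_ind` and `uv_kpt_ind` used below are assumed to be module-level globals
# loaded elsewhere (e.g. from UV-map index files); they are not defined in this excerpt.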
def get_vertices(pos):
all_vertices = np.reshape(pos, [resolution**2, -1])
vertices = all_vertices[face_ind, :]
return vertices
def get_landmarks(pos):
kpt = pos[uv_kpt_ind[1,:].astype(np.int32), uv_kpt_ind[0,:].astype(np.int32), :]
return kpt
#region RENDER
def isPointInTri(point, tri_points):
''' Judge whether the point is in the triangle
Method:
http://blackpawn.com/texts/pointinpoly/
Args:
point: (2,). [u, v] or [x, y]
tri_points: (3 vertices, 2 coords). three vertices(2d points) of a triangle.
Returns:
bool: true for in triangle
'''
tp = tri_points
# vectors
v0 = tp[2,:] - tp[0,:]
v1 = tp[1,:] - tp[0,:]
v2 = point - tp[0,:]
# dot products
dot00 = np.dot(v0.T, v0)
dot01 = np.dot(v0.T, v1)
dot02 = np.dot(v0.T, v2)
dot11 = np.dot(v1.T, v1)
dot12 = np.dot(v1.T, v2)
# barycentric coordinates
if dot00*dot11 - dot01*dot01 == 0:
inverDeno = 0
else:
inverDeno = 1/(dot00*dot11 - dot01*dot01)
u = (dot11*dot02 - dot01*dot12)*inverDeno
v = (dot00*dot12 - dot01*dot02)*inverDeno
# check if point in triangle
return (u >= 0) & (v >= 0) & (u + v < 1)
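# Worked example (illustrative): for the right triangle with vertices (0,0), (4,0) and (0,4), the
# point (1,1) gives u = v = 0.25, so it is reported as inside:
# isPointInTri(np.array([1., 1.]), np.array([[0., 0.], [4., 0.], [0., 4.]]))  # -> True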
def get_point_weight(point, tri_points):
''' Get the weights of the position
Methods: https://gamedev.stackexchange.com/questions/23743/whats-the-most-efficient-way-to-find-barycentric-coordinates
    - m1: compute the areas of the triangles formed by embedding the point P inside the triangle
    - m2: <NAME>'s book "Real-Time Collision Detection" (faster; used here)
Args:
point: (2,). [u, v] or [x, y]
tri_points: (3 vertices, 2 coords). three vertices(2d points) of a triangle.
Returns:
w0: weight of v0
w1: weight of v1
w2: weight of v3
'''
tp = tri_points
# vectors
v0 = tp[2,:] - tp[0,:]
v1 = tp[1,:] - tp[0,:]
v2 = point - tp[0,:]
# dot products
dot00 = np.dot(v0.T, v0)
dot01 = np.dot(v0.T, v1)
dot02 = np.dot(v0.T, v2)
dot11 = np.dot(v1.T, v1)
dot12 = np.dot(v1.T, v2)
# barycentric coordinates
if dot00*dot11 - dot01*dot01 == 0:
inverDeno = 0
else:
inverDeno = 1/(dot00*dot11 - dot01*dot01)
u = (dot11*dot02 - dot01*dot12)*inverDeno
v = (dot00*dot12 - dot01*dot02)*inverDeno
w0 = 1 - u - v
w1 = v
w2 = u
return w0, w1, w2
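# Worked example (illustrative): with the same triangle (0,0), (4,0), (0,4) and point (1,1),
# get_point_weight returns (w0, w1, w2) = (0.5, 0.25, 0.25); the weights sum to 1 and
# 0.5*(0,0) + 0.25*(4,0) + 0.25*(0,4) reconstructs the point (1,1).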
def rasterize_triangles(vertices, triangles, h, w):
'''
Args:
vertices: [nver, 3]
triangles: [ntri, 3]
h: height
w: width
Returns:
depth_buffer: [h, w] saves the depth, here, the bigger the z, the fronter the point.
triangle_buffer: [h, w] saves the tri id(-1 for no triangle).
barycentric_weight: [h, w, 3] saves corresponding barycentric weight.
# Each triangle has 3 vertices & Each vertex has 3 coordinates x, y, z.
# h, w is the size of rendering
'''
# initial
    depth_buffer = np.zeros([h, w]) - 999999. #+ np.min(vertices[2,:]) - 999999. # set the initial z to the farthest position
triangle_buffer = np.zeros([h, w], dtype = np.int32) - 1 # if tri id = -1, the pixel has no triangle correspondance
barycentric_weight = np.zeros([h, w, 3], dtype = np.float32) #
for i in range(triangles.shape[0]):
tri = triangles[i, :] # 3 vertex indices
# the inner bounding box
umin = max(int(np.ceil(np.min(vertices[tri, 0]))), 0)
umax = min(int(np.floor(np.max(vertices[tri, 0]))), w-1)
vmin = max(int(np.ceil(np.min(vertices[tri, 1]))), 0)
vmax = min(int(np.floor(np.max(vertices[tri, 1]))), h-1)
if umax<umin or vmax<vmin:
continue
for u in range(umin, umax+1):
for v in range(vmin, vmax+1):
if not isPointInTri([u,v], vertices[tri, :2]):
continue
w0, w1, w2 = get_point_weight([u, v], vertices[tri, :2]) # barycentric weight
point_depth = w0*vertices[tri[0], 2] + w1*vertices[tri[1], 2] + w2*vertices[tri[2], 2]
if point_depth > depth_buffer[v, u]:
depth_buffer[v, u] = point_depth
triangle_buffer[v, u] = i
barycentric_weight[v, u, :] = np.array([w0, w1, w2])
return depth_buffer, triangle_buffer, barycentric_weight
def render_colors_ras(vertices, triangles, colors, h, w, c = 3):
''' render mesh with colors(rasterize triangle first)
Args:
vertices: [nver, 3]
triangles: [ntri, 3]
colors: [nver, 3]
h: height
w: width
c: channel
Returns:
image: [h, w, c]. rendering.
'''
assert vertices.shape[0] == colors.shape[0]
depth_buffer, triangle_buffer, barycentric_weight = rasterize_triangles(vertices, triangles, h, w)
triangle_buffer_flat = np.reshape(triangle_buffer, [-1]) # [h*w]
barycentric_weight_flat = np.reshape(barycentric_weight, [-1, c]) #[h*w, c]
weight = barycentric_weight_flat[:, :, np.newaxis] # [h*w, 3(ver in tri), 1]
colors_flat = colors[triangles[triangle_buffer_flat, :], :] # [h*w(tri id in pixel), 3(ver in tri), c(color in ver)]
colors_flat = weight*colors_flat # [h*w, 3, 3]
colors_flat = np.sum(colors_flat, 1) #[h*w, 3]. add tri.
image = np.reshape(colors_flat, [h, w, c])
# mask = (triangle_buffer[:,:] > -1).astype(np.float32)
# image = image*mask[:,:,np.newaxis]
return image
def render_colors(vertices, triangles, colors, h, w, c = 3):
''' render mesh with colors
Args:
vertices: [nver, 3]
triangles: [ntri, 3]
colors: [nver, 3]
h: height
w: width
Returns:
image: [h, w, c].
'''
assert vertices.shape[0] == colors.shape[0]
# initial
image = np.zeros((h, w, c))
depth_buffer = np.zeros([h, w]) - 999999.
for i in range(triangles.shape[0]):
tri = triangles[i, :] # 3 vertex indices
# the inner bounding box
umin = max(int(np.ceil(np.min(vertices[tri, 0]))), 0)
umax = min(int(np.floor(np.max(vertices[tri, 0]))), w-1)
vmin = max(int(np.ceil(np.min(vertices[tri, 1]))), 0)
vmax = min(int(np.floor(np.max(vertices[tri, 1]))), h-1)
if umax<umin or vmax<vmin:
continue
for u in range(umin, umax+1):
for v in range(vmin, vmax+1):
if not isPointInTri([u,v], vertices[tri, :2]):
continue
w0, w1, w2 = get_point_weight([u, v], vertices[tri, :2])
point_depth = w0*vertices[tri[0], 2] + w1*vertices[tri[1], 2] + w2*vertices[tri[2], 2]
if point_depth > depth_buffer[v, u]:
depth_buffer[v, u] = point_depth
image[v, u, :] = w0*colors[tri[0], :] + w1*colors[tri[1], :] + w2*colors[tri[2], :]
return image
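# Hedged usage sketch (not from the original file): render one white triangle into a 64x64 image.
# vertices = np.array([[10., 10., 0.], [40., 10., 0.], [10., 40., 0.]])
# triangles = np.array([[0, 1, 2]])
# colors = np.ones((3, 3))
# image = render_colors(vertices, triangles, colors, h=64, w=64)  # image.shape == (64, 64, 3)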
#endregion
#region POSE
def isRotationMatrix(R):
    ''' checks if a matrix is a valid rotation matrix (i.e. whether it is orthogonal)
'''
Rt = np.transpose(R)
shouldBeIdentity = np.dot(Rt, R)
I = np.identity(3, dtype=R.dtype)
n = np.linalg.norm(I - shouldBeIdentity)
return n < 1e-6
def matrix2angle(R):
''' compute three Euler angles from a Rotation Matrix. Ref: http://www.gregslabaugh.net/publications/euler.pdf
Args:
R: (3,3). rotation matrix
Returns:
x: yaw
y: pitch
z: roll
'''
# assert(isRotationMatrix(R))
    if R[2, 0] != 1 and R[2, 0] != -1:  # not in gimbal lock ('or' here would always be true)
x = asin(R[2, 0])
y = atan2(R[2, 1] / cos(x), R[2, 2] / cos(x))
z = atan2(R[1, 0] / cos(x), R[0, 0] / cos(x))
else: # Gimbal lock
z = 0 # can be anything
if R[2, 0] == -1:
x = np.pi / 2
y = z + atan2(R[0, 1], R[0, 2])
else:
x = -np.pi / 2
y = -z + atan2(-R[0, 1], -R[0, 2])
return x, y, z
def angle2matrix(angles):
''' get rotation matrix from three rotation angles(radian). The same as in 3DDFA.
Args:
angles: [3,]. x, y, z angles
x: yaw.
y: pitch.
z: roll.
Returns:
R: 3x3. rotation matrix.
'''
# x, y, z = np.deg2rad(angles[0]), np.deg2rad(angles[1]), np.deg2rad(angles[2])
# x, y, z = angles[0], angles[1], angles[2]
y, x, z = angles[0], angles[1], angles[2]
# x
Rx=np.array([
[1, 0, 0],
[0, cos(x), -sin(x)],
[0, sin(x), cos(x)]
])
# y
Ry=np.array([
[ cos(y), 0, sin(y)],
[ 0, 1, 0],
[-sin(y), 0, cos(y)]
])
# z
Rz=np.array([
[cos(z), -sin(z), 0],
[sin(z), cos(z), 0],
[ 0, 0, 1]
])
R = Rz.dot(Ry).dot(Rx)
return R.astype(np.float32)
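# Hedged usage sketch: matrices produced by angle2matrix are proper rotations, so they should pass
# the isRotationMatrix orthogonality check.
# R = angle2matrix(np.array([0.1, -0.2, 0.3]))
# assert isRotationMatrix(R)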
def P2sRt(P):
''' decomposing camera matrix P.
Args:
P: (3, 4). Affine Camera Matrix.
Returns:
s: scale factor.
R: (3, 3). rotation matrix.
t2d: (2,). 2d translation.
'''
t2d = P[:2, 3]
R1 = P[0:1, :3]
R2 = P[1:2, :3]
s = (np.linalg.norm(R1) + np.linalg.norm(R2)) / 2.0
r1 = R1 / np.linalg.norm(R1)
r2 = R2 / np.linalg.norm(R2)
r3 = np.cross(r1, r2)
R = np.concatenate((r1, r2, r3), 0)
return s, R, t2d
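# Hedged usage sketch: compose an affine camera matrix from a known scale, rotation and 2D
# translation, then recover them with P2sRt (values are illustrative).
# R = angle2matrix(np.array([0.1, 0.2, 0.3]))
# P = np.concatenate((2.5 * R, np.array([[10.], [20.], [0.]])), axis=1)
# s, R_rec, t2d = P2sRt(P)  # s ~= 2.5, t2d ~= [10., 20.]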
def compute_similarity_transform(points_static, points_to_transform):
p0 = np.copy(points_static).T
p1 = np.copy(points_to_transform).T
t0 = -np.mean(p0, axis=1).reshape(3, 1)
t1 = -np.mean(p1, axis=1).reshape(3, 1)
t_final = t1 - t0
p0c = p0 + t0
p1c = p1 + t1
covariance_matrix = p0c.dot(p1c.T) #3 3
U, S, V = np.linalg.svd(covariance_matrix) #U 3 3 S 3 V 3 3
R = U.dot(V) #R 3 3
if np.linalg.det(R) < 0:
R[:, 2] *= -1
    rms_d0 = np.sqrt(np.mean(np.linalg.norm(p0c, axis=0) ** 2))
# Copyright 2018-2022 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the :mod:`pennylane_lightning_gpu.LightningGPU` device.
"""
# pylint: disable=protected-access,cell-var-from-loop
import math
import numpy as np
import pennylane as qml
import pytest
from pennylane import DeviceError
try:
from pennylane_lightning_gpu.lightning_gpu import CPP_BINARY_AVAILABLE
import pennylane_lightning_gpu as plg
if not CPP_BINARY_AVAILABLE:
raise ImportError("PennyLane-Lightning-GPU is unsupported on this platform")
except (ImportError, ModuleNotFoundError):
pytest.skip(
"PennyLane-Lightning-GPU is unsupported on this platform. Skipping.",
allow_module_level=True,
)
U2 = np.array(
[
[
-0.07843244 - 3.57825948e-01j,
0.71447295 - 5.38069384e-02j,
0.20949966 + 6.59100734e-05j,
-0.50297381 + 2.35731613e-01j,
],
[
-0.26626692 + 4.53837083e-01j,
0.27771991 - 2.40717436e-01j,
0.41228017 - 1.30198687e-01j,
0.01384490 - 6.33200028e-01j,
],
[
-0.69254712 - 2.56963068e-02j,
-0.15484858 + 6.57298384e-02j,
-0.53082141 + 7.18073414e-02j,
-0.41060450 - 1.89462315e-01j,
],
[
-0.09686189 - 3.15085273e-01j,
-0.53241387 - 1.99491763e-01j,
0.56928622 + 3.97704398e-01j,
-0.28671074 - 6.01574497e-02j,
],
]
)
U_toffoli = np.diag([1 for i in range(8)])
U_toffoli[6:8, 6:8] = np.array([[0, 1], [1, 0]])
U_swap = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
U_cswap = np.array(
[
[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
]
)
H = np.array([[1.02789352, 1.61296440 - 0.3498192j], [1.61296440 + 0.3498192j, 1.23920938 + 0j]])
THETA = np.linspace(0.11, 1, 3)
PHI = np.linspace(0.32, 1, 3)
VARPHI = np.linspace(0.02, 1, 3)
class TestApply:
"""Tests that operations of certain operations are applied correctly or
that the proper errors are raised.
"""
from pennylane_lightning_gpu import LightningGPU as lg
test_data_no_parameters = [
(qml.PauliX, [1, 0], np.array([0, 1])),
(
qml.PauliX,
[1 / math.sqrt(2), 1 / math.sqrt(2)],
[1 / math.sqrt(2), 1 / math.sqrt(2)],
),
(qml.PauliY, [1, 0], [0, 1j]),
(
qml.PauliY,
[1 / math.sqrt(2), 1 / math.sqrt(2)],
[-1j / math.sqrt(2), 1j / math.sqrt(2)],
),
(qml.PauliZ, [1, 0], [1, 0]),
(
qml.PauliZ,
[1 / math.sqrt(2), 1 / math.sqrt(2)],
[1 / math.sqrt(2), -1 / math.sqrt(2)],
),
(qml.S, [1, 0], [1, 0]),
(
qml.S,
[1 / math.sqrt(2), 1 / math.sqrt(2)],
[1 / math.sqrt(2), 1j / math.sqrt(2)],
),
(qml.T, [1, 0], [1, 0]),
(
qml.T,
[1 / math.sqrt(2), 1 / math.sqrt(2)],
[1 / math.sqrt(2), np.exp(1j * np.pi / 4) / math.sqrt(2)],
),
(qml.Hadamard, [1, 0], [1 / math.sqrt(2), 1 / math.sqrt(2)]),
(qml.Hadamard, [1 / math.sqrt(2), -1 / math.sqrt(2)], [0, 1]),
]
@pytest.mark.parametrize("operation,input,expected_output", test_data_no_parameters)
def test_apply_operation_single_wire_no_parameters(
self, qubit_device_1_wire, tol, operation, input, expected_output
):
"""Tests that applying an operation yields the expected output state for single wire
operations that have no parameters."""
dev = qubit_device_1_wire
gpu_ctor = plg.lightning_gpu._gpu_dtype(dev.C_DTYPE)
dev._gpu_state = gpu_ctor(np.array(input).astype(dev.C_DTYPE))
dev.apply([operation(wires=[0])])
assert np.allclose(dev._state, np.array(expected_output), atol=tol, rtol=0)
test_data_two_wires_no_parameters = [
(qml.CNOT, [1, 0, 0, 0], [1, 0, 0, 0]),
(qml.CNOT, [0, 0, 1, 0], [0, 0, 0, 1]),
(
qml.CNOT,
[1 / math.sqrt(2), 0, 0, 1 / math.sqrt(2)],
[1 / math.sqrt(2), 0, 1 / math.sqrt(2), 0],
),
(qml.SWAP, [1, 0, 0, 0], [1, 0, 0, 0]),
(qml.SWAP, [0, 0, 1, 0], [0, 1, 0, 0]),
(
qml.SWAP,
[1 / math.sqrt(2), 0, -1 / math.sqrt(2), 0],
[1 / math.sqrt(2), -1 / math.sqrt(2), 0, 0],
),
(qml.CZ, [1, 0, 0, 0], [1, 0, 0, 0]),
(qml.CZ, [0, 0, 0, 1], [0, 0, 0, -1]),
(
qml.CZ,
[1 / math.sqrt(2), 0, 0, -1 / math.sqrt(2)],
[1 / math.sqrt(2), 0, 0, 1 / math.sqrt(2)],
),
]
@pytest.mark.parametrize("operation,input,expected_output", test_data_two_wires_no_parameters)
def test_apply_operation_two_wires_no_parameters(
self, qubit_device_2_wires, tol, operation, input, expected_output
):
"""Tests that applying an operation yields the expected output state for two wire
operations that have no parameters."""
dev = qubit_device_2_wires
gpu_ctor = plg.lightning_gpu._gpu_dtype(dev.C_DTYPE)
dev._gpu_state = gpu_ctor(np.array(input).reshape(2 * [2]).astype(dev.C_DTYPE))
dev.apply([operation(wires=[0, 1])])
assert np.allclose(dev.state, np.array(expected_output), atol=tol, rtol=0)
test_data_three_wires_no_parameters = [
(qml.CSWAP, [1, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0]),
(qml.CSWAP, [0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0]),
(qml.CSWAP, [0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1, 0, 0]),
(qml.Toffoli, [1, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0]),
(qml.Toffoli, [0, 1, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0]),
(qml.Toffoli, [0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 1]),
(qml.Toffoli, [0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 1, 0]),
]
@pytest.mark.parametrize("operation,input,expected_output", test_data_three_wires_no_parameters)
def test_apply_operation_three_wires_no_parameters(
self, qubit_device_3_wires, tol, operation, input, expected_output
):
"""Tests that applying an operation yields the expected output state for three wire
operations that have no parameters."""
dev = qubit_device_3_wires
gpu_ctor = plg.lightning_gpu._gpu_dtype(dev.C_DTYPE)
dev._gpu_state = gpu_ctor(np.array(input).reshape(3 * [2]).astype(dev.C_DTYPE))
dev.apply([operation(wires=[0, 1, 2])])
assert np.allclose(dev.state, np.array(expected_output), atol=tol, rtol=0)
@pytest.mark.parametrize(
"operation,expected_output,par",
[
(qml.BasisState, [0, 0, 1, 0], [1, 0]),
(qml.BasisState, [0, 0, 1, 0], [1, 0]),
(qml.BasisState, [0, 0, 0, 1], [1, 1]),
(qml.QubitStateVector, [0, 0, 1, 0], [0, 0, 1, 0]),
(qml.QubitStateVector, [0, 0, 1, 0], [0, 0, 1, 0]),
(qml.QubitStateVector, [0, 0, 0, 1], [0, 0, 0, 1]),
(
qml.QubitStateVector,
[1 / math.sqrt(3), 0, 1 / math.sqrt(3), 1 / math.sqrt(3)],
[1 / math.sqrt(3), 0, 1 / math.sqrt(3), 1 / math.sqrt(3)],
),
(
qml.QubitStateVector,
[1 / math.sqrt(3), 0, -1 / math.sqrt(3), 1 / math.sqrt(3)],
[1 / math.sqrt(3), 0, -1 / math.sqrt(3), 1 / math.sqrt(3)],
),
],
)
def test_apply_operation_state_preparation(
self, qubit_device_2_wires, tol, operation, expected_output, par
):
"""Tests that applying an operation yields the expected output state for single wire
operations that have no parameters."""
par = np.array(par)
qubit_device_2_wires.reset()
qubit_device_2_wires.apply([operation(par, wires=[0, 1])])
assert np.allclose(qubit_device_2_wires.state, np.array(expected_output), atol=tol, rtol=0)
""" operation,input,expected_output,par """
test_data_single_wire_with_parameters = [
(qml.PhaseShift, [1, 0], [1, 0], [math.pi / 2]),
(qml.PhaseShift, [0, 1], [0, 1j], [math.pi / 2]),
(
qml.PhaseShift,
[1 / math.sqrt(2), 1 / math.sqrt(2)],
[1 / math.sqrt(2), 1 / 2 + 1j / 2],
[math.pi / 4],
),
(qml.RX, [1, 0], [1 / math.sqrt(2), -1j * 1 / math.sqrt(2)], [math.pi / 2]),
(qml.RX, [1, 0], [0, -1j], [math.pi]),
(
qml.RX,
[1 / math.sqrt(2), 1 / math.sqrt(2)],
[1 / 2 - 1j / 2, 1 / 2 - 1j / 2],
[math.pi / 2],
),
(qml.RY, [1, 0], [1 / math.sqrt(2), 1 / math.sqrt(2)], [math.pi / 2]),
(qml.RY, [1, 0], [0, 1], [math.pi]),
(qml.RY, [1 / math.sqrt(2), 1 / math.sqrt(2)], [0, 1], [math.pi / 2]),
(qml.RZ, [1, 0], [1 / math.sqrt(2) - 1j / math.sqrt(2), 0], [math.pi / 2]),
(qml.RZ, [0, 1], [0, 1j], [math.pi]),
(
qml.RZ,
[1 / math.sqrt(2), 1 / math.sqrt(2)],
[1 / 2 - 1j / 2, 1 / 2 + 1j / 2],
[math.pi / 2],
),
(
qml.Rot,
[1, 0],
[1 / math.sqrt(2) - 1j / math.sqrt(2), 0],
[math.pi / 2, 0, 0],
),
(qml.Rot, [1, 0], [1 / math.sqrt(2), 1 / math.sqrt(2)], [0, math.pi / 2, 0]),
(
qml.Rot,
[1 / math.sqrt(2), 1 / math.sqrt(2)],
[1 / 2 - 1j / 2, 1 / 2 + 1j / 2],
[0, 0, math.pi / 2],
),
(
qml.Rot,
[1, 0],
[-1j / math.sqrt(2), -1 / math.sqrt(2)],
[math.pi / 2, -math.pi / 2, math.pi / 2],
),
(
qml.Rot,
[1 / math.sqrt(2), 1 / math.sqrt(2)],
[1 / 2 + 1j / 2, -1 / 2 + 1j / 2],
[-math.pi / 2, math.pi, math.pi],
),
]
@pytest.mark.parametrize(
"operation,input,expected_output,par", test_data_single_wire_with_parameters
)
def test_apply_operation_single_wire_with_parameters(
self, qubit_device_1_wire, tol, operation, input, expected_output, par
):
"""Tests that applying an operation yields the expected output state for single wire
operations that have parameters."""
dev = qubit_device_1_wire
gpu_ctor = plg.lightning_gpu._gpu_dtype(dev.C_DTYPE)
dev._gpu_state = gpu_ctor(np.array(input).astype(dev.C_DTYPE))
dev.apply([operation(*par, wires=[0])])
assert np.allclose(dev.state, np.array(expected_output), atol=tol, rtol=0)
""" operation,input,expected_output,par """
test_data_two_wires_with_parameters = [
(qml.CRX, [0, 1, 0, 0], [0, 1, 0, 0], [math.pi / 2]),
(qml.CRX, [0, 0, 0, 1], [0, 0, -1j, 0], [math.pi]),
(
qml.CRX,
[0, 1 / math.sqrt(2), 1 / math.sqrt(2), 0],
[0, 1 / math.sqrt(2), 1 / 2, -1j / 2],
[math.pi / 2],
),
(
qml.CRY,
[0, 0, 0, 1],
[0, 0, -1 / math.sqrt(2), 1 / math.sqrt(2)],
[math.pi / 2],
),
(qml.CRY, [0, 0, 0, 1], [0, 0, -1, 0], [math.pi]),
(
qml.CRY,
[1 / math.sqrt(2), 1 / math.sqrt(2), 0, 0],
[1 / math.sqrt(2), 1 / math.sqrt(2), 0, 0],
[math.pi / 2],
),
(
qml.CRZ,
[0, 0, 0, 1],
[0, 0, 0, 1 / math.sqrt(2) + 1j / math.sqrt(2)],
[math.pi / 2],
),
(qml.CRZ, [0, 0, 0, 1], [0, 0, 0, 1j], [math.pi]),
(
qml.CRZ,
[1 / math.sqrt(2), 1 / math.sqrt(2), 0, 0],
[1 / math.sqrt(2), 1 / math.sqrt(2), 0, 0],
[math.pi / 2],
),
(
qml.CRot,
[0, 0, 0, 1],
[0, 0, 0, 1 / math.sqrt(2) + 1j / math.sqrt(2)],
[math.pi / 2, 0, 0],
),
(
qml.CRot,
[0, 0, 0, 1],
[0, 0, -1 / math.sqrt(2), 1 / math.sqrt(2)],
[0, math.pi / 2, 0],
),
(
qml.CRot,
[0, 0, 1 / math.sqrt(2), 1 / math.sqrt(2)],
[0, 0, 1 / 2 - 1j / 2, 1 / 2 + 1j / 2],
[0, 0, math.pi / 2],
),
(
qml.CRot,
[0, 0, 0, 1],
[0, 0, 1 / math.sqrt(2), 1j / math.sqrt(2)],
[math.pi / 2, -math.pi / 2, math.pi / 2],
),
(
qml.CRot,
[0, 1 / math.sqrt(2), 1 / math.sqrt(2), 0],
[0, 1 / math.sqrt(2), 0, -1 / 2 + 1j / 2],
[-math.pi / 2, math.pi, math.pi],
),
(
qml.ControlledPhaseShift,
[1, 0, 0, 0],
[1, 0, 0, 0],
[math.pi / 2],
),
(
qml.ControlledPhaseShift,
[0, 1, 0, 0],
[0, 1, 0, 0],
[math.pi / 2],
),
(
qml.ControlledPhaseShift,
[0, 0, 1, 0],
[0, 0, 1, 0],
[math.pi / 2],
),
(
qml.ControlledPhaseShift,
[0, 0, 0, 1],
[0, 0, 0, 1 / math.sqrt(2) + 1j / math.sqrt(2)],
[math.pi / 4],
),
(
qml.ControlledPhaseShift,
[1 / math.sqrt(2), 1 / math.sqrt(2), 1 / math.sqrt(2), 1 / math.sqrt(2)],
[1 / math.sqrt(2), 1 / math.sqrt(2), 1 / math.sqrt(2), 1 / 2 + 1j / 2],
[math.pi / 4],
),
(qml.IsingXX, [1, 0, 0, 0], [1 / math.sqrt(2), 0, 0, -1j / math.sqrt(2)], [math.pi / 2]),
(
qml.IsingXX,
[0, 1 / math.sqrt(2), 0, 1 / math.sqrt(2)],
[-0.5j, 0.5, -0.5j, 0.5],
[math.pi / 2],
),
(qml.IsingYY, [1, 0, 0, 0], [1 / math.sqrt(2), 0, 0, 1j / math.sqrt(2)], [math.pi / 2]),
(
qml.IsingYY,
[1 / math.sqrt(2), 0, 0, 1 / math.sqrt(2)],
[0.5 + 0.5j, 0, 0, 0.5 + 0.5j],
[math.pi / 2],
),
(qml.IsingZZ, [1, 0, 0, 0], [1 / math.sqrt(2) - 1j / math.sqrt(2), 0, 0, 0], [math.pi / 2]),
(
qml.IsingZZ,
[1 / math.sqrt(2), 0, 0, 1 / math.sqrt(2)],
[0.5 - 0.5j, 0, 0, 0.5 - 0.5j],
[math.pi / 2],
),
(qml.MultiRZ, [1, 0, 0, 0], [1 / math.sqrt(2) - 1j / math.sqrt(2), 0, 0, 0], [math.pi / 2]),
(
qml.MultiRZ,
[1 / math.sqrt(2), 0, 0, 1 / math.sqrt(2)],
[0.5 - 0.5j, 0, 0, 0.5 - 0.5j],
[math.pi / 2],
),
]
@pytest.mark.parametrize(
"operation,input,expected_output,par", test_data_two_wires_with_parameters
)
def test_apply_operation_two_wires_with_parameters(
self, qubit_device_2_wires, tol, operation, input, expected_output, par
):
"""Tests that applying an operation yields the expected output state for two wire
operations that have parameters."""
dev = qubit_device_2_wires
gpu_ctor = plg.lightning_gpu._gpu_dtype(dev.C_DTYPE)
dev._gpu_state = gpu_ctor(np.array(input).reshape(2 * [2]).astype(dev.C_DTYPE))
dev.apply([operation(*par, wires=[0, 1])])
assert np.allclose(dev.state, np.array(expected_output), atol=tol, rtol=0)
def test_apply_errors_qubit_state_vector(self, qubit_device_2_wires):
"""Test that apply fails for incorrect state preparation, and > 2 qubit gates"""
with pytest.raises(ValueError, match="Sum of amplitudes-squared does not equal one."):
qubit_device_2_wires.apply([qml.QubitStateVector(np.array([1, -1]), wires=[0])])
with pytest.raises(ValueError, match=r"State vector must be of length 2\*\*wires."):
p = np.array([1, 0, 1, 1, 0]) / np.sqrt(3)
qubit_device_2_wires.apply([qml.QubitStateVector(p, wires=[0, 1])])
with pytest.raises(
DeviceError,
match="Operation QubitStateVector cannot be used after other Operations have already been applied ",
):
qubit_device_2_wires.reset()
qubit_device_2_wires.apply(
[
qml.RZ(0.5, wires=[0]),
qml.QubitStateVector(np.array([0, 1, 0, 0]), wires=[0, 1]),
]
)
def test_apply_errors_basis_state(self, qubit_device_2_wires):
with pytest.raises(
ValueError, match="BasisState parameter must consist of 0 or 1 integers."
):
qubit_device_2_wires.apply([qml.BasisState(np.array([-0.2, 4.2]), wires=[0, 1])])
with pytest.raises(
ValueError, match="BasisState parameter and wires must be of equal length."
):
qubit_device_2_wires.apply([qml.BasisState(np.array([0, 1]), wires=[0])])
with pytest.raises(
DeviceError,
match="Operation BasisState cannot be used after other Operations have already been applied ",
):
qubit_device_2_wires.reset()
qubit_device_2_wires.apply(
[qml.RZ(0.5, wires=[0]), qml.BasisState(np.array([1, 1]), wires=[0, 1])]
)
class TestExpval:
"""Tests that expectation values are properly calculated or that the proper errors are raised."""
@pytest.mark.parametrize(
"operation,input,expected_output",
[
(qml.PauliX, [1 / math.sqrt(2), 1 / math.sqrt(2)], 1),
(qml.PauliX, [1 / math.sqrt(2), -1 / math.sqrt(2)], -1),
(qml.PauliX, [1, 0], 0),
(qml.PauliY, [1 / math.sqrt(2), 1j / math.sqrt(2)], 1),
(qml.PauliY, [1 / math.sqrt(2), -1j / math.sqrt(2)], -1),
(qml.PauliY, [1, 0], 0),
(qml.PauliZ, [1, 0], 1),
(qml.PauliZ, [0, 1], -1),
(qml.PauliZ, [1 / math.sqrt(2), 1 / math.sqrt(2)], 0),
(qml.Hadamard, [1, 0], 1 / math.sqrt(2)),
(qml.Hadamard, [0, 1], -1 / math.sqrt(2)),
(qml.Hadamard, [1 / math.sqrt(2), 1 / math.sqrt(2)], 1 / math.sqrt(2)),
(qml.Identity, [1, 0], 1),
(qml.Identity, [0, 1], 1),
(qml.Identity, [1 / math.sqrt(2), -1 / math.sqrt(2)], 1),
],
)
def test_expval_single_wire_no_parameters(
self, qubit_device_1_wire, tol, operation, input, expected_output
):
"""Tests that expectation values are properly calculated for single-wire observables without parameters."""
obs = operation(wires=[0])
qubit_device_1_wire.reset()
qubit_device_1_wire.apply(
[qml.QubitStateVector(np.array(input), wires=[0])],
rotations=obs.diagonalizing_gates(),
)
res = qubit_device_1_wire.expval(obs)
assert np.isclose(res, expected_output, atol=tol, rtol=0)
class TestVar:
"""Tests that variances are properly calculated."""
@pytest.mark.parametrize(
"operation,input,expected_output",
[
(qml.PauliX, [1 / math.sqrt(2), 1 / math.sqrt(2)], 0),
(qml.PauliX, [1 / math.sqrt(2), -1 / math.sqrt(2)], 0),
(qml.PauliX, [1, 0], 1),
(qml.PauliY, [1 / math.sqrt(2), 1j / math.sqrt(2)], 0),
(qml.PauliY, [1 / math.sqrt(2), -1j / math.sqrt(2)], 0),
(qml.PauliY, [1, 0], 1),
(qml.PauliZ, [1, 0], 0),
(qml.PauliZ, [0, 1], 0),
(qml.PauliZ, [1 / math.sqrt(2), 1 / math.sqrt(2)], 1),
(qml.Hadamard, [1, 0], 1 / 2),
(qml.Hadamard, [0, 1], 1 / 2),
(qml.Hadamard, [1 / math.sqrt(2), 1 / math.sqrt(2)], 1 / 2),
(qml.Identity, [1, 0], 0),
(qml.Identity, [0, 1], 0),
(qml.Identity, [1 / math.sqrt(2), -1 / math.sqrt(2)], 0),
],
)
def test_var_single_wire_no_parameters(
self, qubit_device_1_wire, tol, operation, input, expected_output
):
"""Tests that variances are properly calculated for single-wire observables without parameters."""
obs = operation(wires=[0])
qubit_device_1_wire.reset()
qubit_device_1_wire.apply(
[qml.QubitStateVector(np.array(input), wires=[0])],
rotations=obs.diagonalizing_gates(),
)
res = qubit_device_1_wire.var(obs)
assert np.isclose(res, expected_output, atol=tol, rtol=0)
class TestLightningGPUIntegration:
"""Integration tests for lightning.gpu. This test ensures it integrates
properly with the PennyLane interface, in particular QNode."""
def test_load_default_qubit_device(self):
"""Test that the default plugin loads correctly"""
dev = qml.device("lightning.gpu", wires=2)
assert dev.num_wires == 2
assert dev.shots is None
assert dev.short_name == "lightning.gpu"
def test_no_backprop(self):
"""Test that lightning.gpu does not support the backprop
differentiation method."""
if not CPP_BINARY_AVAILABLE:
pytest.skip("Skipping test because lightning.gpu is behaving like default.qubit")
dev = qml.device("lightning.gpu", wires=2)
def circuit():
"""Simple quantum function."""
return qml.expval(qml.PauliZ(0))
with pytest.raises(qml.QuantumFunctionError):
qml.QNode(circuit, dev, diff_method="backprop")
def test_best_gets_lightning_gpu(self):
"""Test that the best differentiation method returns LightningGPU."""
if not CPP_BINARY_AVAILABLE:
pytest.skip("Skipping test because lightning.gpu is behaving like lightning.qubit")
dev = qml.device("lightning.gpu", wires=2)
def circuit():
"""Simple quantum function."""
return qml.expval(qml.PauliZ(0))
qnode = qml.QNode(circuit, dev, diff_method="best")
assert isinstance(qnode.device, plg.LightningGPU)
def test_args(self):
"""Test that the plugin requires correct arguments"""
with pytest.raises(TypeError, match="missing 1 required positional argument: 'wires'"):
qml.device("lightning.gpu")
def test_qubit_circuit(self, tol):
"""Test that the default qubit plugin provides correct result for a simple circuit"""
p = 0.543
dev = qml.device("lightning.gpu", wires=1)
@qml.qnode(dev)
def circuit(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliY(0))
expected = -np.sin(p)
assert np.isclose(circuit(p), expected, atol=tol, rtol=0)
def test_qubit_identity(self, tol):
"""Test that the default qubit plugin provides correct result for the Identity expectation"""
p = 0.543
dev = qml.device("lightning.gpu", wires=1)
@qml.qnode(dev)
def circuit(x):
"""Test quantum function"""
qml.RX(x, wires=0)
return qml.expval(qml.Identity(0))
assert np.isclose(circuit(p), 1, atol=tol, rtol=0)
def test_nonzero_shots(self, tol):
"""Test that the default qubit plugin provides correct result for high shot number"""
shots = 10**4
dev = qml.device("lightning.gpu", wires=1, shots=shots)
p = 0.543
@qml.qnode(dev)
def circuit(x):
"""Test quantum function"""
qml.RX(x, wires=0)
return qml.expval(qml.PauliY(0))
runs = []
for _ in range(100):
runs.append(circuit(p))
assert np.isclose(np.mean(runs), -np.sin(p), atol=1e-2, rtol=0)
# This test is ran against the state |0> with one Z expval
@pytest.mark.parametrize(
"name,expected_output",
[
("PauliX", -1),
("PauliY", -1),
("PauliZ", 1),
("Hadamard", 0),
],
)
def test_supported_gate_single_wire_no_parameters(
self, qubit_device_1_wire, tol, name, expected_output
):
"""Tests supported gates that act on a single wire that are not parameterized"""
op = getattr(qml.ops, name)
assert qubit_device_1_wire.supports_operation(name)
@qml.qnode(qubit_device_1_wire)
def circuit():
op(wires=0)
return qml.expval(qml.PauliZ(0))
assert np.isclose(circuit(), expected_output, atol=tol, rtol=0)
# This test is ran against the state |Phi+> with two Z expvals
@pytest.mark.parametrize(
"name,expected_output",
[
("CNOT", [-1 / 2, 1]),
("SWAP", [-1 / 2, -1 / 2]),
("CZ", [-1 / 2, -1 / 2]),
],
)
def test_supported_gate_two_wires_no_parameters(
self, qubit_device_2_wires, tol, name, expected_output
):
"""Tests supported gates that act on two wires that are not parameterized"""
op = getattr(qml.ops, name)
assert qubit_device_2_wires.supports_operation(name)
@qml.qnode(qubit_device_2_wires)
def circuit():
qml.QubitStateVector(np.array([1 / 2, 0, 0, math.sqrt(3) / 2]), wires=[0, 1])
op(wires=[0, 1])
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))
assert np.allclose(circuit(), expected_output, atol=tol, rtol=0)
@pytest.mark.parametrize(
"name,expected_output",
[
("CSWAP", [-1, -1, 1]),
],
)
def test_supported_gate_three_wires_no_parameters(
self, qubit_device_3_wires, tol, name, expected_output
):
"""Tests supported gates that act on three wires that are not parameterized"""
op = getattr(qml.ops, name)
assert qubit_device_3_wires.supports_operation(name)
@qml.qnode(qubit_device_3_wires)
def circuit():
qml.BasisState(np.array([1, 0, 1]), wires=[0, 1, 2])
op(wires=[0, 1, 2])
return (
qml.expval(qml.PauliZ(0)),
qml.expval(qml.PauliZ(1)),
qml.expval(qml.PauliZ(2)),
)
assert np.allclose(circuit(), expected_output, atol=tol, rtol=0)
# This test is ran with two Z expvals
@pytest.mark.parametrize(
"name,par,expected_output",
[
("BasisState", [0, 0], [1, 1]),
("BasisState", [1, 0], [-1, 1]),
("BasisState", [0, 1], [1, -1]),
("QubitStateVector", [1, 0, 0, 0], [1, 1]),
("QubitStateVector", [0, 0, 1, 0], [-1, 1]),
("QubitStateVector", [0, 1, 0, 0], [1, -1]),
],
)
def test_supported_state_preparation(
self, qubit_device_2_wires, tol, name, par, expected_output
):
"""Tests supported state preparations"""
op = getattr(qml.ops, name)
assert qubit_device_2_wires.supports_operation(name)
@qml.qnode(qubit_device_2_wires)
def circuit():
op(np.array(par), wires=[0, 1])
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))
assert np.allclose(circuit(), expected_output, atol=tol, rtol=0)
# This test is ran with two Z expvals
@pytest.mark.parametrize(
"name,par,wires,expected_output",
[
("BasisState", [1, 1], [0, 1], [-1, -1]),
("BasisState", [1], [0], [-1, 1]),
("BasisState", [1], [1], [1, -1]),
],
)
def test_basis_state_2_qubit_subset(
self, qubit_device_2_wires, tol, name, par, wires, expected_output
):
"""Tests qubit basis state preparation on subsets of qubits"""
op = getattr(qml.ops, name)
@qml.qnode(qubit_device_2_wires)
def circuit():
op(np.array(par), wires=wires)
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))
assert np.allclose(circuit(), expected_output, atol=tol, rtol=0)
# This test is run with two expvals
@pytest.mark.parametrize(
"name,par,wires,expected_output",
[
("QubitStateVector", [0, 1], [1], [1, -1]),
("QubitStateVector", [0, 1], [0], [-1, 1]),
("QubitStateVector", [1.0 / np.sqrt(2), 1.0 / np.sqrt(2)], [1], [1, 0]),
("QubitStateVector", [1j / 2.0, np.sqrt(3) / 2.0], [1], [1, -0.5]),
("QubitStateVector", [(2 - 1j) / 3.0, 2j / 3.0], [0], [1 / 9.0, 1]),
],
)
def test_state_vector_2_qubit_subset(
self, qubit_device_2_wires, tol, name, par, wires, expected_output
):
"""Tests qubit state vector preparation on subsets of 2 qubits"""
op = getattr(qml.ops, name)
par = np.array(par)
@qml.qnode(qubit_device_2_wires)
def circuit():
op(par, wires=wires)
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))
assert np.allclose(circuit(), expected_output, atol=tol, rtol=0)
# This test is run with three expvals
@pytest.mark.parametrize(
"name,par,wires,expected_output",
[
(
"QubitStateVector",
[
1j / np.sqrt(10),
(1 - 2j) / np.sqrt(10),
0,
0,
0,
2 / np.sqrt(10),
0,
0,
],
[0, 1, 2],
[1 / 5.0, 1.0, -4 / 5.0],
),
(
"QubitStateVector",
                [1 / np.sqrt(2),
import numpy as np
import py.test
import random
from weldnumpy import *
from test_utils import *
'''
TODO:
1. tests that preserve view properties.
'''
'''
Tests that are common for both 1-d and multi-dimensional cases
'''
def test_unary_elemwise():
'''
Tests all the unary ops in UNARY_OPS.
FIXME: For now, unary ops seem to only be supported on floats.
'''
for SHAPE in SHAPES:
for op in UNARY_OPS:
for dtype in TYPES:
print(dtype)
# int still not supported for the unary ops in Weld.
if "int" in dtype:
continue
np_test, w = random_arrays(SHAPE, dtype)
w2 = op(w)
np_result = op(np_test)
w2_eval = w2.evaluate()
assert np.allclose(w2, np_result)
assert np.array_equal(w2_eval, np_result)
def test_binary_elemwise():
'''
'''
for SHAPE in SHAPES:
for op in BINARY_OPS:
for dtype in TYPES:
np_test, w = random_arrays(SHAPE, dtype)
np_test2, w2 = random_arrays(SHAPE, dtype)
w3 = op(w, w2)
weld_result = w3.evaluate()
np_result = op(np_test, np_test2)
# Need array equal to keep matching types for weldarray, otherwise
# allclose tries to subtract floats from ints.
assert np.array_equal(weld_result, np_result)
def test_mix_np_weld_ops():
'''
Weld Ops + Numpy Ops - before executing any of the numpy ops, the
    registered weld ops must be evaluated.
'''
for SHAPE in SHAPES:
np_test, w = random_arrays(SHAPE, 'float32')
np_test = np.exp(np_test)
np_result = np.sin(np_test)
w2 = np.exp(w)
w2 = np.sin(w2)
weld_result = w2.evaluate()
assert np.allclose(weld_result, np_result)
def test_scalars():
'''
Special case of broadcasting rules - the scalar is applied to all the
    weldarray members.
'''
for SHAPE in SHAPES:
print('shape = ', SHAPE)
t = "int32"
print("t = ", t)
n, w = random_arrays(SHAPE, t)
n2 = n + 2
w2 = w + 2
w2 = w2.evaluate()
assert np.allclose(w2, n2)
# test by combining it with binary op.
n, w = random_arrays(SHAPE, t)
w += 10
n += 10
n2, w2 = random_arrays(SHAPE, t)
w = np.add(w, w2)
        n = np.add(n, n2)
#!/usr/bin/env python
"""
https://www.kaggle.com/angps95/intro-to-reinforcement-learning-with-openai-gym
REQUIRES:
env.py in the same directory
OUTPUTS:
The optimal policies as calculated by the PI and VI algorithms on screen and as .csv files
If run through launch file, .csv files go to tejas/resources directory, else .csv files go to same directory as script
"""
import sys
import numpy as np
import env
import csv
import csv_interface
def policy_eval(policy, discount_factor=1.0, theta=0.00001):
"""
Evaluate a policy given an environment and a full description of the environment's dynamics.
Args:
policy: [S, A] shaped matrix representing the policy.
        env (module-level import `env`, not a function argument): env.P represents the transition
            probabilities of the environment. env.P[s][a] is a list of transition tuples
            (prob, next_state, reward, done). env.nS is the number of states in the environment.
            env.nA is the number of actions in the environment.
theta: We stop evaluation once our value function change is less than theta for all states.
discount_factor: Gamma discount factor.
Returns:
Vector of length env.nS representing the value function.
"""
# Start with a random (all 0) value function
V = np.zeros(env.nS)
while True:
# TODO: Implement!
delta = 0 #delta = change in value of state from one iteration to next
for state in range(env.nS): #for all states
val = 0 #initiate value as 0
for action,act_prob in enumerate(policy[state]): #for all actions/action probabilities
for prob,next_state,reward,_ in env.P[state][action]: #transition probabilities,state,rewards of each action
val += act_prob * prob * (reward + discount_factor * V[next_state]) #eqn to calculate
delta = max(delta, np.abs(val-V[state]))
V[state] = val
if delta < theta: #break if the change in value is less than the threshold (theta)
break
    return np.array(V)
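# Hedged sketch (not part of the original script): one way to use policy_eval inside greedy policy
# iteration. Assumes the module-level env exposes nS, nA and P as described in the docstring above;
# the function name and structure here are illustrative, not from the source.
def policy_improvement(discount_factor=1.0, theta=0.00001):
    policy = np.ones([env.nS, env.nA]) / env.nA  # start from a uniform random policy
    while True:
        V = policy_eval(policy, discount_factor, theta)
        policy_stable = True
        for s in range(env.nS):
            old_action = np.argmax(policy[s])
            action_values = np.zeros(env.nA)
            for a in range(env.nA):
                for prob, next_state, reward, _ in env.P[s][a]:
                    action_values[a] += prob * (reward + discount_factor * V[next_state])
            best_action = np.argmax(action_values)
            if old_action != best_action:
                policy_stable = False
            policy[s] = np.eye(env.nA)[best_action]  # greedy one-hot update
        if policy_stable:
            return policy, V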
# -*- coding: utf-8 -*-
# !/usr/bin/env python3 -u
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""Implements automatic and manually exponential time series smoothing models."""
__all__ = ["AutoETS"]
__author__ = ["<NAME>"]
from sktime.forecasting.base.adapters import _StatsModelsAdapter
from statsmodels.tsa.exponential_smoothing.ets import ETSModel as _ETSModel
from itertools import product
from joblib import delayed, Parallel
import numpy as np
class AutoETS(_StatsModelsAdapter):
"""ETS models with both manual and automatic fitting capabilities.
Manual fitting is adapted from statsmodels' version,
while automatic fitting is adapted from R version of ets.
The first few parameters are the same as the ones on statsmodels
(from ``error`` to ``return_params``, link:
https://www.statsmodels.org/stable/_modules/statsmodels/tsa/exponential_smoothing/ets.html#ETSModel).
The next few parameters are adapted from the ones on R
(``auto`` to ``additive_only``, link:
https://www.rdocumentation.org/packages/forecast/versions/8.12/topics/ets),
and are used for automatic model selection.
Parameters
----------
error : str, optional
The error model. "add" (default) or "mul".
trend : str or None, optional
The trend component model. "add", "mul", or None (default).
damped_trend : bool, optional
Whether or not an included trend component is damped. Default is
False.
seasonal : str, optional
The seasonality model. "add", "mul", or None (default).
sp : int, optional
The number of periods in a complete seasonal cycle for seasonal
(Holt-Winters) models. For example, 4 for quarterly data with an
annual cycle or 7 for daily data with a weekly cycle. Required if
`seasonal` is not None. Default is `1`.
initialization_method : str, optional
Method for initialization of the state space model. One of:
* 'estimated' (default)
* 'heuristic'
* 'known'
If 'known' initialization is used, then `initial_level` must be
passed, as well as `initial_trend` and `initial_seasonal` if
applicable.
'heuristic' uses a heuristic based on the data to estimate initial
level, trend, and seasonal state. 'estimated' uses the same heuristic
as initial guesses, but then estimates the initial states as part of
the fitting process. Default is 'estimated'.
initial_level : float, optional
The initial level component. Only used if initialization is 'known'.
initial_trend : float, optional
The initial trend component. Only used if initialization is 'known'.
initial_seasonal : array_like, optional
The initial seasonal component. An array of length `seasonal_periods`.
Only used if initialization is 'known'.
bounds : dict or None, optional
A dictionary with parameter names as keys and the respective bounds
intervals as values (lists/tuples/arrays).
The available parameter names are, depending on the model and
initialization method:
* "smoothing_level"
* "smoothing_trend"
* "smoothing_seasonal"
* "damping_trend"
* "initial_level"
* "initial_trend"
* "initial_seasonal.0", ..., "initial_seasonal.<m-1>"
The default option is ``None``, in which case the traditional
(nonlinear) bounds as described in [1]_ are used.
start_params : array_like, optional
Initial values for parameters that will be optimized. If this is
``None``, default values will be used.
The length of this depends on the chosen model. This should contain
the parameters in the following order, skipping parameters that do
not exist in the chosen model.
* `smoothing_level` (alpha)
* `smoothing_trend` (beta)
* `smoothing_seasonal` (gamma)
* `damping_trend` (phi)
If ``initialization_method`` was set to ``'estimated'`` (the
default), additionally, the parameters
* `initial_level` (:math:`l_{-1}`)
    * `initial_trend` (:math:`b_{-1}`)
* `initial_seasonal.0` (:math:`s_{-1}`)
* ...
* `initial_seasonal.<m-1>` (:math:`s_{-m}`)
also have to be specified.
maxiter : int, optional
The maximum number of iterations to perform.
full_output : bool, optional
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
disp : bool, optional
Set to True to print convergence messages.
callback : callable callback(xk), optional
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
return_params : bool, optional
Whether or not to return only the array of maximizing parameters.
Default is False.
auto : bool, optional
Set True to enable automatic model selection.
Default is False.
information_criterion : str, optional
Information criterion to be used in model selection. One of:
* "aic"
* "bic"
* "aicc"
Default is "aic".
allow_multiplicative_trend : bool, optional
If True, models with multiplicative trend are allowed when
searching for a model. Otherwise, the model space excludes them.
Default is False.
restrict : bool, optional
If True, the models with infinite variance will not be allowed.
Default is True.
additive_only : bool, optional
If True, will only consider additive models.
Default is False.
ignore_inf_ic: bool, optional
If True models with negative infinity Information Criterion
(aic, bic, aicc) will be ignored.
Default is True
n_jobs : int or None, optional (default=None)
The number of jobs to run in parallel for automatic model fitting.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors.
References
----------
.. [1] <NAME>., & <NAME>. (2019) *Forecasting:
principles and practice*, 3rd edition, OTexts: Melbourne,
Australia. OTexts.com/fpp3. Accessed on April 19th 2020.
Examples
--------
>>> from sktime.datasets import load_airline
>>> from sktime.forecasting.ets import AutoETS
>>> y = load_airline()
>>> forecaster = AutoETS(auto=True, n_jobs=-1, sp=12)
>>> forecaster.fit(y)
AutoETS(...)
>>> y_pred = forecaster.predict(fh=[1,2,3])
"""
_fitted_param_names = ("aic", "aicc", "bic", "hqic")
def __init__(
self,
error="add",
trend=None,
damped_trend=False,
seasonal=None,
sp=1,
initialization_method="estimated",
initial_level=None,
initial_trend=None,
initial_seasonal=None,
bounds=None,
dates=None,
freq=None,
missing="none",
start_params=None,
maxiter=1000,
full_output=True,
disp=False,
callback=None,
return_params=False,
auto=False,
information_criterion="aic",
allow_multiplicative_trend=False,
restrict=True,
additive_only=False,
ignore_inf_ic=True,
n_jobs=None,
**kwargs
):
# Model params
self.error = error
self.trend = trend
self.damped_trend = damped_trend
self.seasonal = seasonal
self.sp = sp
self.initialization_method = initialization_method
self.initial_level = initial_level
self.initial_trend = initial_trend
self.initial_seasonal = initial_seasonal
self.bounds = bounds
self.dates = dates
self.freq = freq
self.missing = missing
# Fit params
self.start_params = start_params
self.maxiter = maxiter
self.full_output = full_output
self.disp = disp
self.callback = callback
self.return_params = return_params
self.information_criterion = information_criterion
self.auto = auto
self.allow_multiplicative_trend = allow_multiplicative_trend
self.restrict = restrict
self.additive_only = additive_only
self.ignore_inf_ic = ignore_inf_ic
self.n_jobs = n_jobs
super(AutoETS, self).__init__()
def _fit_forecaster(self, y, X=None):
# Select model automatically
if self.auto:
# Initialise parameter ranges
error_range = ["add", "mul"]
if self.allow_multiplicative_trend:
trend_range = ["add", "mul", None]
else:
trend_range = ["add", None]
            if self.sp is None or self.sp <= 1:
seasonal_range = [None]
else:
seasonal_range = ["add", "mul", None]
damped_range = [True, False]
# Check information criterion input
if self.information_criterion not in ["aic", "bic", "aicc"]:
raise ValueError(
"information criterion must either be aic, bic or aicc"
)
# Fit model, adapted from:
# https://github.com/robjhyndman/forecast/blob/master/R/ets.R
# Initialise iterator
def _iter(error_range, trend_range, seasonal_range, damped_range):
for error, trend, seasonal, damped in product(
error_range, trend_range, seasonal_range, damped_range
):
if trend is None and damped:
continue
if self.restrict:
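                        # the restricted model space drops combinations that can
                        # produce infinite forecast variance (additive errors with
                        # multiplicative components, and mul-mul-add models)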
if error == "add" and (trend == "mul" or seasonal == "mul"):
continue
if error == "mul" and trend == "mul" and seasonal == "add":
continue
if self.additive_only and (
error == "mul" or trend == "mul" or seasonal == "mul"
):
continue
yield error, trend, seasonal, damped
# Fit function
def _fit(error, trend, seasonal, damped):
_forecaster = _ETSModel(
y,
error=error,
trend=trend,
damped_trend=damped,
seasonal=seasonal,
seasonal_periods=self.sp,
initialization_method=self.initialization_method,
initial_level=self.initial_level,
initial_trend=self.initial_trend,
initial_seasonal=self.initial_seasonal,
bounds=self.bounds,
dates=self.dates,
freq=self.freq,
missing=self.missing,
)
_fitted_forecaster = _forecaster.fit(
start_params=self.start_params,
maxiter=self.maxiter,
full_output=self.full_output,
disp=self.disp,
callback=self.callback,
return_params=self.return_params,
)
return _forecaster, _fitted_forecaster
# Fit models
_fitted_results = Parallel(n_jobs=self.n_jobs)(
delayed(_fit)(error, trend, seasonal, damped)
for error, trend, seasonal, damped in _iter(
error_range, trend_range, seasonal_range, damped_range
)
)
# Store IC values for each model in a list
# Ignore infinite likelihood models if ignore_inf_ic is True
_ic_list = []
for result in _fitted_results:
ic = getattr(result[1], self.information_criterion)
if self.ignore_inf_ic and np.isinf(ic):
_ic_list.append(np.nan)
else:
_ic_list.append(ic)
# Select best model based on information criterion
if np.all(np.isnan(_ic_list)) or len(_ic_list) == 0:
# if all models have infinite IC raise an error
raise ValueError(
"None of the fitted models have finite %s"
% self.information_criterion
)
else:
# Get index of best model
                _index = np.nanargmin(_ic_list)
from typing import Dict, List
import numpy as np
from solvers.solver import Solver, Node, Graph
from collections import deque
import random
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
"""
# Deep Q-learning Agent
class DQNAgent:
def __init__(self, state_size, batch_size):
self.state_size = state_size
self.memory = deque(maxlen=2000)
self.gamma = 0.95 # discount rate
self.epsilon = 1.0 # exploration rate
# self.epsilon_min = 0.01
# self.epsilon_decay = 0.995
self.epsilon_min = 0.01
self.epsilon_decay = 0.995
self.learning_rate = 0.01
self.batch_size = 32
self.model = self.build_model()
def build_model(self, *args):
pass
def remember(self, state, reward, next_state, done):
self.memory.append((state, reward, next_state, done))
def act(self, h_values, states):
if np.random.rand() <= self.epsilon:
idx = random.randrange(len(states))
return states[idx], h_values[idx]
else:
idx = np.argmin(h_values)
return states[idx], h_values[idx]
def replay(self):
minibatch = random.sample(self.memory, self.batch_size)
states, targets = [], []
for state, reward, next_state, done in minibatch:
if not done:
next_state = np.reshape(next_state, [1, self.state_size])
# target = reward + self.gamma*self.model.predict(next_state)
target = self.model.predict(next_state, batch_size=1)
targets.append(target[-1][-1] + 1)
else:
target = reward
targets.append(target[-1][-1])
states.append(state)
# targets.append(target[-1][-1])
states, targets = np.array(states), np.array(targets)
history = self.model.fit(states, targets, batch_size=self.batch_size, epochs=1, verbose=0)
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
class LRTAStar(Solver):
def __init__(self):
super().__init__()
self.cost_table = {}
# def solve(self, start_state, goal_state):
# # No lookahead version
# start_node = Node(start_state)
# goal_node = Node(goal_state)
# queue = []
# while True:
# self.nodes_expanded = 0
# self.cost = float('inf')
# current_node = start_node
# while not current_node.isGoal(goal_node):
# self.nodes_expanded += 1
# neighbors = current_node.getNeighbors(current_node.state, goal_node.state, self.use_heuristic_cost)
# current_node.h = float('inf')
# if neighbors:
# for neighbor in neighbors:
# if neighbor in self.cost_table:
# neighbor_f = neighbor.g + self.cost_table[neighbor]
# else:
# neighbor_f = neighbor.g + neighbor.h
# if neighbor_f < current_node.h:
# current_node.h = neighbor_f
# next_node = neighbor
# self.cost_table[current_node] = current_node.h
# self.graph.setParent(current_node, next_node)
# current_node = next_node
# self.optimal_path = self.get_optimal_path(current_node)
# self.cost = self.nodes_expanded
# queue.append(self.cost)
# if len(queue) >= 10:
# last = queue[-10:]
# if len(set(last)) == 1:
# print(self.cost_table)
# return
def solve(self, start_state, goal_state):
start_node = Node(start_state)
goal_node = Node(goal_state)
episodes = 500
batch_size = 32
agent = DQNAgent(len(start_node.state), batch_size)
H_history = []
# Iterate the game
for e in range(episodes):
# reset state in the beginning of each game
nodes_expanded = 0
cost = float('inf')
current_node = start_node
done = 0
time_t = 0
while not current_node.isGoal(goal_node) and time_t < 1000:
nodes_expanded += 1
neighbors = current_node.getNeighbors(current_node.state, goal_node.state, self.use_heuristic_cost)
if neighbors:
H_values = []
for neighbor in neighbors:
#choose an action epsilon greedy
state = np.reshape(neighbor.state, [1, agent.state_size])
h_pred = agent.model.predict(state)
H_values.append(h_pred)
next_node, h_value = agent.act(H_values, neighbors)
self.graph.setParent(current_node, next_node)
agent.remember(current_node.state, h_value, next_node.state, done)
current_node = next_node
time_t += 1
if current_node.isGoal(goal_node):
done = 1
# determine how to add the h value to the replay memory after you reach the goal
agent.remember(current_node.state, [[0.0]], None, done)
cost = nodes_expanded
# print the score and break out of the loop
if not e%50:
print("episode: {}/{}, cost: {}".format(e, episodes, cost))
if len(agent.memory) >= agent.batch_size:
agent.replay()
H_history.append(agent.model.predict(np.reshape(start_node.state, [1, agent.state_size]), batch_size=1)[-1])
# Plot training loss values
plt.plot(H_history)
plt.title('Heuristic value')
plt.ylabel('H')
plt.xlabel('Episode')
plt.ylim([-5.0, 50.0])
plt.grid('on')
plt.show()
"""
# Deep Q-learning Agent
class DQNAgent:
def __init__(self, state_size, batch_size):
self.state_size = state_size
self.memory = deque(maxlen=20000)
self.epsilon = 1.0 # exploration rate
self.epsilon_min = 0.01
self.epsilon_decay = 0.99
self.learning_rate = 0.001
self.batch_size = 64
self.model = self.build_model()
def build_model(self, *args):
pass
def remember(self, state, next_states):
self.memory.append((state, next_states))
def act(self, h_values, states):
if np.random.rand() <= self.epsilon:
idx = random.randrange(len(states))
return states[idx], h_values[idx]
else:
idx = np.argmin(h_values)
return states[idx], h_values[idx]
def replay(self):
minibatch = random.sample(self.memory, self.batch_size)
states, targets = [], []
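        # Bellman-style update for the learned heuristic: the regression target for
        # a state is 1 + min over its successors of the predicted h value; the goal
        # is remembered with next_states=None, which pins its target to -1.0 + 1 = 0.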
for state, next_states in minibatch:
h_pred_min = float('inf')
if not next_states:
h_pred_min = -1.0
else:
for next_state in next_states:
state_reshaped = np.reshape(next_state, [1, self.state_size])
h_pred = self.model.predict(state_reshaped, batch_size=1)
if h_pred[-1][-1] < h_pred_min:
h_pred_min = h_pred[-1][-1]
target = h_pred_min + 1
states.append(state)
targets.append(target)
states, targets = np.array(states), np.array(targets)
history = self.model.fit(states, targets, batch_size=self.batch_size, epochs=1, verbose=0)
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
class LRTAStar(Solver):
def __init__(self):
super().__init__()
self.cost_table = {}
def solve(self, start_state, goal_state):
start_node = Node(start_state)
goal_node = Node(goal_state)
episodes = 300
batch_size = 64
agent = DQNAgent(len(start_node.state), batch_size)
H_history = []
# Iterate the game
# for e in range(episodes):
e = 0
while e < 10 or not abs(agent.model.predict(np.reshape(goal_node.state, [1, agent.state_size]))) < 0.1:
# reset state in the beginning of each game
nodes_expanded = 0
cost = float('inf')
current_node = start_node
done = 0
time_t = 0
while not current_node.isGoal(goal_node) and time_t < 200:
nodes_expanded += 1
neighbors = current_node.getNeighbors(current_node.state, goal_node.state, self.use_heuristic_cost)
if neighbors:
H_values = []
for neighbor in neighbors:
#choose an action epsilon greedy
state = np.reshape(neighbor.state, [1, agent.state_size])
h_pred = agent.model.predict(state)
H_values.append(h_pred)
next_node, h_value = agent.act(H_values, neighbors)
self.graph.setParent(current_node, next_node)
agent.remember(current_node.state, [x.state for x in neighbors])
if len(agent.memory) >= agent.batch_size:
agent.replay()
current_node = next_node
time_t += 1
if current_node.isGoal(goal_node):
done = 1
# determine how to add the h value to the replay memory after you reach the goal
agent.remember(current_node.state, None)
cost = nodes_expanded
print("start heuristic (= optimal cost): ", agent.model.predict( | np.reshape(start_node.state, [1, agent.state_size]) | numpy.reshape |
"""OA
"""
import numpy as np
from NNBlocks import tanh,sigm,relu,iden,isNone,expo,lrelu
def boxcars(periods):
periods = np.array(periods,dtype=int)
phases = np.zeros(periods.shape,dtype=int)
def filt(v,ph=phases,pd=periods,stored=[None]):
if isNone(stored[0]):
stored[0] = np.zeros(periods.shape+v.shape)
stored[0][ph==0] = 0
stored[0] += v
ph += 1
f = ph>=pd
ph[f] = 0
return stored[0],f
return filt
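# Usage sketch (illustrative values): build a bank of 4- and 16-sample boxcar
# filters; each call adds the new sample vector to the running sums and returns
# them together with a boolean mask of the windows that just completed.
#
#   filt = boxcars([4, 16])
#   sums, finished = filt(np.array([0.5, -0.5]))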
REGULARIZATION_FACTOR = 0.01
MAX_GRADIENT_STEP = 0.1
class NN:
one = np.array([1])
def __init__(self,shape=[1,8],af=tanh,history=4):
self.hist = history
self.hist_ind = 0
try:
len(af)
self.af = af
except:
self.af = [af]*(len(shape)-1)
self.shape = shape
self.weights = [np.zeros((shape[i+1],shape[i]+1),dtype=float) for i in range(len(shape)-1)]
self.gd_deltas = [np.copy(w) for w in self.weights]
self.vals = [np.zeros((history,s),dtype=float) for s in shape]
self.fvals = [np.zeros((history,s),dtype=float) for s in shape]
def reset_mem(self):
for i in range(len(self.vals)):
self.vals[i].fill(0)
self.fvals[i].fill(0)
def scramble(self,keep=0,mag=1):
for w in self.weights:
w *= keep
w += np.random.normal(w)*mag
def len(self):
return self.shape[-1]
def hi(self,o=0):
return (self.hist_ind+o)%self.hist
def __call__(self,inp=None):
if not isNone(inp):
h = self.hist_ind = self.hi(1)
self.fvals[0][h][:]= inp
for i in range(len(self.weights)):
self.vals[i+1][h][:]=self.weights[i]@np.concatenate((self.fvals[i][h],NN.one))
self.fvals[i+1][h][:]=self.af[i](self.vals[i+1][h][:])
return self.fvals[len(self.weights)][self.hi()]
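    # Backpropagate an error signal `prop` from the output layer to the input,
    # accumulating weight deltas (with an L2 penalty) that are applied later by
    # grad_apply(); returns the gradient of the error w.r.t. the network input.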
def grad_desc(self,prop,a=0.001,to=0):
assert to < self.hist
assert to >= 0
h = self.hi(-to)
for i in range(len(self.weights)-1,-1,-1):
#print("1:","prop:",prop,"vals[i+1]:",self.vals[i+1][h],"weights[i]:",self.weights[i])
prop = self.af[i].gradient(self.vals[i+1][h],self.fvals[i+1][h])*prop
d = np.outer(prop,np.concatenate((self.fvals[i][h],NN.one)))
#print("2:","prop:",prop,"fvals[i]:",self.fvals[i][h],"outer:",d)
self.gd_deltas[i] -= d*a + self.weights[i]*REGULARIZATION_FACTOR*a
prop = self.weights[i].transpose()[:-1]@prop
#print("3:","prop:",prop)
#print(" ")
return prop
def grad_apply(self,vel=0):
for i in range(len(self.weights)):
self.weights[i] += np.clip(self.gd_deltas[i],-MAX_GRADIENT_STEP,MAX_GRADIENT_STEP)
self.gd_deltas[i] = np.clip(self.gd_deltas[i]*vel,-MAX_GRADIENT_STEP,MAX_GRADIENT_STEP)
#gradient ascent
def grad(self,prop,a=0.001,to=0):
assert to < self.hist
assert to >= 0
h = self.hi(-to)
for i in range(len(self.weights)-1,-1,-1):
prop = self.af[i].gradient(self.vals[i+1][h],self.fvals[i+1][h])*prop
d = np.outer(prop,np.concatenate((self.fvals[i][h],NN.one)))
self.gd_deltas[i] += d*a #- self.weights[i]*REGULARIZATION_FACTOR*a
prop = self.weights[i].transpose()[:-1]@prop
return prop
def graph(func):
from matplotlib import pyplot as plt
plt.plot([i/100 for i in range(-2000,2000)],[func(inp=i/100)[0] for i in range(-2000,2000)])
    plt.show(block=0)
    return plt
def grad_test_nn():
print("making (1,2,1) relu net:")
print(" -3 ")
print(" 1 -> @ 2 \ ")
print("->@ -> @ ")
print(" -1 -> @ 3 / -1 ")
print(" +2 ")
n = NN((1,2,1),relu)
n.weights[0] = np.array([[1.,-3],[-1,2]])
n.weights[1] = np.array([[2.,3,-1]])
print("expect n(0) = 5")
print(" -3 | 0 ")
print(" 0 5 | 5 ")
print(" 2 | 2 ")
print("got",n(0))
print("====")
print("expect n(1) = 2")
print(" -2 | 0 ")
print(" 1 2 | 2 ")
print(" 1 | 1 ")
print("got",n(1))
print("====")
print("gradient step size 0.01")
print("teaching n(1) = 0")
print("expect this:")
print(" [ 1,-3] f [2 3 -1] f -> 2 ")
print(" [-1, 2]^ ^ ^ ^ ")
print("gradients: ^ [0 2 2] 2 2 ")
print(" ^ ^ 4,6 ")
print(" ^ 0,6")
print(" [ 0, 0] ")
print(" [ 6, 6] ")
print("^< -6 ")
g = n.grad_desc(n(1),0.01)
print("got:",g,n.gd_deltas)
n.grad_apply()
print("now n(1) = ",n(1))
def test_nn(a=0.1,vel=0,shape=(1,2,1),t=sigm):
#print("Making",shape,"sigm net")
n = NN(shape,t)
n.scramble()
from matplotlib import pyplot as plt
fig, ax = plt.subplots()
ax.set_ylim((-2,2))
p, = ax.plot([i/10 for i in range(-200,200)],[n(inp=i/10)[0] for i in range(-200,200)])
def anim(i):
for j in range(100):
for x,y in [(0,1),(1,0),(-1,0)]:
v = n(x)
n.grad_desc(v-y,a)
n.grad_apply(vel)
p.set_ydata([n(inp=i/10)[0] for i in range(-200,200)])
return p,
import matplotlib.animation as animation
ani = animation.FuncAnimation(fig,anim,interval=1)
plt.show(block=0)
return ani,anim,fig,ax,p
def test_nn_stack(a=0.1,vel=0,shape=[(1,2),(2,1)],t=[sigm,sigm],xr=[-1,0,1],yr=[0,1,0]):
#print("Making",shape,"sigm net")
ns = [NN(shape[i],t[i%len(t)]) for i in range(len(shape))]
for n in ns:
n.scramble()
from matplotlib import pyplot as plt
fig, ax = plt.subplots()
ax.set_ylim((-2,2))
def f(i):
v = i
for n in ns:
v = n(v)
return v
def gd(g,a):
for n in ns[::-1]:
g = n.grad(g,a)
return g
p, = ax.plot([i/10 for i in range(-200,200)],[f(i/10)[0] for i in range(-200,200)])
ax.plot(xr,yr,'o')
def anim(i,a=a,vel=vel):
for i in range(len(xr)):
v = f(xr[i])
gd(yr[i]-v,a)
for n in ns:
n.grad_apply(vel)
p.set_ydata([f(i/10)[0] for i in range(-200,200)])
return p,
import matplotlib.animation as animation
ani = animation.FuncAnimation(fig,anim,interval=1)
plt.show(block=0)
return ani,anim,fig,ax,p
class NoGC:
def __init__(self,*stuff):
self.stuff = stuff
def __repr__(self):
return "NoGC()"
import random
def test_rnn(a=0.1,vel=0,l=2,h=5,shape=(2,2),t=sigm,noise=0):
#print("Making",shape,"sigm net")
n = NN(shape,t,h)
n.scramble()
from matplotlib import pyplot as plt
if type(l) == int:
l = lambda i,l=l: i == l
if type(a) != type(l):
a = lambda i,l,v=a : v
if type(vel) != type(l):
vel = lambda i,l,v=vel : v
def f(l,res=[n()]):
n.reset_mem()
r = []
n(np.zeros(n.len()))
#n(res[0])
#for i in range(int((random.random()*10+1)*h)):
# n(np.concatenate((np.zeros(1),n()[1:])))
#res[0] = np.concatenate((np.zeros(1),n()[1:]))
for i in range(h-l):
            r += [np.copy(n(np.concatenate((np.zeros(1), n()[1:]))))]
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import os.path as osp
import numpy as np
import time
from . import lime_base
from ._session_preparation import paddle_get_fc_weights, compute_features_for_kmeans, gen_user_home
from .normlime_base import combine_normlime_and_lime, get_feature_for_kmeans, load_kmeans_model
from paddlex.interpret.as_data_reader.readers import read_image
import paddlex.utils.logging as logging
import cv2
class CAM(object):
def __init__(self, predict_fn, label_names):
"""
Args:
predict_fn: input: images_show [N, H, W, 3], RGB range(0, 255)
output: [
logits [N, num_classes],
feature map before global average pooling [N, num_channels, h_, w_]
]
"""
self.predict_fn = predict_fn
self.label_names = label_names
def preparation_cam(self, data_):
image_show = read_image(data_)
result = self.predict_fn(image_show)
logit = result[0][0]
if abs(np.sum(logit) - 1.0) > 1e-4:
# softmax
logit = logit - np.max(logit)
exp_result = np.exp(logit)
probability = exp_result / np.sum(exp_result)
else:
probability = logit
# only interpret top 1
        pred_label = np.argsort(probability)
################################# LICENSE ##################################
# Copyright (c) 2009, South African Astronomical Observatory (SAAO) #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer #
# in the documentation and/or other materials provided with the #
# distribution. #
# * Neither the name of the South African Astronomical Observatory #
# (SAAO) nor the names of its contributors may be used to endorse #
# or promote products derived from this software without specific #
# prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE SAAO ''AS IS'' AND ANY EXPRESS OR #
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED #
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE #
# DISCLAIMED. IN NO EVENT SHALL THE SAAO BE LIABLE FOR ANY #
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL #
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS #
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
############################################################################
"""slotview is an interactive tool to analyze slotmode data.
The input is either the same input given to slotphot or the output from slotphot.
"""
# Ensure python 2.5 compatibility
import os, time, math
import numpy as np
import scipy as sp
from pyraf import iraf
from pyraf.iraf import pysalt
import saltprint, salttime
import slottool as st
import tkinter as Tk
from matplotlib.widgets import Cursor, SpanSelector, Slider, CheckButtons
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
# Gui library imports
from PyQt4 import QtGui, QtCore
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg
# Salt imports
from saltgui import ImageDisplay, MplCanvas
from salterror import SaltIOError
import saltsafeio as saltio
import saltsafekey as saltkey
from saltsafelog import logging
from SlotViewWindow import SlotViewWindow
debug=True
# Make sure the plotting functions work with an older version of matplotlib
try:
import matplotlib.pyplot as plt
except ImportError:
import matplotlib.pylab as plt
def slotview(newfits,indata , fileout, srcfile, fps=10.0, phottype='square', sigdet=5, contpix=10, \
driftlimit=10, clobber=True,logfile='slotview.log',verbose=True):
#set up the variables
status = 0
entries = []
vig_lo = {}
vig_hi = {}
hour = 0
min = 0
sec = 0.
time0 = 0.
nframes = 0
sleep=0
with logging(logfile,debug) as log:
#enter in the input data
saltio.fileexists(newfits)
#set the sleep parameter
if fps>0: sleep=1.0/(fps)
# read in the data file
id, time, ratio, rerr, tx, ty, tflux, terr, cx, cy, cflux, cerr=st.readlcfile(indata)
# read extraction region defintion file
amp, x, y, x_o, y_o, r, br1, br2=st.readsrcfile(srcfile)
#determine the size of the data arrays
struct = saltio.openfits(newfits)
naxis1 = saltkey.get('NAXIS1',struct[1])
naxis2 = saltkey.get('NAXIS2',struct[1])
# Plot all of the data and the first image
# Create GUI
App = QtGui.QApplication([])
aw=SlotViewWindow(struct, id, tflux, cflux, ratio, time, phottype, sleep, \
tx, ty, cx, cy, r, br1, br2, naxis1, naxis2, sigdet, contpix, driftlimit)
aw.show()
# Start application event loop
app_exit=App.exec_()
# Check if GUI was executed succesfully
if app_exit!=0:
        raise SaltIOError('Slotview GUI has unexpected exit status ' + str(app_exit))
ratio, tflux, cflux, gframe, newphot=aw.ratio, aw.tflux, aw.cflux, aw.goodframes, aw.newphot
#close the input file
saltio.closefits(struct)
# Update the indata file if necessary
lc=saltio.openascii(fileout,'w')
for i in range(len(ratio)):
x['target']=tx[i]
x['comparison']=cx[i]
y['target']=ty[i]
y['comparison']=cy[i]
reltime=False
if gframe[i]:
st.writedataout(lc, id[i], time[i], x, y, tflux[i], terr[i], \
cflux[i], cerr[i], ratio[i], rerr[i], time[0], reltime)
saltio.closeascii(lc)
# -----------------------------------------------------------
# Plot the data
class makeplotdata(QtGui.QMainWindow):
def __init__(self, struct, pid, tflux, cflux, ratio, time, phottype, sleep, vig_lo, vig_hi, \
tx, ty, cx, cy, r, br1, br2, naxis1, naxis2, clobber, logfile, verbose):
"""As the data is measured, plots the target and companion, the drift, both light curves and the ratio
returns status
"""
#set up the variables
status=0
maxcolumn=7
self.struct = struct
self.infile=struct._HDUList__file.name
self.pid=pid
self.dtime=time.copy()
self.tflux=tflux
self.cflux=cflux
self.ratio=ratio
self.min_xlim=10
self.radius=r['comparison']
self.r=r
self.br1=br1
self.br2=br2
self.tx=tx
self.ty=ty
self.cx=cx
self.cy=cy
self.phottype=phottype
self.naxis1=naxis1
self.naxis2=naxis2
self.logfile=logfile
self.clobber=clobber
self.verbose=verbose
self.fft=False
self.stopplay=False
self.sleep=sleep
self.zbox=[]
self.newphot=0
self.npoint=4
if self.phottype=='circular':
self.npoint=24
if status==0:
self.id=0
self.nframes=len(self.struct)
self.header=self.struct[int(self.pid[self.id])].header
self.goodframes=self.dtime*0+1
# Setup widget
QtGui.QMainWindow.__init__(self)
# Set main widget
self.main = QtGui.QWidget(self)
# Set window title
self.setWindowTitle("Slotview: "+self.infile)
#self.root.bind("<Destroy>", self.destroy)
#self.root.bind("D", self.deleteframe)
#self.root.bind("u", self.undeleteframe)
#self.root.bind("n", self.call_playone)
#self.root.bind("b", self.call_revone)
#self.root.bind("?", self.help)
#self.root.bind("q", self.destroy)
#self.root.bind("<Button-1>", self.callback)
#set up the variables for which graphs to plot
#self.ratiovar=Tk.IntVar(master=self.root, value=1)
#self.star1var=Tk.IntVar(master=self.root, value=0)
#self.star2var=Tk.IntVar(master=self.root, value=0)
#self.slotfig=plt.figure(figsize=(8,1.5),dpi=72)
#plot the data
#self.plotdataarray()
#self.lcfig=plt.figure(figsize=(8,5),dpi=72)
#plot the light curve
#self.lcx1=self.dtime.min()
#self.lcx2=self.dtime.max()
#self.plotlightcurve()
inrow=4
lcrow=0
pcrow=1
darow=2
cprow=3
qurow=5
#add light curve plot
#self.lccanvas = FigureCanvasTkAgg(self.lcfig, master=self.root)
#self.lccanvas.show()
#self.lccanvas.get_tk_widget().grid(row = lcrow, column = 0, columnspan = maxcolumn, sticky = 'news')
#self.lccanvas.mpl_connect('button_press_event',self.lcpickstar)
#self.lccanvas.mpl_connect('motion_notify_event',self.lcdrawbox)
#self.lccanvas.mpl_connect('button_release_event',self.lczoom)
#add data array plot
#self.canvas = FigureCanvasTkAgg(self.slotfig, master=self.root)
#self.canvas.show()
#self.canvas.blit()
#self.canvas.get_tk_widget().grid(row = darow, column = 0, columnspan = maxcolumn, sticky = 'news')
#self.canvas.mpl_connect('key_press_event',self.newphoto)
#add the control widget
#self.cpFrame = Tk.Frame(master=self.root)
#self.cpFrame.grid(row=cprow, column=0, columnspan=maxcolumn, sticky='ew')
#self.frevbutton = Tk.Button(master=self.cpFrame, text='< <', width=5, command=self.freverse)
#self.frevbutton.grid(row=0, column=0, sticky='ew')
#self.revbutton = Tk.Button(master=self.cpFrame, text='<',width=5, command=self.reverse)
#self.revbutton.grid(row=0, column=1, sticky='ew')
#self.rev1button = Tk.Button(master=self.cpFrame, text='-',width=5, command=self.revone)
#self.rev1button.grid(row=0, column=2, sticky='ew')
#self.play1button = Tk.Button(master=self.cpFrame, text='+',width=5, command=self.playone)
#self.play1button.grid(row=0, column=4, sticky='ew')
#self.playbutton = Tk.Button(master=self.cpFrame, text='>',width=5, command=self.play)
#self.playbutton.grid(row=0, column=5, sticky='ew')
#self.fplaybutton = Tk.Button(master=self.cpFrame, text='> >',width=5, command=self.fplay)
#self.fplaybutton.grid(row=0, column=6, sticky='ew')
#self.stopbutton = Tk.Button(master=self.cpFrame, text='Stop',width=5, command=self.stop)
#self.stopbutton.grid(row=0, column=3, sticky='ew')
#add the information panel
#self.idtext= Tk.StringVar(master=self.root )
#self.imgtext= Tk.StringVar(master=self.root )
#self.timetext= Tk.StringVar(master=self.root )
#self.idLabel = Tk.Label(master=self.root, fg='#000000',textvariable=self.idtext, relief='solid')
#self.idLabel.grid(row=inrow, column=0, sticky='ew')
#self.imgLabel = Tk.Label(master=self.root, textvariable=self.imgtext, relief='solid')
#self.imgLabel.grid(row=inrow, column=1, columnspan=3, sticky='ew')
#self.timeLabel = Tk.Label(master=self.root, textvariable=self.timetext, relief='solid')
#self.timeLabel.grid(row=inrow, column=4, columnspan=3, sticky='ew')
#self.setinfolabels()
#add the plot control panel
#self.ratiobutton=Tk.Checkbutton(master=self.root, text='Flux Ratio', variable=self.ratiovar, \
# command=self.calllccheck)
#self.ratiobutton.grid(row=pcrow, column=0, sticky='ew')
#self.star1button=Tk.Checkbutton(master=self.root, text='Star1 Flux', variable=self.star1var, \
# command=self.calllccheck)
#self.star1button.grid(row=pcrow, column=1, sticky='ew')
#self.star2button=Tk.Checkbutton(master=self.root, text='Star2 Flux', variable=self.star2var, \
# command=self.calllccheck)
#self.star2button.grid(row=pcrow, column=2, sticky='ew')
#self.resetbutton = Tk.Button(master=self.root, text='Reset', command=self.callreset)
#self.resetbutton.grid(row=pcrow, column=6, sticky='ew')
#self.savebutton = Tk.Button(master=self.root, text='save', command=self.callsave)
#self.savebutton.grid(row=pcrow, column=5, sticky='ew')
#add the quit button
#self.quFrame = Tk.Frame(master=self.root)
#self.quFrame.grid(row=qurow, column=0, columnspan=maxcolumn, sticky='ew')
#self.exitbutton = Tk.Button(master=self.quFrame, text='Quit', command=self.exit)
#self.exitbutton.grid(row=0, column=3, sticky='ew')
#create the tabs
self.tabWidget=QtGui.QTabWidget()
#layout the widgets
mainLayout = QtGui.QVBoxLayout(self.main)
mainLayout.addWidget(self.tabWidget)
# Set the main widget as the central widget
self.setCentralWidget(self.main)
# Destroy widget on close
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
return
def runplotdata(self):
Tk.mainloop()
def destroy(self, e):
self.root.quit()
return
def exit(self):
self.root.quit()
return
def help(self, e):
"""Print the help message and the key-bindings available to the user"""
helpmessage="""
The following commands are available to the user:
? - Print this information q - quit the viewer
n - Move to the next image b - move back an image
D - Delete this image u - undelete this image
p - Perform photometry on this image
P - Perform photometry starting at this image
stop button-Stop photometry or display
reset button-Reset the light curve plot
save button-Save the current light curve plot
quit button-Quit the application
Left Click-Display image corresponding to this time
Right Click and Drag-In light curve plot, zoom in on this region
"""
print(helpmessage)
return
def setinfolabels(self):
"""Set the text labels according to the current object displayed.
Use the image header information if it is available.
"""
#set the id number
self.idtext.set(int(self.pid[self.id]))
#set the image name
oname=''
try:
oname=self.struct[int(self.pid[self.id])].header['ONAME']
oext=self.struct[int(self.pid[self.id])].header['OEXT']
oname=oname+'[%i]'%oext
except Exception as e:
try:
oname=self.struct[0].header['OBJECT']
except:
pass
self.imgtext.set(oname)
#set the time
try:
utime=self.struct[int(self.pid[self.id])].header['UTC-OBS']
self.timetext.set(utime)
except:
self.timetext.set('')
return
def calllccheck(self):
#turn the ratio curve on and off
if self.ratiovar.get():
self.lightcurve.set_visible(True)
else:
self.lightcurve.set_visible(False)
#turn the star1 curve on and off
if self.star1var.get():
self.star1curve.set_visible(True)
else:
self.star1curve.set_visible(False)
#turn the star2 curve on and off
if self.star2var.get():
self.star2curve.set_visible(True)
else:
self.star2curve.set_visible(False)
self.lcy1, self.lcy2, ylabel=self.lcylimits()
self.light_plot.set_ylim(self.lcy1, self.lcy2)
self.light_plot.set_ylabel(ylabel)
self.lccanvas.draw()
def lcylimits(self):
"""Determine the y-limts depending on what plots are selected """
mask = (self.dtime > self.lcx1)*(self.dtime<self.lcx2)*(self.goodframes>0)
if self.ratiovar.get():
rarr=np.compress(mask,self.ratio)
y1=rarr.min()
y2=rarr.max()
ylabel='Star1/Star2'
else:
if self.star2var.get() and self.star1var.get():
cfarr=np.compress(mask,self.cflux).max()
tfarr=np.compress(mask,self.tflux).max()
y1=0
y2=cfarr < tfarr and tfarr or cfarr
ylabel='Star Flux'
elif self.star2var.get():
cfarr=np.compress(mask,self.cflux)
y1=0
y2=cfarr.max()
ylabel='Star2 Flux'
else:
tfarr=np.compress(mask,self.tflux)
y1=0
y2=tfarr.max()
ylabel='Star1 Flux'
return y1, y2, ylabel
def newphoto(self, e):
"""program to being new photometry"""
if e.key=='c' and e.xdata and e.ydata:
cx=e.xdata
cy=e.ydata
cr=self.radius
image=self.struct[int(self.pid[self.id])].data
cimage, cx, cy = st.calcdrift(image, cx, cy, cr, self.naxis1, self.naxis2)
if cx >= 0 and cy >= 0:
self.cx[self.id]=cx
self.cy[self.id]=cy
self.updatedataplot()
if e.key=='t' and e.xdata and e.ydata:
tx=e.xdata
ty=e.ydata
tr=self.radius
image=self.struct[int(self.pid[self.id])].data
timage, tx, ty = st.calcdrift(image, tx, ty, tr, self.naxis1, self.naxis2)
if tx >= 0 and ty >= 0:
self.tx[self.id]=tx
self.ty[self.id]=ty
self.updatedataplot()
if e.key=='p':
self.redophot(self.id)
#self.updatelightcurve()
#self.lccanvas.draw()
self.lcfig.delaxes(self.light_plot)
self.plotlightcurve()
self.lccanvas.draw()
#self.callreset()
if e.key=='P':
nstart=self.id+1
nend=self.nframes-1
self.redophot(self.id)
self.stopplay=True
i=nstart
while i < nend and self.stopplay:
image=self.struct[int(self.pid[self.id])].data
# these may be changed
sigdet=5
contpix=10
sigback=3
driftlimit=10
iter=3
carray, fx,fy,status = st.finddrift(image, self.cx[i-1], self.cy[i-1], self.radius, \
self.naxis1, self.naxis2, sigdet, contpix, sigback, driftlimit, iter, self.logfile)
if fx > -1 and fy > -1:
if fx < self.naxis1 and fy < self.naxis2:
dx=self.cx[i-1]-fx
dy=self.cy[i-1]-fy
self.cx[i]=fx
self.cy[i]=fy
self.tx[i]=self.tx[i-1]-dx
self.ty[i]=self.ty[i-1]-dy
else:
message='Not able to perform photometry'
print(message)
return
else:
message='Not able to perform photometry'
print(message)
return
self.redophot(i)
self.lcfig.delaxes(self.light_plot)
self.plotlightcurve()
self.lccanvas.draw()
if self.dtime[i] < self.lcx1 or self.dtime[i] > self.lcx2: self.callreset()
#self.updatelightcurve()
#self.lccanvas.draw()
self.root.update()
if not self.stopplay: self.updatedataplot()
i += 1
def redophot(self, id):
self.newphot=1
self.id=id
x={}
y={}
x['target']=self.tx[self.id]
y['target']=self.ty[self.id]
x['comparison']=self.cx[self.id]
y['comparison']=self.cy[self.id]
image=self.struct[int(self.pid[self.id])].data
#these will need to be changed
gain=1
rdnoise=1
verbose=False
tflux, tflux_err, cflux, cflux_err, ratio, ratio_err, status = \
st.dophot(self. phottype, image, x, y, self.r, self.br1, self.br2, \
gain, rdnoise, self.naxis1, self.naxis2)
if status==0:
self.tflux[self.id]=tflux
self.cflux[self.id]=cflux
self.ratio[self.id]=ratio
def lcpickstar(self, e):
if e.button==1 and e.xdata:
self.id=self.findtime(e.xdata)+1
self.updatedataplot()
if e.button==3 and e.xdata:
self.xt1 = e.xdata
self.yt1 = self.lcy1
def lcdrawbox(self, e):
if e.button==3 and e.xdata:
self.xt2=e.xdata
self.yt2=self.lcy2
xp=[self.xt1, self.xt1, self.xt2, self.xt2]
yp=[self.yt1, self.yt2, self.yt2, self.yt1]
if self.zbox:
self.zbox.set_visible(False)
self.zbox,=self.light_plot.fill(xp, yp, fc='#777777', ec='#FF0000', alpha=0.5,visible=True)
self.lccanvas.draw()
def lczoom(self, e):
"""Handles time axis zoom on the light curve.
Once the 3-button is released, it will capture the new position and replot the zoomed in curve"""
if e.button==3 and e.xdata:
self.xt2=e.xdata
self.yt2=self.lcy2
if self.xt2<self.xt1:
xtemp=self.xt1
self.xt1=self.xt2
self.xt2=xtemp
self.lcx1=self.xt1
self.lcx2=self.xt2
self.lcy1=self.yt1
self.lcy2=self.yt2
if self.lcx2-self.lcx1>0:
self.lcfig.delaxes(self.light_plot)
self.plotlightcurve()
if self.zbox:
self.zbox.set_visible(False)
self.lccanvas.draw()
def callsave(self):
"""Save a copy of the lc curve to a .ps file"""
self.sroot=Tk.Tk()
self.sroot.wm_title("Save File as:")
TitleLabel = Tk.Label(master=self.sroot, text='Please enter a filename for the output PS file', border=5)
TitleLabel.grid(row=0, column=0, columnspan=2, sticky='ew')
nameLabel = Tk.Label(master=self.sroot, text='Filename:', relief='solid')
nameLabel.grid(row=1, column=0, sticky='ew')
self.nametext=Tk.StringVar(master=self.sroot)
nameEntry = Tk.Entry(master=self.sroot, textvariable=self.nametext)
nameEntry.grid(row=1, column=1, sticky='ew')
nameEntry.focus_set()
self.sroot.bind('<Return>', self._finishcallsave)
return
def _finishcallsave(self, e):
status=0
self.sroot.destroy()
name=self.nametext.get()
if not name: return
if name[-3:]!='.ps': name=name+'.ps'
#remove the file if the name already exists
if saltio.filedoesnotexist(name,self.verbose, self.logfile):
if self.clobber:
os.remove(name)
else:
message = 'ERROR -- SALTVIEW: File ' + name + ' already exists, use clobber=y'
                status = saltprint.err(self.logfile, message)
return
#turn the red dot off in the graph
self.light_point.set_visible(False)
#save the figure
self.lcfig.savefig(name)
#turn the red dot on in the graph
self.light_point.set_visible(True)
def callreset(self):
self.lcx1=self.dtime.min()
self.lcx2=self.dtime.max()
self.lcfig.delaxes(self.light_plot)
self.plotlightcurve()
self.lccanvas.draw()
def undeleteframe(self, e):
self.goodframes[self.id] = 1
message='SALTPHOT: Extension %i was undeleted' % self.pid[self.id]
saltprint.log(self.logfile, message, self.verbose)
def deleteframe(self, e):
self.newphot=1
self.goodframes[self.id] = 0
message='SALTPHOT: Extension %i was deleted' % self.pid[self.id]
saltprint.log(self.logfile, message, self.verbose)
def callback(self, e):
print(e.x, e.y)
def stop(self):
self.stopplay=False
def call_playone(self, e):
self.playone()
def call_revone(self, e):
self.revone()
def playone(self):
stopid = self.nframes-2
if self.id < (stopid): self.id=self.id+1
self.updatedataplot()
def play(self):
self.stopplay=True
stopid = self.nframes-2
while self.stopplay and self.id < stopid:
self.id = self.id+1
time.sleep(self.sleep)
self.updatedataplot()
self.root.update()
def fplay(self):
self.stopplay=True
stopid = self.nframes-2
while self.stopplay and self.id < stopid:
self.id = self.id+1
self.updatedataplot()
self.root.update()
def revone(self):
if self.id > 0: self.id=self.id-1
self.updatedataplot()
def reverse(self):
self.stopplay=True
while self.stopplay and self.id > 0:
self.id = self.id-1
time.sleep(self.sleep)
self.updatedataplot()
self.root.update()
def freverse(self):
self.stopplay=True
while self.stopplay and self.id > 0:
self.id = self.id-1
self.updatedataplot()
self.root.update()
def callsetfft(self, label):
if label=='FFT':
self.fft=(not self.fft)
self.plotfft()
def plotfft(self):
fftfig=plt.figure(figsize=(8,8),dpi=72)
axfft=fftfig.add_axes([0.10,0.10,0.8,0.50], autoscale_on=True)
mask = (self.dtime > self.lcx1)*(self.dtime<self.lcx2)
tarr=np.compress(mask,self.dtime)
rarr=np.compress(mask,self.ratio)
#ftarr=np.fft.fft(tarr)
ftarr=np.arange(len(tarr))
frarr=np.fft.fft(rarr)
axfft.hold(True)
fftcurve=axfft.plot(ftarr,frarr,linewidth=0.5,linestyle='-',marker='',color='b')
plt.show()
def slide_update(self, val):
self.id=self.findtime(val)
self.updatedataplot()
def plotdataarray(self):
"""Plot the image array
return axes
"""
self.ob_plot = self.slotfig.add_axes([0.10,0.10,0.8,0.80], autoscale_on=True)
plt.setp(plt.gca(),xticks=[],yticks=[])
plt.jet()
self.array=self.struct[int(self.pid[self.id])].data
self.imarr=self.ob_plot.imshow(self.array,origin='lower')
#Plot the apertures
self.cbox,=self.plotbox('#00FF00',self.cx[self.id],self.cy[self.id],self.radius,self.npoint,self.naxis1, self.naxis2)
self.tbox,=self.plotbox('#FFFF00',self.tx[self.id],self.ty[self.id],self.radius,self.npoint,self.naxis1, self.naxis2)
def updatedataplot(self):
"""Handle updating the light curve plot and the data array plot when the
data array image is changed
"""
#update the information panel
self.setinfolabels()
self.ptime=self.dtime[self.id]
self.pratio=self.ratio[self.id]
#Check to make the red button hasn't moved outside of the plotting area
if self.ptime < self.lcx1 or self.ptime > self.lcx2: self.callreset()
#update the red piont on the light curve plot
        self.light_point.set_data(np.asarray([self.ptime]), np.asarray([self.pratio]))
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
to_list = ak._v2.operations.convert.to_list
def test_EmptyArray():
v2a = ak._v2.contents.emptyarray.EmptyArray()
with pytest.raises(IndexError):
v2a[np.array([0, 1], np.int64)]
def test_NumpyArray():
v2a = ak._v2.contents.numpyarray.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3]))
resultv2 = v2a[np.array([0, 1, -2], np.int64)]
assert to_list(resultv2) == [0.0, 1.1, 2.2]
assert v2a.typetracer[np.array([0, 1, -2], np.int64)].form == resultv2.form
v2b = ak._v2.contents.numpyarray.NumpyArray(
        np.arange(2 * 3 * 5, dtype=np.int64)
    )
import pandas as pd
import numpy as np
import scipy
import sys
from annoy import AnnoyIndex
from packaging import version
from scipy.spatial import cKDTree
from scipy.sparse import coo_matrix
from umap.umap_ import fuzzy_simplicial_set
from sklearn.neighbors import KDTree
from sklearn.neighbors import DistanceMetric
from sklearn.linear_model import Ridge
try:
from scanpy import logging as logg
except ImportError:
pass
try:
import anndata
except ImportError:
pass
try:
import faiss
except ImportError:
pass
def get_sparse_matrix_from_indices_distances_umap(
knn_indices, knn_dists, n_obs, n_neighbors
):
"""
Copied out of scanpy.neighbors
"""
rows = np.zeros((n_obs * n_neighbors), dtype=np.int64)
cols = np.zeros((n_obs * n_neighbors), dtype=np.int64)
vals = np.zeros((n_obs * n_neighbors), dtype=np.float64)
for i in range(knn_indices.shape[0]):
for j in range(n_neighbors):
if knn_indices[i, j] == -1:
continue # We didn't get the full knn for i
if knn_indices[i, j] == i:
val = 0.0
else:
val = knn_dists[i, j]
rows[i * n_neighbors + j] = i
cols[i * n_neighbors + j] = knn_indices[i, j]
vals[i * n_neighbors + j] = val
result = coo_matrix((vals, (rows, cols)), shape=(n_obs, n_obs))
result.eliminate_zeros()
return result.tocsr()
def compute_connectivities_umap(
knn_indices,
knn_dists,
n_obs,
n_neighbors,
set_op_mix_ratio=1.0,
local_connectivity=1.0,
):
"""
Copied out of scanpy.neighbors
This is from umap.fuzzy_simplicial_set [McInnes18]_.
Given a set of data X, a neighborhood size, and a measure of distance
compute the fuzzy simplicial set (here represented as a fuzzy graph in
the form of a sparse matrix) associated to the data. This is done by
locally approximating geodesic distance at each point, creating a fuzzy
simplicial set for each such point, and then combining all the local
fuzzy simplicial sets into a global one via a fuzzy union.
"""
X = coo_matrix(([], ([], [])), shape=(n_obs, 1))
connectivities = fuzzy_simplicial_set(
X,
n_neighbors,
None,
None,
knn_indices=knn_indices,
knn_dists=knn_dists,
set_op_mix_ratio=set_op_mix_ratio,
local_connectivity=local_connectivity,
)
if isinstance(connectivities, tuple):
# In umap-learn 0.4, this returns (result, sigmas, rhos)
connectivities = connectivities[0]
distances = get_sparse_matrix_from_indices_distances_umap(
knn_indices, knn_dists, n_obs, n_neighbors
)
return distances, connectivities.tocsr()
def create_tree(data, approx, metric, use_faiss, n_trees):
"""
Create a faiss/cKDTree/KDTree/annoy index for nearest neighbour lookup. All undescribed input
as in ``bbknn.bbknn()``. Returns the resulting index.
Input
-----
data : ``numppy.array``
PCA coordinates of a batch's cells to index.
"""
if approx:
ckd = AnnoyIndex(data.shape[1], metric=metric)
for i in np.arange(data.shape[0]):
ckd.add_item(i, data[i, :])
ckd.build(n_trees)
elif metric == "euclidean":
if "faiss" in sys.modules and use_faiss:
ckd = faiss.IndexFlatL2(data.shape[1])
ckd.add(data)
else:
ckd = cKDTree(data)
else:
ckd = KDTree(data, metric=metric)
return ckd
def query_tree(data, ckd, neighbors_within_batch, approx, metric, use_faiss):
"""
Query the faiss/cKDTree/KDTree/annoy index with PCA coordinates from a batch. All undescribed input
as in ``bbknn.bbknn()``. Returns a tuple of distances and indices of neighbours for each cell
in the batch.
Input
-----
data : ``numpy.array``
PCA coordinates of a batch's cells to query.
ckd : faiss/cKDTree/KDTree/annoy index
"""
if approx:
ckdo_ind = []
ckdo_dist = []
for i in np.arange(data.shape[0]):
holder = ckd.get_nns_by_vector(
data[i, :], neighbors_within_batch, include_distances=True
)
ckdo_ind.append(holder[0])
ckdo_dist.append(holder[1])
ckdout = (np.asarray(ckdo_dist), np.asarray(ckdo_ind))
elif metric == "euclidean":
if "faiss" in sys.modules and use_faiss:
D, I = ckd.search(data, neighbors_within_batch)
# sometimes this turns up marginally negative values, just set those to zero
D[D < 0] = 0
# the distance returned by faiss needs to be square rooted to be actual euclidean
ckdout = (np.sqrt(D), I)
else:
ckdout = ckd.query(x=data, k=neighbors_within_batch, n_jobs=-1)
else:
ckdout = ckd.query(data, k=neighbors_within_batch)
return ckdout
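# The two helpers above are used as a pair by get_graph() below: an index is built
# once per reference batch with create_tree() and then queried with every batch's
# cells via query_tree(). A rough sketch with made-up array names:
#
#   ckd = create_tree(ref_pcs, approx=True, metric='angular', use_faiss=True, n_trees=10)
#   dists, inds = query_tree(query_pcs, ckd, neighbors_within_batch=3,
#                            approx=True, metric='angular', use_faiss=True)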
def get_graph(
pca, batch_list, neighbors_within_batch, n_pcs, approx, metric, use_faiss, n_trees
):
"""
Identify the KNN structure to be used in graph construction. All input as in ``bbknn.bbknn()``
and ``bbknn.bbknn_pca_matrix()``. Returns a tuple of distances and indices of neighbours for
each cell.
"""
# get a list of all our batches
batches = np.unique(batch_list)
# in case we're gonna be faissing, turn the data to float32
if metric == "euclidean" and not approx and "faiss" in sys.modules and use_faiss:
pca = pca.astype("float32")
# create the output matrices, with the indices as integers and distances as floats
knn_distances = np.zeros((pca.shape[0], neighbors_within_batch * len(batches)))
knn_indices = np.copy(knn_distances).astype(int)
# find the knns using faiss/cKDTree/KDTree/annoy
# need to compare each batch against each batch (including itself)
for to_ind in range(len(batches)):
# this is the batch that will be used as the neighbour pool
# create a boolean mask identifying the cells within this batch
# and then get the corresponding row numbers for later use
batch_to = batches[to_ind]
mask_to = batch_list == batch_to
ind_to = np.arange(len(batch_list))[mask_to]
# create the faiss/cKDTree/KDTree/annoy, depending on approx/metric
ckd = create_tree(
data=pca[mask_to, :n_pcs],
approx=approx,
metric=metric,
use_faiss=use_faiss,
n_trees=n_trees,
)
for from_ind in range(len(batches)):
# this is the batch that will have its neighbours identified
# repeat the mask/row number getting
batch_from = batches[from_ind]
mask_from = batch_list == batch_from
ind_from = np.arange(len(batch_list))[mask_from]
# fish the neighbours out, getting a (distances, indices) tuple back
ckdout = query_tree(
data=pca[mask_from, :n_pcs],
ckd=ckd,
neighbors_within_batch=neighbors_within_batch,
approx=approx,
metric=metric,
use_faiss=use_faiss,
)
# the identified indices are relative to the subsetted PCA matrix
# so we need to convert it back to the original row numbers
for i in range(ckdout[1].shape[0]):
for j in range(ckdout[1].shape[1]):
ckdout[1][i, j] = ind_to[ckdout[1][i, j]]
# save the results within the appropriate rows and columns of the structures
col_range = np.arange(
to_ind * neighbors_within_batch, (to_ind + 1) * neighbors_within_batch
)
knn_indices[ind_from[:, None], col_range[None, :]] = ckdout[1]
knn_distances[ind_from[:, None], col_range[None, :]] = ckdout[0]
return knn_distances, knn_indices
def trimming(cnts, trim):
"""
Trims the graph to the top connectivities for each cell. All undescribed input as in
``bbknn.bbknn()``.
Input
-----
cnts : ``CSR``
Sparse matrix of processed connectivities to trim.
"""
vals = np.zeros(cnts.shape[0])
for i in range(cnts.shape[0]):
# Get the row slice, not a copy, only the non zero elements
row_array = cnts.data[cnts.indptr[i] : cnts.indptr[i + 1]]
if row_array.shape[0] <= trim:
continue
# fish out the threshold value
vals[i] = row_array[np.argsort(row_array)[-1 * trim]]
for iter in range(2):
# filter rows, flip, filter columns using the same thresholds
for i in range(cnts.shape[0]):
# Get the row slice, not a copy, only the non zero elements
row_array = cnts.data[cnts.indptr[i] : cnts.indptr[i + 1]]
if row_array.shape[0] <= trim:
continue
# apply cutoff
row_array[row_array < vals[i]] = 0
cnts.eliminate_zeros()
cnts = cnts.T.tocsr()
return cnts
def bbknn(
adata,
batch_key="batch",
use_rep="X_pca",
approx=True,
metric="angular",
copy=False,
**kwargs
):
"""
Batch balanced KNN, altering the KNN procedure to identify each cell's top neighbours in
each batch separately instead of the entire cell pool with no accounting for batch.
Aligns batches in a quick and lightweight manner.
    For use in the scanpy workflow as an alternative to ``scanpy.api.pp.neighbors()``.
Input
-----
adata : ``AnnData``
Needs the PCA computed and stored in ``adata.obsm["X_pca"]``.
batch_key : ``str``, optional (default: "batch")
``adata.obs`` column name discriminating between your batches.
neighbors_within_batch : ``int``, optional (default: 3)
How many top neighbours to report for each batch; total number of neighbours
will be this number times the number of batches.
use_rep : ``str``, optional (default: "X_pca")
The dimensionality reduction in ``.obsm`` to use for neighbour detection. Defaults to PCA.
n_pcs : ``int``, optional (default: 50)
How many dimensions (in case of PCA, principal components) to use in the analysis.
trim : ``int`` or ``None``, optional (default: ``None``)
Trim the neighbours of each cell to these many top connectivities. May help with
population independence and improve the tidiness of clustering. The lower the value the
more independent the individual populations, at the cost of more conserved batch effect.
If ``None``, sets the parameter value automatically to 10 times the total number of
neighbours for each cell. Set to 0 to skip.
approx : ``bool``, optional (default: ``True``)
If ``True``, use annoy's approximate neighbour finding. This results in a quicker run time
for large datasets while also potentially increasing the degree of batch correction.
n_trees : ``int``, optional (default: 10)
Only used when ``approx=True``. The number of trees to construct in the annoy forest.
More trees give higher precision when querying, at the cost of increased run time and
resource intensity.
use_faiss : ``bool``, optional (default: ``True``)
If ``approx=False`` and the metric is "euclidean", use the faiss package to compute
nearest neighbours if installed. This improves performance at a minor cost to numerical
precision as faiss operates on float32.
metric : ``str`` or ``sklearn.neighbors.DistanceMetric``, optional (default: "angular")
What distance metric to use. If using ``approx=True``, the options are "angular",
"euclidean", "manhattan" and "hamming". Otherwise, the options are "euclidean",
a member of the ``sklearn.neighbors.KDTree.valid_metrics`` list, or parameterised
``sklearn.neighbors.DistanceMetric`` `objects
<https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.DistanceMetric.html>`_:
>>> from sklearn import neighbors
>>> neighbors.KDTree.valid_metrics
['p', 'chebyshev', 'cityblock', 'minkowski', 'infinity', 'l2', 'euclidean', 'manhattan', 'l1']
>>> pass_this_as_metric = neighbors.DistanceMetric.get_metric('minkowski',p=3)
set_op_mix_ratio : ``float``, optional (default: 1)
UMAP connectivity computation parameter, float between 0 and 1, controlling the
blend between a connectivity matrix formed exclusively from mutual nearest neighbour
pairs (0) and a union of all observed neighbour relationships with the mutual pairs
emphasised (1)
local_connectivity : ``int``, optional (default: 1)
UMAP connectivity computation parameter, how many nearest neighbors of each cell
are assumed to be fully connected (and given a connectivity value of 1)
copy : ``bool``, optional (default: ``False``)
If ``True``, return a copy instead of writing to the supplied adata.
"""
start = logg.info("computing batch balanced neighbors")
adata = adata.copy() if copy else adata
# basic sanity checks to begin
# is our batch key actually present in the object?
if batch_key not in adata.obs:
raise ValueError("Batch key '" + batch_key + "' not present in `adata.obs`.")
# do we have a computed PCA?
if use_rep not in adata.obsm.keys():
raise ValueError(
"Did not find "
+ use_rep
+ " in `.obsm.keys()`. You need to compute it first."
)
# metric sanity checks
if approx and metric not in ["angular", "euclidean", "manhattan", "hamming"]:
logg.warning(
"unrecognised metric for type of neighbor calculation, switching to angular"
)
metric = "angular"
elif not approx and not (
metric == "euclidean"
or isinstance(metric, DistanceMetric)
or metric in KDTree.valid_metrics
):
logg.warning(
"unrecognised metric for type of neighbor calculation, switching to euclidean"
)
metric = "euclidean"
# prepare bbknn_pca_matrix input
pca = adata.obsm[use_rep]
batch_list = adata.obs[batch_key].values
# call BBKNN proper
bbknn_out = bbknn_pca_matrix(
pca=pca, batch_list=batch_list, approx=approx, metric=metric, **kwargs
)
# store the parameters in .uns['neighbors']['params'], add use_rep and batch_key
adata.uns["neighbors"] = {}
adata.uns["neighbors"]["params"] = bbknn_out[2]
adata.uns["neighbors"]["params"]["use_rep"] = use_rep
adata.uns["neighbors"]["params"]["bbknn"]["batch_key"] = batch_key
# store the graphs in .uns['neighbors'] or .obsp, conditional on anndata version
if version.parse(str(anndata.__version__)) < version.parse("0.7.0"):
adata.uns["neighbors"]["distances"] = bbknn_out[0]
adata.uns["neighbors"]["connectivities"] = bbknn_out[1]
logg.info(
" finished",
time=start,
deep=(
"added to `.uns['neighbors']`\n"
" 'distances', distances for each pair of neighbors\n"
" 'connectivities', weighted adjacency matrix"
),
)
else:
adata.obsp["distances"] = bbknn_out[0]
adata.obsp["connectivities"] = bbknn_out[1]
adata.uns["neighbors"]["distances_key"] = "distances"
adata.uns["neighbors"]["connectivities_key"] = "connectivities"
logg.info(
" finished",
time=start,
deep=(
"added to `.uns['neighbors']`\n"
" `.obsp['distances']`, distances for each pair of neighbors\n"
" `.obsp['connectivities']`, weighted adjacency matrix"
),
)
return adata if copy else None
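# Minimal usage sketch (assumes an AnnData object `adata` with PCA already computed
# in `.obsm['X_pca']` and a 'batch' column in `.obs`; parameter values are only
# illustrative):
#
#   import scanpy as sc
#   bbknn(adata, batch_key='batch', neighbors_within_batch=3, n_pcs=50)
#   sc.tl.umap(adata)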
def bbknn_pca_matrix(
pca,
batch_list,
neighbors_within_batch=3,
n_pcs=50,
trim=None,
approx=True,
n_trees=10,
use_faiss=True,
metric="angular",
set_op_mix_ratio=1,
local_connectivity=1,
):
"""
Scanpy-independent BBKNN variant that runs on a PCA matrix and list of per-cell batch assignments instead of
an AnnData object. Non-data-entry arguments behave the same way as ``bbknn.bbknn()``.
Returns a ``(distances, connectivities, parameters)`` tuple, like what would have been stored in the AnnData object.
The connectivities are the actual neighbourhood graph.
Input
-----
pca : ``numpy.array``
PCA (or other dimensionality reduction) coordinates for each cell, with cells as rows.
batch_list : ``numpy.array`` or ``list``
A list of batch assignments for each cell.
"""
# more basic sanity checks/processing
# do we have the same number of cells in pca and batch_list?
if pca.shape[0] != len(batch_list):
raise ValueError(
"Different cell counts indicated by `pca.shape[0]` and `len(batch_list)`."
)
# convert batch_list to np.array of strings for ease of mask making later
batch_list = np.asarray([str(i) for i in batch_list])
# assert that all batches have at least neighbors_within_batch cells in there
unique, counts = np.unique(batch_list, return_counts=True)
if np.min(counts) < neighbors_within_batch:
raise ValueError(
"Not all batches have at least `neighbors_within_batch` cells in them."
)
# metric sanity checks (duplicating the ones in bbknn(), but without scanpy logging)
if approx and metric not in ["angular", "euclidean", "manhattan", "hamming"]:
print(
"unrecognised metric for type of neighbor calculation, switching to angular"
)
metric = "angular"
elif not approx and not (
metric == "euclidean"
or isinstance(metric, DistanceMetric)
or metric in KDTree.valid_metrics
):
print(
"unrecognised metric for type of neighbor calculation, switching to euclidean"
)
metric = "euclidean"
# obtain the batch balanced KNN graph
knn_distances, knn_indices = get_graph(
pca=pca,
batch_list=batch_list,
n_pcs=n_pcs,
n_trees=n_trees,
approx=approx,
metric=metric,
use_faiss=use_faiss,
neighbors_within_batch=neighbors_within_batch,
)
# sort the neighbours so that they're actually in order from closest to furthest
newidx = np.argsort(knn_distances, axis=1)
knn_indices = knn_indices[
np.arange(np.shape(knn_indices)[0])[:, np.newaxis], newidx
]
knn_distances = knn_distances[
np.arange(np.shape(knn_distances)[0])[:, np.newaxis], newidx
]
# this part of the processing is akin to scanpy.api.neighbors()
dist, cnts = compute_connectivities_umap(
knn_indices,
knn_distances,
knn_indices.shape[0],
knn_indices.shape[1],
set_op_mix_ratio=set_op_mix_ratio,
local_connectivity=local_connectivity,
)
# trimming. compute default range if absent
if trim is None:
trim = 10 * knn_distances.shape[1]
# skip trimming if set to 0, otherwise trim
if trim > 0:
cnts = trimming(cnts=cnts, trim=trim)
# create a collated parameters dictionary
# determine which neighbour computation was used, mirroring create_tree() logic
if approx:
computation = "annoy"
elif metric == "euclidean":
if "faiss" in sys.modules and use_faiss:
computation = "faiss"
else:
computation = "cKDTree"
else:
computation = "KDTree"
# we'll have a zero distance for our cell of origin, and nonzero for every other neighbour computed
params = {
"n_neighbors": len(dist[0, :].data) + 1,
"method": "umap",
"metric": metric,
"n_pcs": n_pcs,
"bbknn": {"trim": trim, "computation": computation},
}
return (dist, cnts, params)
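# Minimal usage sketch for the scanpy-independent entry point (shapes and labels
# are made up for illustration):
#
#   import numpy as np
#   pca = np.random.normal(size=(1000, 50))          # cells x dimensions
#   batches = ['a'] * 500 + ['b'] * 500              # one batch label per cell
#   distances, connectivities, params = bbknn_pca_matrix(pca, batches)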
def ridge_regression(
adata, batch_key, confounder_key=[], chunksize=1e8, copy=False, **kwargs
):
"""
Perform ridge regression on scaled expression data, accepting both technical and
biological categorical variables. The effect of the technical variables is removed
while the effect of the biological variables is retained. This is a preprocessing
step that can aid BBKNN integration `(Park, 2020) <https://science.sciencemag.org/content/367/6480/eaay3224.abstract>`_.
Alters the object's ``.X`` to be the regression residuals, and creates ``.layers['X_explained']``
with the expression explained by the technical effect.
Input
-----
adata : ``AnnData``
Needs scaled data in ``.X``.
batch_key : ``list``
A list of categorical ``.obs`` columns to regress out as technical effects.
confounder_key : ``list``, optional (default: ``[]``)
A list of categorical ``.obs`` columns to retain as biological effects.
chunksize : ``int``, optional (default: 1e8)
How many elements of the expression matrix to process at a time. Potentially useful
to manage memory use for larger datasets.
copy : ``bool``, optional (default: ``False``)
If ``True``, return a copy instead of writing to the supplied adata.
kwargs
Any arguments to pass to `Ridge <https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html>`_.
"""
start = logg.info("computing ridge regression")
adata = adata.copy() if copy else adata
# just in case the arguments are not provided as lists, convert them to such
# as they need to be lists for downstream application
if not isinstance(batch_key, list):
batch_key = [batch_key]
if not isinstance(confounder_key, list):
confounder_key = [confounder_key]
# construct a helper representation of the batch and biological variables
# as a data frame with one row per cell, with columns specifying the various batch/biological categories
# with values of 1 where the cell is of the category and 0 otherwise (dummy)
# and subsequently identify which of the data frame columns are batch rather than biology (batch_index)
# and subset the data frame to just those columns, in np.array form (dm)
dummy = pd.get_dummies(adata.obs[batch_key + confounder_key], drop_first=False)
if len(batch_key) > 1:
batch_index = np.logical_or.reduce(
np.vstack([dummy.columns.str.startswith(x) for x in batch_key])
)
else:
batch_index = np.vstack([dummy.columns.str.startswith(x) for x in batch_key])[0]
dm = np.array(dummy)[:, batch_index]
    # compute how many genes at a time will be processed - aiming for chunksize total elements per chunk
chunkcount = np.ceil(chunksize / adata.shape[0])
# make a Ridge with all the **kwargs passed if need be, and fit_intercept set to False
# (as the data is centered). create holders for results
LR = Ridge(fit_intercept=False, **kwargs)
X_explained = []
X_remain = []
# loop over the gene space in chunkcount-sized chunks
for ind in np.arange(0, adata.shape[1], chunkcount):
# extract the expression and turn to dense if need be
        X_exp = adata.X[:, np.int(ind):np.int(ind + chunkcount)]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
pyplr.plr
=========
A module to assist with parametrising and plotting pupillary light responses.
@author: jtm
'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
class PLR:
'''Class to handle data representing a pupil response to a flash of light.
'''
# TODO: add time stuff
def __init__(self, plr, sample_rate, onset_idx, stim_duration):
'''Initialise the PLR data.
Parameters
----------
plr : arraylike
Data representing a pupil response to a flash of light.
sample_rate : int
Frequency at which the data were sampled.
onset_idx : int
Ordinal index matching the onset of the light stimulus.
stim_duration : int
            Duration of the light stimulus in seconds.
Returns
-------
None.
'''
self.plr = plr
self.sample_rate = sample_rate
self.onset_idx = onset_idx
self.stim_duration = stim_duration
def velocity_profile(self):
'''Return the velocity profile of the PLR. Assumes the samples are
evenly spaced, which is not the case with Pupil Labs data. Smoothing
and averaging across multiple PLRs should remove cause for concern.
'''
t = 1 / self.sample_rate
return np.diff(self.plr, prepend=np.nan) / t
def acceleration_profile(self):
'''Return the acceleration profile of a PLR. Assumes the samples are
evenly spaced, which is not the case with Pupil Labs data. Smoothing
and averaging across multiple PLRs should remove cause for concern.
'''
t = 1 / self.sample_rate
vel = self.velocity_profile()
return np.diff(vel, prepend=np.nan) / t
def baseline(self):
        '''Return the average pupil size between the start of the recording and onset_idx.
'''
return np.mean(self.plr[0:self.onset_idx])
def pupil_size_at_onset(self):
'''Return pupil size at stimulus onset.
'''
return self.plr[self.onset_idx]
def latency_idx_a(self):
'''Return the index where pupil size passes 1% change from size at
light onset.
'''
b = self.pupil_size_at_onset()
threshold = b - (b * .01)
lidx = np.argmax(self.plr[self.onset_idx:] < threshold)
lidx += self.onset_idx
return lidx
def latency_idx_b(self):
'''Return the index of peak negative acceleration in the second after
light onset.
'''
acc = self.acceleration_profile()
        lidx = np.argmin(acc[self.onset_idx:self.onset_idx + self.sample_rate])
        lidx += self.onset_idx
        return lidx
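# Hedged usage sketch (not part of the original module): constructing a PLR from a synthetic
# trace sampled at 120 Hz with light onset at sample 120. The numbers are illustrative only.
#
#   trace = np.concatenate([np.full(120, 7.0), np.linspace(7.0, 4.0, 240), np.full(240, 5.5)])
#   plr = PLR(trace, sample_rate=120, onset_idx=120, stim_duration=1)
#   plr.baseline()        # mean pupil size before onset (~7.0)
#   plr.latency_idx_a()   # first sample more than 1% below the size at onset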
import tensorflow as tf
import numpy as np
import random
import cv2
import os
import PySimpleGUI as sg
import dlib
class tf_flags_StopSave():
def __init__(self, stopped=False, save=False):
self.stopped = stopped
self.save = save
class StopTrainingOnWindowCloseAndPause(tf.keras.callbacks.Callback):
""" NewCallback descends from Callback
"""
def __init__(self, window, tf_flags):
""" Save params in constructor
"""
self.window = window
self.tf_flags = tf_flags
def on_train_batch_end(self, batch, logs=None):
event, values = self.window.read(0)
if event == "Exit" or event == sg.WIN_CLOSED or event == '-CANCEL_B-' or event == '-SAVE-':
self.model.stop_training = True
if event == '-CANCEL_B-':
self.tf_flags.stopped = True
if event == '-SAVE-':
self.tf_flags.stopped = True
self.tf_flags.save = True
class F1_Score(tf.keras.metrics.Metric):
def __init__(self, name='f1_score', **kwargs):
super().__init__(name=name, **kwargs)
self.f1 = self.add_weight(name='f1', initializer='zeros')
self.precision_fn = tf.keras.metrics.Precision(thresholds=0.5)
self.recall_fn = tf.keras.metrics.Recall(thresholds=0.5)
def update_state(self, y_true, y_pred, sample_weight=None):
p = self.precision_fn(y_true, y_pred)
r = self.recall_fn(y_true, y_pred)
# since f1 is a variable, we use assign
self.f1.assign(2 * ((p * r) / (p + r + 1e-6)))
def result(self):
return self.f1
def reset_states(self):
# we also need to reset the state of the precision and recall objects
self.precision_fn.reset_states()
self.recall_fn.reset_states()
self.f1.assign(0)
def facial_landmarks(image, predictor):
# image = cv2.imread(filepath)
face_rects = [dlib.rectangle(left=1, top=1, right=len(image) - 1, bottom=len(image) - 1)]
face_landmarks = np.matrix([[p.x, p.y] for p in predictor(image, face_rects[0]).parts()])
return face_landmarks
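# Hedged usage sketch (not part of the original module): extracting the 68 landmark points for
# a single face image. The predictor file name matches the one referenced in a comment further
# below; the image path is illustrative.
#
#   predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
#   img = cv2.imread('face.jpg')
#   landmarks = facial_landmarks(img, predictor)   # 68 x 2 matrix of (x, y) coordinates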
def conv_block_r9(in_channels, out_channels, pool=False):
inputs = tf.keras.Input((None, None, in_channels))
results = tf.keras.layers.Conv2D(out_channels, kernel_size=(3, 3), padding='same')(inputs)
results = tf.keras.layers.BatchNormalization()(results)
results = tf.keras.layers.ReLU()(results)
if pool: results = tf.keras.layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='same')(results)
return tf.keras.Model(inputs=inputs, outputs=results)
def ResNet9(**kwargs):
inputs = tf.keras.Input((None, None, 3))
results = conv_block_r9(in_channels=3, out_channels=64)(inputs)
results = conv_block_r9(64, 64, pool=True)(results)
shortcut = conv_block_r9(64, 64, pool=True)(results)
results = conv_block_r9(64, 32)(shortcut)
results = conv_block_r9(32, 64)(results)
results = tf.keras.layers.Add()([results, shortcut])
results = tf.keras.layers.Dropout(0.5)(results)
shortcut = conv_block_r9(64, 64, pool=True)(results)
results = conv_block_r9(64, 32)(shortcut)
results = conv_block_r9(32, 64)(results)
results = tf.keras.layers.Add()([results, shortcut])
results = tf.keras.layers.Dropout(0.5)(results)
shortcut = conv_block_r9(64, 64, pool=True)(results)
results = conv_block_r9(64, 32)(shortcut)
results = conv_block_r9(32, 64)(results)
results = tf.keras.layers.Add()([results, shortcut])
results = tf.keras.layers.Dropout(0.5)(results)
results = tf.keras.layers.MaxPool2D(pool_size=(6, 6))(results)
results = tf.keras.layers.Flatten()(results)
return tf.keras.Model(inputs=inputs, outputs=results, **kwargs)
def EmotionsRN9():
inputs = tf.keras.Input((197, 197, 3))
results = ResNet9(name='resnet9')(inputs)
results = tf.keras.layers.Dense(7, activation=tf.keras.activations.softmax)(results)
return tf.keras.Model(inputs=inputs, outputs=results)
def ResnetBlock(in_channels, out_channels, down_sample=False):
inputs = tf.keras.Input((None, None, in_channels)) # inputs.shape = (batch, height, width, in_channels)
if down_sample:
shortcut = tf.keras.layers.Conv2D(out_channels, kernel_size=(1, 1), strides=(2, 2), padding='same')(inputs)
shortcut = tf.keras.layers.BatchNormalization()(shortcut)
else:
shortcut = inputs
results = tf.keras.layers.Conv2D(out_channels, kernel_size=(3, 3), strides=(2, 2) if down_sample else (1, 1),
padding='same')(inputs)
results = tf.keras.layers.BatchNormalization()(results)
results = tf.keras.layers.ReLU()(results)
results = tf.keras.layers.Conv2D(out_channels, kernel_size=(3, 3), strides=(1, 1), padding='same')(results)
results = tf.keras.layers.BatchNormalization()(results)
results = tf.keras.layers.Add()([results, shortcut])
results = tf.keras.layers.ReLU()(results)
return tf.keras.Model(inputs=inputs, outputs=results)
def ResNet18(**kwargs):
inputs = tf.keras.Input((None, None, 3))
results = tf.keras.layers.Conv2D(64, kernel_size=(7, 7), strides=(2, 2), padding='same')(inputs)
results = tf.keras.layers.BatchNormalization()(results)
results = tf.keras.layers.ReLU()(results)
results = tf.keras.layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='same')(results)
results = ResnetBlock(64, 64)(results)
results = ResnetBlock(64, 64)(results)
results = ResnetBlock(64, 128, down_sample=True)(results)
results = ResnetBlock(128, 128)(results)
results = ResnetBlock(128, 256, down_sample=True)(results)
results = ResnetBlock(256, 256)(results)
results = ResnetBlock(256, 512, down_sample=True)(results)
results = ResnetBlock(512, 512)(results)
results = tf.keras.layers.GlobalAveragePooling2D()(results) # results.shape = (batch, 512)
return tf.keras.Model(inputs=inputs, outputs=results, **kwargs)
def EmotionsRN18():
inputs = tf.keras.Input((197, 197, 3))
results = ResNet18(name='resnet18')(inputs)
results = tf.keras.layers.Dense(7, activation=tf.keras.activations.softmax)(results)
return tf.keras.Model(inputs=inputs, outputs=results)
def ResNet34(**kwargs):
inputs = tf.keras.Input((None, None, 3))
results = tf.keras.layers.Conv2D(64, kernel_size=(7, 7), strides=(2, 2), padding='same')(inputs)
results = tf.keras.layers.BatchNormalization()(results)
results = tf.keras.layers.ReLU()(results)
results = tf.keras.layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='same')(results)
results = ResnetBlock(64, 64)(results)
results = ResnetBlock(64, 64)(results)
results = ResnetBlock(64, 64)(results)
results = ResnetBlock(64, 128, down_sample=True)(results)
results = ResnetBlock(128, 128)(results)
results = ResnetBlock(128, 128)(results)
results = ResnetBlock(128, 128)(results)
results = ResnetBlock(128, 256, down_sample=True)(results)
results = ResnetBlock(256, 256)(results)
results = ResnetBlock(256, 256)(results)
results = ResnetBlock(256, 256)(results)
results = ResnetBlock(256, 256)(results)
results = ResnetBlock(256, 256)(results)
results = ResnetBlock(256, 512, down_sample=True)(results)
results = ResnetBlock(512, 512)(results)
results = ResnetBlock(512, 512)(results)
results = tf.keras.layers.GlobalAveragePooling2D()(results) # results.shape = (batch, 512)
return tf.keras.Model(inputs=inputs, outputs=results, **kwargs)
def EmotionsRN34():
inputs = tf.keras.Input((197, 197, 3))
results = ResNet34(name='resnet34')(inputs)
results = tf.keras.layers.Dense(7, activation=tf.keras.activations.softmax)(results)
return tf.keras.Model(inputs=inputs, outputs=results)
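# Hedged usage sketch (not part of the original module): compiling one of the emotion models
# with the custom F1_Score metric defined above. The optimizer and loss choices are assumptions.
#
#   model = EmotionsRN18()
#   model.compile(optimizer=tf.keras.optimizers.Adam(1e-4),
#                 loss='categorical_crossentropy',
#                 metrics=['accuracy', F1_Score()])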
# def facial_landmarks(image, predictor):
# # image = cv2.imread(filepath)
# face_rects = [dlib.rectangle(left=1, top=1, right=len(image) - 1, bottom=len(image) - 1)]
# face_landmarks = np.matrix([[p.x, p.y] for p in predictor(image, face_rects[0]).parts()])
# return face_landmarks
# predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
def load_filenames(directory):
emotions_dict = {"anger": 0, "disgust": 1, "fear": 2, "happiness": 3, "neutrality": 4, "sadness": 5, "surprise": 6}
samples = []
for emotion in emotions_dict:
path = directory + "/" + emotion
for file in os.listdir(path):
if file.lower().endswith(('.png', '.jpg', '.jpeg', '.tiff', '.bmp')):
filepath = path + "/" + file
emotion_label = emotions_dict[emotion]
samples.append([filepath, emotion_label])
return samples
def rotate_image(image, deg):
rows, cols, c = image.shape
M = cv2.getRotationMatrix2D((cols / 2, rows / 2), deg, 1)
image = cv2.warpAffine(image, M, (cols, rows))
return image
def generator(samples, aug=False, batch_size=32, shuffle_data=True, resize=197, window=None):
"""
Yields the next training batch.
Suppose `samples` is an array [[image1_filename,label1], [image2_filename,label2],...].
"""
num_samples = len(samples)
while True: # Loop forever so the generator never terminates
random.shuffle(samples)
# Get index to start each batch: [0, batch_size, 2*batch_size, ..., max multiple of batch_size <= num_samples]
for offset in range(0, num_samples, batch_size):
# Get the samples you'll use in this batch
batch_samples = samples[offset:offset + batch_size]
# Initialise X_train and y_train arrays for this batch
X1 = []
# X2 = []
y = []
# For each example
for batch_sample in batch_samples:
# Load image (X) and label (y)
img_path = batch_sample[0]
label = batch_sample[1]
img = cv2.imread(img_path)
img = cv2.resize(img, (resize, resize))
                if aug: # augmentations
img = rotate_image(img, random.uniform(-10, 10))
# features = facial_landmarks(img, predictor)
img = img / 255
onehot = [0 for i in range(7)]
onehot[label] += 1
# apply any kind of preprocessing
# Add example to arrays
X1.append(img)
# X2.append(features)
y.append(onehot)
# Make sure they're numpy arrays (as opposed to lists)
X1 = np.array(X1)
# X2 = np.array(X2)
y = np.array(y)
if window:
print('', end='')
window.refresh()
# The generator-y part: yield the next training batch
# yield [X1, X2], y
yield X1, y
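# Hedged usage sketch (not part of the original module): feeding the generator above into
# Keras. The directory layout follows load_filenames() above; batch size and epoch count are
# illustrative assumptions.
#
#   train_samples = load_filenames('data/train')
#   train_gen = generator(train_samples, aug=True, batch_size=32)
#   model.fit(train_gen, steps_per_epoch=len(train_samples) // 32, epochs=10)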
def old_generator(samples, predictor, aug=False, batch_size=32, shuffle_data=True, resize=197, window=None):
"""
Yields the next training batch.
Suppose `samples` is an array [[image1_filename,label1], [image2_filename,label2],...].
"""
num_samples = len(samples)
while True: # Loop forever so the generator never terminates
random.shuffle(samples)
# Get index to start each batch: [0, batch_size, 2*batch_size, ..., max multiple of batch_size <= num_samples]
for offset in range(0, num_samples, batch_size):
# Get the samples you'll use in this batch
batch_samples = samples[offset:offset + batch_size]
# Initialise X_train and y_train arrays for this batch
X1 = []
X2 = []
y = []
# For each example
for batch_sample in batch_samples:
# Load image (X) and label (y)
img_path = batch_sample[0]
label = batch_sample[1]
img = cv2.imread(img_path)
img = cv2.resize(img, (resize, resize))
                if aug: # augmentations
img = rotate_image(img, random.uniform(-10, 10))
features = facial_landmarks(img, predictor)
img = img / 255
onehot = [0 for i in range(7)]
onehot[label] += 1
# apply any kind of preprocessing
# Add example to arrays
X1.append(img)
X2.append(features)
y.append(onehot)
# Make sure they're numpy arrays (as opposed to lists)
X1 = np.array(X1)
X2 = np.array(X2)
            y = np.array(y)
            yield [X1, X2], y
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: lab3_Shadings
Description :
Author : zdf's desktop
date: 2019/2/24
-------------------------------------------------
Change Activity:
2019/2/24:23:27
-------------------------------------------------
"""
import numpy as np
import random
from pyglet.gl import *
class Edge:
def __init__(self):
self.ymax = 0
self.ymin = 0
self.xmin = 0
self.slope = 0
class ScreenVertex:
def __init__(self, x, y):
self.x = x
self.y = y
class Pixel:
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
class Vertex:
def __init__(self, cord, normal=None):
if normal is None:
normal = []
self.cord = cord
self.normal = normal
class Polygon:
def __init__(self):
self.vertex_num = 0
self.vertex_list = []
self.main_color = (255, 255, 255)
self.edge_table = []
self.pixel_list = [] # all pixels inside this polygon(including vertex)
self.normal = np.zeros(3, dtype=np.float64) # the normal vector of polygon
def print(self):
"""
Print attributes of polygon
:return: None
"""
print("Vertex Number:", self.vertex_num)
for i, v in enumerate(self.vertex_list):
print("Vertex", i, ":", v)
print(self.main_color)
print(self.edge_table)
print(self.pixel_list)
print(self.normal)
class Model:
"""
Model contains data of vertices and polygons
"""
def __init__(self):
self.raw_vertex = []
self.raw_polygon = []
self.final_vertex = [] # ScreenVertex
self.final_polygon = [] # viewing polygon under current observer after backface-culling
# self.visible_vertex = [] # viewing vertices under current observer after backface-culling
def load_model(self, path):
"""
load raw data from file to a model
:param path: the data path
:return: None
"""
with open(path) as f:
read_data = f.read()
data = read_data.replace("\t", " ").replace("data", "").split("\n")
try:
data.remove("")
except Exception:
pass
temp = data[0].split(" ")
temp = [x for x in temp if x] # filter the duplicated empty element.
point_number = temp[0]
data = data[1:] # trim the first data
for i, val in enumerate(data):
if i < int(point_number):
self.raw_vertex.append(val.split(" "))
else:
self.raw_polygon.append(val.split(" "))
for i, v in enumerate(self.raw_vertex):
temp = [float(x) for x in v if x]
temp.append(1)
v = Vertex(temp, [])
self.raw_vertex[i] = v
for i, p in enumerate(self.raw_polygon):
temp = [int(x) for x in p if x]
self.raw_polygon[i] = temp
def find_pologon_normal(self):
for p in self.final_polygon:
p.normal = np.zeros(3, dtype=np.float64)
v1 = self.raw_vertex[p.vertex_list[0] - 1].cord
v2 = self.raw_vertex[p.vertex_list[1] - 1].cord
v3 = self.raw_vertex[p.vertex_list[2] - 1].cord
print(v1, v2, v3)
v1 = v1[:-1]
v2 = v2[:-1]
v3 = v3[:-1]
e1 = np.subtract(v2, v1)
e2 = np.subtract(v3, v2)
p.normal = np.cross(e1, e2)
p.normal /= np.linalg.norm(p.normal)
print("p.normal = ", p.normal)
def zoom_model(self, amplifier, shift):
for v in self.final_vertex:
v.x = v.x * amplifier + shift
v.y = v.y * amplifier + shift
def create_edge_table(self):
"""
Creating an edge_table for a certain model
:return: None
"""
for p in self.final_polygon:
num = p.vertex_num # vertices number of this polygon
for i in range(0, num):
'''
Warning:
                to visit the proper vertex, subtract 1 from the index,
                since the vertices are labelled starting at 1, not 0.
'''
v1 = self.final_vertex[p.vertex_list[i]]
if i == num - 1:
v2 = self.final_vertex[p.vertex_list[0]]
else:
v2 = self.final_vertex[p.vertex_list[i + 1]]
if int(v1.y) == int(v2.y):
# skip the horizontal edge
continue
e = Edge()
e.ymax = int(max(v1.y, v2.y)) # compare Y value of V1 and V2
e.ymin = int(min(v1.y, v2.y))
e.xmin = v1.x if v1.y < v2.y else v2.x # store the x value of the bottom vertex
e.slope = (v1.x - v2.x) / (v1.y - v2.y) # store the edge slope for coherence
e.ymax -= 1 # dealing with vertex-scanline intersection(shorten edges)
p.edge_table.append(e)
def ymin_cmp(edge):
return edge.ymin
p.edge_table.sort(key=ymin_cmp) # sort edge_table by Y value
print("Finished edge_table creation")
def scan_conversion(self):
"""
making a scan conversion on a certain model
:return: None
"""
print("Start Scan conversion...")
for p in self.final_polygon:
AET = [] # Active edge table
if not p.edge_table: # ignoring empty edge_table
continue
ymin = int(p.edge_table[0].ymin) # ymin value among all edges
ymax = int(max(node.ymax for node in p.edge_table)) # ymax value among all edges
for scanY in range(ymin, ymax + 1): # scanline Y value
for e in p.edge_table:
if e.ymin == scanY: # put edge into AET which intersect with current scanline
AET.append(e)
elif e.ymin > scanY: # already finished since ET are pre-sorted
break
def x_cmp(edge):
return edge.xmin
AET.sort(key=x_cmp) # re-sort AET by X value
for i in range(len(AET) // 2):
for j in range(int(AET[i].xmin), int(AET[i + 1].xmin)):
# for each intersections between scanline and edge
# store all pixels coordinate between them into a pixel list
pixel = Pixel(j, scanY, 0)
p.pixel_list.append(pixel)
                for e in AET[:]:  # iterate over a copy so edges can be removed safely
if e.ymax == scanY: # remove edges that no longer intersect with the next scanline
AET.remove(e)
for e in AET:
e.xmin += e.slope # adjust X value by coherence
AET.sort(key=x_cmp) # re-sort AET by X value
print("Finished Scanline conversion")
def illumination_model(self):
diffuse, specular, ambient = [0, 0, 0], [0, 0, 0], [0, 0, 0]
light_intensity = [0.0, 0.0, 0.0]
light_source = [0.5, 1.0, 0.8] # color of light
# light_source[0] = 0.5
h_vector = np.zeros(3, dtype=np.float64)
        light_direction = np.zeros(3, dtype=np.float64)
"""Functions and classes for creating particle grids, counting bins and transition matrices."""
# Imports
import pandas as pd
import numpy as np
import xarray as xr
import networkx as nx
from scipy.interpolate import griddata
from scipy.spatial import cKDTree, SphericalVoronoi
from scipy import sparse
from astropy.coordinates import cartesian_to_spherical
import matplotlib.pyplot as plt
import cartopy as cart
from datetime import datetime
import warnings
import pickle
import os
try:
import stripy
except ImportError:
print("Stripy is not available on this machine.")
def get_cartesian(lon, lat, R=1, mode='rad', ):
"""
Convert spherical coordinates to cartesian coordinates.
Parameters
----------
lon : float, np.array
longitude coordinate(s)
lat : float, np.array
latitude coordinate(s)
R : int, float
radius of sphere
mode : str
Either 'rad' or 'deg', indicating whether coordinates are supplied
in radians or degrees respectively.
Returns
-------
x, y, z : (float, float, float)
Cartesian coordinates
"""
if mode=='deg':
lat, lon = np.radians(lat), np.radians(lon)
x = R * np.cos(lat) * np.cos(lon)
y = R * np.cos(lat) * np.sin(lon)
    z = R * np.sin(lat)
return x, y, z
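# Hedged example (not part of the original module): with degree input, the north pole maps to
# the top of the unit sphere up to floating point error.
#
#   get_cartesian(0.0, 90.0, R=1, mode='deg')   # -> (~0.0, ~0.0, 1.0)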
def lonlat_from_pset(pset, timedelta64=None):
"""
Extract latitude and longitude data from particleSet.
Parameters
----------
pset : str
string with path to ``parcels.ParticleSet`` output file
timedelta64 : np.timedelta64
relative timestamp to load data from pset at, relative to start time
Returns
-------
lonlat_init
np.array with initial longitude-latitude pairs
lonlat_final
np.array with final longitude-latitude pairs
"""
ds = xr.open_dataset(pset)
lons = ds['lon'].data
lats = ds['lat'].data
ids = ds['traj'].data
times = ds['time'].data
if np.any(np.diff(times[:,0]).astype(bool)):
warnings.warn("Not all starting times are equal. Behaviour may not be as expected.", Warning)
if timedelta64:
# Determine which trajectory idx to use for searchsorted,
# since it must contain timestamps in the last index.
firstFullTrajectoryIdx = np.searchsorted(~np.isnat(times[:, -1]), True)
        # Find index at which trajectories should be investigated
final_tidx = np.searchsorted(times[firstFullTrajectoryIdx,:],
times[firstFullTrajectoryIdx,0] + timedelta64)
if final_tidx == times.shape[1]:
warnings.warn("`final_tidx` lies outside of time window. Choosing last index instead", Warning)
final_tidx = times.shape[1]-1
else:
final_tidx = times.shape[1]-1
lonlatInit = np.dstack((lons[:,0], lats[:,0]))
lonlatFinal = np.dstack((lons[:,final_tidx], lats[:, final_tidx]))
ds.close()
return lonlatInit, lonlatFinal
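# Hedged usage sketch (not part of the original module): initial and 30-day particle positions
# from a Parcels ParticleSet output file. The file name is illustrative.
#
#   lonlatInit, lonlatFinal = lonlat_from_pset('pset_output.nc', timedelta64=np.timedelta64(30, 'D'))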
class particles:
"""
Basic instance of particles object has lists holding the latitudes and longitudes of its points.
Attributes
----------
lats : np.array
list of latitudes (in degrees)
lons : np.array
list of longitudes (in degrees)
lonlat : np.ndarray
2D array holding pairs of latitude and longitude of each particle
n : int
number of gridpoints
idx : np.ndarray
index of each gridpoint
_releaseTime : datetime
release time of particles
"""
def __init__(self, lons, lats, idx = None, releaseTime = None):
assert len(lats) == len(lons), "lats and lons should be of equal size"
self._releaseTime = releaseTime
self.lons = lons
self.lats = lats
self.lonlat = np.dstack((lons, lats)) #First axis corresponds to time
# idx should not be updated since this makes triangle points harder to track down
if idx:
self.idx = idx
else:
self.idx = np.arange(self.n)
@property
def n(self):
"""
Returns
-------
int
number of particles
"""
return self.lonlat.shape[1]
@property
def releaseTimes(self):
"""
Returns
-------
list
Release times of particles
"""
if self._releaseTime:
return [self._releaseTime for part in range(self.n)]
else:
pass
@classmethod
def from_regular_grid(cls, nlon, nlat, minLat=60., maxLat=90., minLon=-180, maxLon=180, **kwargs):
"""
Grid construction by dividing latitude and longitude ranges into a discrete amount of points.
Parameters
----------
nlat : int
number of latitudes.
nlon : int
number of longitudes.
minLat : float
minimum latitude of grid (southern boundary)
maxLat : float
maximum latitude of grid (northern boundary)
minLon : float
minimum longitude of grid (western boundary)
maxLon : float
maximum longitude of grid (eastern boundary)
"""
lonRange = np.linspace(minLon, maxLon, nlon)
latRange = np.linspace(minLat, maxLat, nlat)
lon2D, lat2D = np.meshgrid(lonRange, latRange)
return cls(lon2D.flatten(), lat2D.flatten(), **kwargs)
@classmethod
def from_pickle(cls, pickFile, lonKey='lons', latKey='lats', **kwargs):
"""
Load longitudes and latitudes of particles from pickled dictionary
Parameters
----------
pickFile : str
Path to pickled dictionary
lonKey : str
Key for longitudes in dictionary
latKey : str
Key for latitudes in dictionary
"""
with open(pickFile, 'rb') as pickFile:
lonlat_dict = pickle.load(pickFile)
return cls(lonlat_dict[lonKey], lonlat_dict[latKey], **kwargs)
def remove_on_land(self, fieldset):
"""
Uses the fieldset.landMask to remove particles that are located on land (where u, v == 0 or -1)
Parameters
----------
fieldset : Parcels.FieldSet
should have a landMask attribute (created by fieldSetter)
"""
nBefore = self.n
# Load landmask and initialize mask for particles on land
landMask = fieldset.landMask
try:
landMask = landMask.compute()
except AttributeError:
pass
# Use scipy.interpolate.griddata to have particles adopt value of landmask from nearest neighbor
if fieldset.U.grid.lon.ndim == 1:
mesh = np.meshgrid(fieldset.U.grid.lon, fieldset.U.grid.lat)
lonlatMask = griddata(np.dstack((mesh[0].flatten(),
mesh[1].flatten()))[0,:,:],
landMask.flatten(),
self.lonlat[0,:,:],
method='nearest')
else:
lonlatMask = griddata(np.dstack((fieldset.U.grid.lon.flatten(),
fieldset.U.grid.lat.flatten()))[0,:,:],
landMask.flatten(),
self.lonlat[0,:,:],
method='nearest')
self.lonlat = self.lonlat[:, ~lonlatMask, :]
self.lons = self.lonlat[0, :, 0]
self.lats = self.lonlat[0, :, 1]
nAfter = self.n
self.removedParticleCount = nBefore - nAfter
    def add_advected_from_pset(self, pset, *args, **kwargs):
        """
        Add final particle locations by loading them from a pset. See `lonlat_from_pset()`.
        """
        lonlatFinal = lonlat_from_pset(pset, *args, **kwargs)[1]
        self.lonlat = np.concatenate((self.lonlat, lonlatFinal), axis=0)
def show(self, tindex = 0, export = None, projection=None, **kwargs):
"""
Create a plot of the particle locations in particles object.
Parameters
----------
tindex : int
Index of lonlat pairs (0 is initial, 1 is final).
export : str
Name of exported figure. A directory 'figures' is created.
projection : cartopy.crs
Projection for showing particle set on.
"""
fig = plt.figure()
if projection:
ax = plt.axes(projection = projection)
else:
ax = plt.axes(projection = cart.crs.PlateCarree())
ax.scatter(self.lonlat[tindex, :, 0], self.lonlat[tindex, :, 1], transform = cart.crs.Geodetic(), **kwargs)
ax.add_feature(cart.feature.COASTLINE)
if export:
if not os.path.exists('figures'):
os.makedirs('figures')
if export[-4] == '.':
plt.savefig(f'figures/{export}', dpi=300)
else:
plt.savefig(f'figures/{export}.png', dpi=300)
return ax
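# Hedged usage sketch (not part of the original module): a regular release grid north of 60N,
# with land points removed and a polar plot. `fieldset` must carry the `landMask` attribute
# mentioned in remove_on_land().
#
#   parts = particles.from_regular_grid(nlon=360, nlat=60, minLat=60., maxLat=90.)
#   parts.remove_on_land(fieldset)
#   parts.show(tindex=0, projection=cart.crs.Orthographic(0, 90), s=1)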
class countBins:
"""
Bins used for counting particles.
Attributes
----------
binType : str
Indicates the type of bin: `regular` or `hexagonal`
"""
def load_communities(self, comFile):
"""
Load communities determined by a community detection algorithm on a regular grid
Parameters
----------
comFile : str
Filename of community file
"""
with open(comFile) as cluFile:
clu = cluFile.read().split('\n')
self.codelength = float(clu[0].split(' ')[3])
header = clu[1].split(' ')[1:]
        body = [line.split(' ') for line in clu[2:] if line != '']
self.communityDF = pd.DataFrame(body, columns=header).astype({"node" : 'int',
"module" : 'int',
"flow" : 'float' }).set_index("node")
communityID = -np.ones(self.n, dtype=int)
communityFlow = -np.ones(self.n, dtype=float)
if hasattr(self, 'oceanMask'):
bindex = self.bindex[self.oceanMask]
else:
bindex = self.bindex
assert self.communityDF.shape[0] == bindex.shape[0], "Number of nodes in `.clu` file must equal the amount of non-empty bins."
for index, row in self.communityDF.iterrows():
# -1 because counting in clu file starts at 1
communityID[bindex[index-1]] = row['module'].astype(int)
communityFlow[bindex[index-1]] = row['flow']
self.communityID = np.ma.masked_equal(communityID, -1)
self.communityFlow = np.ma.masked_equal(communityFlow, -1)
self.communityIdx = {}
for community in np.unique(self.communityID).data[np.unique(self.communityID).data != -1]:
self.communityIdx[community] = {'bindex' : {},
'oceanBindex' : {}}
self.communityIdx[community]['bindex'] = self.bindex[self.communityID == community]
nonZeroCommunityID = self.communityID.data[self.communityID.data != -1]
self.communityIdx[community]['oceanBindex'] = np.arange(nonZeroCommunityID.size)[nonZeroCommunityID == community]
def color_communities(self, colors=4):
"""Associate new colors to existing communities by using graph coloring.
Returns
-------
np.array
Array containing new community IDs, corresponding to different colors.
"""
try:
self.communityNetwork = nx.Graph()
for community in self.adjacencyDict:
for neighbor in self.adjacencyDict[community]:
self.communityNetwork.add_edge(community, neighbor)
# Remove self-loops
self.communityNetwork.remove_edges_from(self.communityNetwork.selfloop_edges())
except NameError:
raise RuntimeError('The counting grid does not yet have an adjacency dictionary for determining the coloring of communities. Try calling the `find_adjacency()` method first.')
self.colorMapping = nx.coloring.greedy_color(self.communityNetwork, strategy='largest_first')
colorID = -np.ones(self.n, dtype=int)
maxColor = max(list(self.colorMapping.values()))
for i in range(len(self.communityID.flatten())):
if not np.ma.is_masked(self.communityID.flatten()[i]):
colorID[i] = self.colorMapping.get(self.communityID.flatten()[i], maxColor + 1)
self.colorID = np.ma.masked_equal(colorID, -1).reshape(self.communityID.shape)
return self.colorID
def calculate_coherence_ratio(self, transMat):
"""
Calculate the coherence ratio for each community.
Parameters
----------
transMat : transMat
Transition matrix to calculate coherence ratios.
Returns
-------
np.array
Array containing the coherence ratio of the community that each bin is part of.
"""
self.coherenceRatioDict = {}
for community, idx in self.communityIdx.items():
denom = np.sum(np.sum(transMat.counter[idx["oceanBindex"], :], axis=1))
if denom != 0:
self.coherenceRatioDict[community] = np.sum(np.sum(transMat.counter[idx["oceanBindex"], :][:, idx["oceanBindex"]], axis=1))/denom
coherenceRatio = -np.ones(self.n, dtype=float)
for i in range(len(self.communityID.flatten())):
if not np.ma.is_masked(self.communityID.flatten()[i]):
coherenceRatio[i] = self.coherenceRatioDict.get(self.communityID.flatten()[i], -1)
self.coherenceRatio = np.ma.masked_equal(coherenceRatio, -1).reshape(self.communityID.shape)
return self.coherenceRatio
def calculate_global_coherence(self):
"""
Calculate the global coherence ratio, which is a weighted average of the coherence ratio.
Returns
-------
float
Global coherence ratio
"""
assert hasattr(self, "coherenceRatio"), "Coherence ratios must be calculated first. Try calling the `calculate_coherence_ratio(transMat)` method."
        # Note: self.coherenceRatio[~self.coherenceRatio.mask] holds one value per bin, so its mean is a weighted average.
self.globalCoherenceRatio = self.coherenceRatio[~self.coherenceRatio.mask].mean()
return self.globalCoherenceRatio
def calculate_mixing(self, transMat):
"""
Parameters
----------
transMat : transMat
Transition matrix to calculate mixing parameters.
Returns
-------
np.array
Array containing the mixing parameter of the community that each bin is part of.
"""
self.mixingDict = {}
for community, idx in self.communityIdx.items():
subsetCounter = transMat.counter[idx["oceanBindex"], :][:, idx["oceanBindex"]]
subsetSums = np.tile(subsetCounter.sum(axis=1), (subsetCounter.shape[1],1)).T
R = np.divide(subsetCounter, subsetSums, out=np.zeros_like(subsetSums), where=subsetSums!=0)
if R.shape[0] > 1:
self.mixingDict[community] = np.divide(-np.sum(R*np.log(R, out=np.zeros_like(R), where=R!=0)), R.shape[0]*np.log(R.shape[0]))
else:
self.mixingDict[community] = -1
mixing = -np.ones(self.n, dtype=float)
for i in range(len(self.communityID.flatten())):
if not np.ma.is_masked(self.communityID.flatten()[i]):
mixing[i] = self.mixingDict[self.communityID.flatten()[i]]
self.mixing = np.ma.masked_equal(mixing, -1).reshape(self.communityID.shape)
return self.mixing
def calculate_global_mixing(self):
"""
Calculate the global mixing parameter, which is a weighted average version of the mixing parameter.
Returns
-------
float
Global mixing parameter
"""
        assert hasattr(self, "mixing"), "Mixing parameters must be calculated first. Try calling the `calculate_mixing(transMat)` method."
        # Note: self.mixing[~self.mixing.mask] holds one value per bin, so its mean is a weighted average.
self.globalMixing = self.mixing[~self.mixing.mask].mean()
return self.globalMixing
def nComs(self, cutoff=0):
"""
Returns number of communities larger than `cutoff`.
"""
        counts = np.unique(self.communityID, return_counts=True)[1]
return np.sum(counts > cutoff)
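# Hedged usage sketch (not part of the original module): the typical post-processing order on a
# concrete countBins subclass once an InfoMap `.clu` file and a transition matrix `mat` exist.
# The file name is illustrative.
#
#   bins.load_communities('communities.clu')
#   bins.find_adjacency()
#   bins.color_communities()
#   bins.calculate_coherence_ratio(mat)
#   bins.calculate_global_coherence()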
class regularCountBins(countBins):
def __init__(self, nlon, nlat, minLat=60., maxLat=90., minLon=-180, maxLon=180, **kwargs):
"""
Grid construction by dividing latitude and longitude ranges into a discrete amount of points.
Parameters
----------
nlat : int
number of latitudes.
nlon : int
number of longitudes.
minLat : float
minimum latitude of grid (southern boundary)
maxLat : float
maximum latitude of grid (northern boundary)
minLon : float
minimum longitude of grid (western boundary)
maxLon : float
maximum longitude of grid (eastern boundary)
"""
self.binType = 'regular'
dlat = (maxLat - minLat)/nlat
dlon = (maxLon - minLon)/nlon
lonOffset = dlon/2
latOffset = dlat/2
self.lonBounds = np.linspace(minLon, maxLon, nlon+1)
self.latBounds = np.linspace(minLat, maxLat, nlat+1)
lonCenters = np.linspace(minLon + lonOffset, maxLon - lonOffset, nlon)
latCenters = np.linspace(minLat + latOffset, maxLat - latOffset, nlat)
self.lonCenters2D, self.latCenters2D = np.meshgrid(lonCenters, latCenters)
self.lonIdx2D, self.latIdx2D = np.meshgrid(np.arange(nlon), np.arange(nlat))
self.gridShape = self.lonIdx2D.shape
self.bindex = (np.arange(len(self.lonIdx2D.flatten())))
self.bindex2D = self.bindex.reshape(self.gridShape)
@property
def n(self):
"""
Returns
-------
int
number of particles
"""
return len(self.bindex)
def particle_count(self, particles, tindex=0):
"""
Returns
-------
array
number of particles per bin
"""
count = np.histogram2d(particles.lonlat[tindex,:,0], particles.lonlat[tindex,:,1], bins=[self.lonBounds, self.latBounds])[0]
if tindex == 0:
self.initCount = count
return count
def find_adjacency(self, mode='Neumann'):
"""
Create an adjacency list: for each node (grid cell), determine which nodes are bordering this node.
Parameters
----------
mode : string
Either 'Neumann' or 'Moore'. Indicates the pixel neighborhood used for determining
neighbors. The Von Neumann neighborhood only considers pixels touching the edges to
be neighbors, while the Moore neighborhood also considers pixels touching the
corners.
Returns
-------
dict
Containing keys corresponding to community IDs and values being `set` objects
containing IDs of bordering communities.
"""
assert self.binType == "regular", "Bin type must be regular."
# Construct empty adjacency dictionary
# Using dictionary so that labels coincide labels created by InfoMap, rather than being
# indices, which might not coincide with labels.
communityID2D = self.communityID.reshape(self.gridShape)
self.adjacencyDict = {}
# Iterate over all cells
for i in range(self.gridShape[0]):
for j in range(self.gridShape[1]):
# Save current community in variable
currentCommunity = int(communityID2D[i,j])
# If the current community doesn't have a key and value yet, add an empty
# set to the dictionary, with the key being the community ID.
if currentCommunity not in self.adjacencyDict:
self.adjacencyDict[currentCommunity] = set()
                self.adjacencyDict[currentCommunity].add(int(communityID2D[i, (j+1) % self.gridShape[1]]))
self.adjacencyDict[currentCommunity].add(int(communityID2D[i, j-1]))
# Careful at northern and southern boundaries.
if i<self.gridShape[0]-1:
self.adjacencyDict[currentCommunity].add(int(communityID2D[i+1, j]))
if mode == 'Moore':
                        self.adjacencyDict[currentCommunity].add(int(communityID2D[i+1, (j+1) % self.gridShape[1]]))
self.adjacencyDict[currentCommunity].add(int(communityID2D[i+1, j-1]))
if i>0:
self.adjacencyDict[currentCommunity].add(int(communityID2D[i-1, j]))
if mode == 'Moore':
                        self.adjacencyDict[currentCommunity].add(int(communityID2D[i-1, (j+1) % self.gridShape[1]]))
self.adjacencyDict[currentCommunity].add(int(communityID2D[i-1, j-1]))
return self.adjacencyDict
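# Hedged usage sketch (not part of the original module): a one-degree counting grid north of
# 60N and a particle histogram at release time. `parts` is a `particles` instance.
#
#   bins = regularCountBins(nlon=360, nlat=30, minLat=60., maxLat=90.)
#   counts = bins.particle_count(parts, tindex=0)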
class hexCountBins(countBins):
"""
Basic instance of hexagonal counting bins.
Hexagons are generated from the Voronoi diagram of refined icosahedral grids.
Attributes
----------
bintype : str
Type of bin ('hexagonal')
points : np.array
(N x 3) dimensional array containing cartesian (x, y, z)
coordinates of the vertices of the generating triangulation.
lons : np.array
N longitudes (degrees) of vertices of the generating triangulation.
lats : np.array
N latitudes (degrees) of vertices of the generating triangulation
vertexIndices : np.array
N indices of the vertices
simplices : np.array
N dimensional array holding tuples of length 3, holding the indices
of the vertices of each triangle in the generating triangulation
"""
def __init__(self, points, lons, lats, vertexIndices, simplices):
"""
Basic instance of hexagonal counting bins. Hexagons should be composed of 6 triangles
(5 in case of pentagon)
Parameters
----------
points : np.array
(N x 3) dimensional array containing cartesian (x, y, z)
coordinates of the vertices of the generating triangulation.
lons : np.array
N longitudes (degrees) of vertices of the generating triangulation.
lats : np.array
N latitudes (degrees) of vertices of the generating triangulation
vertexIndices : np.array
N indices of the vertices
simplices : np.array
N dimensional array holding tuples of length 3, holding the indices
of the vertices of each triangle in the generating triangulation
"""
self.binType = 'hexagonal'
self.points = points
self.lons = lons
self.lats = lats
self.vertexIndices = vertexIndices
self.simplices = simplices
@property
def n(self):
"""
Returns
-------
Number of points in the triangulation
"""
return self.bindex.shape[0]
@property
def bindex(self):
"""
Returns
-------
Indices of counting bins
"""
if hasattr(self, "rimBindex") and hasattr(self, "hexBindex"):
return np.concatenate((self.hexBindex, self.shiftedRimBindex))
elif not hasattr(self, "rimBindex") and hasattr(self, "hexBindex"):
return self.hexBindex
else:
raise Exception("Bins do not have attribute `hexBindex`. \n"
+ "Have you calculated the Voronoi diagram yet? \n"
+ "Try calling method `calculate_voronoi()`.")
@classmethod
def from_stripy(cls, refinement):
"""
Create a hexCountBins` instance through a spherical
icosahedral mesh obtained using `stripy`.
Parameters
----------
refinement : int
Refinement level of the mesh. The mesh is recursively refined through bisection
of the edges
Returns
-------
bins : hexCountBins
hexCountBins instance
"""
try:
ico = stripy.spherical_meshes.icosahedral_mesh(refinement_levels = refinement)
bins = cls(ico.points,
np.degrees(ico.lons),
np.degrees(ico.lats),
ico._permutation,
ico.simplices)
return bins
except NameError:
raise NameError("Has the `stripy` module been imported?")
def calculate_neighbors(self):
"""
Create a dictionary with indices of neighbors for each vertex (key)
in the generating triangulation.
"""
self.neighbors = {}
for vertex in self.vertexIndices:
self.neighbors[vertex] = set()
# For each simplex, fill dictionary with simplex and neighbor information
for simplex in self.simplices:
self.neighbors[simplex[0]].add(simplex[1])
self.neighbors[simplex[0]].add(simplex[2])
self.neighbors[simplex[1]].add(simplex[0])
self.neighbors[simplex[1]].add(simplex[2])
self.neighbors[simplex[2]].add(simplex[0])
self.neighbors[simplex[2]].add(simplex[1])
def calculate_voronoi(self, mask = None, innerMaskLevel=0, outerMaskLevel = 0):
"""
Calculate a voronoi diagram from the generating triangulation.
Uses the `scipy.spatial.SphericalVoronoi()` function.
Parameters
----------
mask : hexMask
Mask to apply to the points used for generating the diagram.
This can significantly reduce calculation times if the generating,
triangulation is fine, as only a small number of generating point
can be selected
innerMaskLevel : int
Mask level used to calculate the Voronoi mask, which in turn is
used to select which binCounts to return
outerMaskLevel : int
Mask level used for selecting generator vertices that will be
used in the Voronoi diagram
"""
self.innerMaskLevel = innerMaskLevel
self.outerMaskLevel = outerMaskLevel
self.mask = mask
# Calculate voronoi diagram
if mask:
self.sv = SphericalVoronoi(self.points[mask[outerMaskLevel]])
else:
self.sv = SphericalVoronoi(self.points)
# Sort the vertices of each region so that they are clockwise with respect to the generator
self.sv.sort_vertices_of_regions()
assert self.sv.points.shape[0] == mask.indices[outerMaskLevel].shape[0], \
"Voronoi should contain as many points as there are Trues in the mask."
# Convert the longitudes and latitudes of the generating vertices from cartesian coordinates to spherical
# coordinates in degrees
svTriCenterLats, svTriCenterLons = cartesian_to_spherical(self.sv.points[:, 0], self.sv.points[:, 1], self.sv.points[:, 2])[1:]
self.svTriCenterLats, self.svTriCenterLons = (np.degrees(svTriCenterLats.value), np.degrees(svTriCenterLons.value))
self.svTriCenterLons = np.where(self.svTriCenterLons>180, self.svTriCenterLons-360, self.svTriCenterLons)
# Convert the longitudes and latitudes of the voronoi vertices from cartesian coordinates to spherical
# coordinates in degrees
svVertexLats, svVertexLons = cartesian_to_spherical(self.sv.vertices[:, 0], self.sv.vertices[:, 1], self.sv.vertices[:, 2])[1:]
self.svVertexLats, self.svVertexLons = (np.degrees(svVertexLats.value), np.degrees(svVertexLons.value))
self.svVertexLons = np.where(self.svVertexLons>180, self.svVertexLons-360, self.svVertexLons)
# Create list of voronoi simplices, based only on the generating vertices
# (which may have been masked before using outerMaskLevel)
# Also create a list of their longitudes and latitudes
# by stacking the coordinates of the generator vertices on top of those
# of the Voronoi vertices
svSimplices = []
self.hexBindex = np.arange(self.sv.points.shape[0])
for generatorVertex in self.hexBindex:
region = np.array(self.sv.regions[generatorVertex]) + self.sv.points.shape[0]
nTriangles = len(region)
for t in range(nTriangles):
svSimplices.append([generatorVertex, region[t], region[(t+1)%nTriangles]])
self.svSimplices = np.array(svSimplices)
self.svTriLons = np.hstack((self.svTriCenterLons, self.svVertexLons))
self.svTriLats = np.hstack((self.svTriCenterLats, self.svVertexLats))
assert np.unique(self.svSimplices).max() + 1 == self.svTriLons.shape[0] == self.svTriLats.shape[0], \
"Maximum element of svSimplices must correspond to the last index of svTriLons and svTriLats"
# Create svDomainMask, which is used to select which simplices of generating vertices can be used further
# (simplices with pseudo-'infinite' coordinates can be excluded this way)
self.svDomainMask = mask.mask[innerMaskLevel][mask.indices[outerMaskLevel]][self.svSimplices[:, 0]]
assert self.svDomainMask.shape[0] == self.svSimplices.shape[0], \
"Mask size should match svSimplices size"
def calculate_subsetted_neighbors(self):
"""
Create a dictionary with indices of neighbors for each vertex (key)
in the subsetted triangulation and regular rim.
"""
originalIndices = self.mask.indices[self.outerMaskLevel]
transDict = dict(zip(originalIndices, self.hexBindex))
self.subsettedNeighbors = {}
for generatorVertex in self.hexBindex:
self.subsettedNeighbors[generatorVertex] = set()
neighbors = self.neighbors[originalIndices[generatorVertex]]
for n in neighbors:
try:
self.subsettedNeighbors[generatorVertex].add(transDict[n])
except KeyError:
pass
if hasattr(self, "rimBindex"):
for vertex in self.rimBindex:
self.subsettedNeighbors[self.shiftedRimBindex[vertex]] = set()
self.subsettedNeighbors[self.shiftedRimBindex[vertex]].add(self.shiftedRimBindex[(vertex + 1) % self.nLon])
self.subsettedNeighbors[self.shiftedRimBindex[vertex]].add(self.shiftedRimBindex[(vertex - 1) % self.nLon])
if hasattr(self, "oceanMask"):
for vertex in self.bindex:
if not self.oceanMask[vertex]:
del self.subsettedNeighbors[vertex]
else:
for neighbor in list(self.subsettedNeighbors[vertex]):
if not self.oceanMask[neighbor]:
self.subsettedNeighbors[vertex].remove(neighbor)
def create_KDTree(self):
"""
Create a k-dimensional tree of the (masked) generating vertices (used for interpolation),
since interpolation in the voronoi simplices is by definition
equivalent to finding the nearest generating vertex.
"""
if not hasattr(self, 'sv'):
raise RuntimeError("Cannot create KDTree before calculating the (masked) Spherical voronoi division.")
self.tree = cKDTree(self.sv.points)
def query_tree(self, points, **kwargs):
"""
Check if a k-d tree already exist and query it.
Parameters
----------
points : np.array
(m x 3) dimensional array of m points to query the tree with
"""
if not hasattr(self, "tree"):
self.create_KDTree()
return self.tree.query(points)
def particle_count(self, particles, tindex=0):
"""
Create 'histogram' of particles in hexBins.
Parameters
----------
particles : community.particles
Particles to create a histogram with.
tindex : int
Time index of particles.lonlat to determine the count for.
Returns
-------
count : np.array
Array containing the counts per bin index.
"""
if not hasattr(self, "tree"):
self.create_KDTree()
# Convert spherical coordinates of points to cartesian coordinates
xp, yp, zp = get_cartesian(particles.lonlat[tindex, :, 0], particles.lonlat[tindex, :, 1], mode='deg')
# Query the tree to get the closest point
closest = self.tree.query(np.dstack((xp, yp, zp))[0])[1]
# Count particles per bin
vals, counts = np.unique(closest, return_counts=True)
transdict = dict(zip(vals, counts))
        # Arrange counts in the correct order of bin indices
        count = np.array([transdict[i] if i in transdict.keys() else 0 for i in self.bindex])
if tindex == 0:
self.initCount = count
return count
def find_adjacency(self):
"""
Create an adjacency list: for each node (community), determine which nodes are bordering this node.
Also flags all cells that are on borders.
Returns
-------
dict
Containing keys corresponding to community IDs and values being `set` objects
containing IDs of bordering communities.
"""
if not hasattr(self, "subsettedNeighbors"):
self.calculate_subsetted_neighbors()
# Construct empty adjacency dictionary
        # Using a dictionary so that labels coincide with the labels created by InfoMap, rather than being
# indices, which might not coincide with labels.
self.adjacencyDict = {}
# Iterate over all cells
for vertex in self.bindex:
# Save current community in variable
if not np.ma.is_masked(self.communityID[vertex]):
currentCommunity = self.communityID[vertex]
# If the current community doesn't have a key and value yet, add an empty
# set to the dictionary, with the key being the community ID.
if currentCommunity not in self.adjacencyDict:
self.adjacencyDict[currentCommunity] = set()
for neighbor in self.subsettedNeighbors[vertex]:
if not np.ma.is_masked(self.communityID[neighbor]):
self.adjacencyDict[currentCommunity].add(self.communityID[neighbor])
return self.adjacencyDict
def flag_on_boundary(self):
"""
Checks whether a cell is on a boundary between two communities.
Returns
-------
np.array
Array with 1 if cell is on a boundary and 0 if it is not
"""
self.onBoundary = np.zeros(self.bindex.shape)
for vertex in self.bindex:
if not np.ma.is_masked(self.communityID[vertex]):
currentCommunity = self.communityID[vertex]
for neighbor in self.subsettedNeighbors[vertex]:
if self.communityID[neighbor] != currentCommunity \
and not np.ma.is_masked(self.communityID[neighbor]):
self.onBoundary[vertex] = 1
return self.onBoundary
def add_regular_rim(self, minLat=59, maxLat=60, minLon=-180, maxLon=180, nLon=360):
"""
Add a rim of regularly spaced bins to the counting grid. Can be used to check where particles exit the domain.
Parameters
----------
minLat : float
minimum latitude of rim (southern boundary)
maxLat : float
maximum latitude of rim (northern boundary)
minLon : float
minimum longitude of rim (western boundary)
maxLon : float
maximum longitude of rim (eastern boundary)
nLon : int
number of bins in longitudinal direction
"""
self.nLon = nLon
self.rimBound = maxLat
dlat = (maxLat - minLat)
dlon = (maxLon - minLon)/nLon
lonOffset = dlon/2
self.rimLonBounds = np.linspace(minLon, maxLon, nLon+1)
self.rimLatBounds = np.linspace(minLat, maxLat, 2)
lonCenters = np.linspace(minLon + lonOffset, maxLon - lonOffset, nLon)
latCenters = np.linspace(minLat + dlat/2, maxLat - dlat/2)
self.rimBindex = np.arange(nLon)
self.shiftedRimBindex = self.rimBindex + self.hexBindex.shape[0]
def oceanMask_from_particles(self, particles, fieldset=None):
"""
Construct a mask of which bins are (partially) in the ocean.
        It is constructed by checking which bins have an initial particle count greater than zero,
        so it is therefore an approximation. In case of a rimBin, all bins in the rim are flagged
        as bins in the ocean, unless a fieldset is provided, in which case it is checked whether
        there is any overlap between the negation of `fieldset.landMask` and the rimBins.
Parameters
----------
particles : comtools.particles
Particles used to test which cells are ocean (i.e. initially contain particles)
fieldset : parcels.FieldSet
Fieldset with landMask attribute to check if the rimBins contain any ocean cells.
"""
initCount = self.particle_count(particles, tindex=0)
self.oceanMask = initCount > 0
if hasattr(self, "shiftedRimBindex"):
self.oceanMask[self.shiftedRimBindex] = True
if fieldset is not None:
minLatIdx = np.searchsorted(fieldset.U.grid.lat, self.rimLatBounds[0])
maxLatIdx = np.searchsorted(fieldset.U.grid.lat, self.rimLatBounds[1])
for i in self.rimBindex:
minLonIdx = np.searchsorted(fieldset.U.grid.lon, self.rimLonBounds[i])
                maxLonIdx = np.searchsorted(fieldset.U.grid.lon, self.rimLonBounds[i+1])
if not np.any(~fieldset.landMask[minLatIdx:(maxLatIdx+1)%fieldset.landMask.shape[0], minLonIdx:(maxLonIdx+1)%fieldset.landMask.shape[1]]):
                    self.oceanMask[self.shiftedRimBindex[i]] = False
return self.oceanMask
def pointToIdx(self, qlon, qlat):
"""
Use nearest neighbor search to return bindex based on longitude and latitude.
Parameters
----------
qlon : int, float, np.array
Longitude to query
qlat : int, float, np.array
Latitude to query
Returns
-------
np.array with indices.
"""
lons = self.lons[self.mask[self.outerMaskLevel]]
lats = self.lats[self.mask[self.outerMaskLevel]]
binPoints = np.dstack((lons, lats))[0]
return griddata(binPoints, self.bindex, np.dstack((qlon, qlat))[0], method="nearest")
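# Hedged usage sketch (not part of the original module): hexagonal bins from a refined
# icosahedral mesh, masked to the Arctic and extended with a regular rim. The refinement and
# mask levels are illustrative.
#
#   hexBins = hexCountBins.from_stripy(refinement=7)
#   mask = hexMask(hexBins, minLon=-180, maxLon=180, minLat=60, maxLat=90, bleed=2)
#   mask.growToLevel(2)
#   hexBins.calculate_voronoi(mask=mask, innerMaskLevel=1, outerMaskLevel=2)
#   hexBins.add_regular_rim(minLat=59, maxLat=60)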
class hexMask:
"""
Mask that can be used to determine which generating vertices in hexCountBins are kept,
given certain constraints in latitude and longitude. Mask can 'grow', by adding neighboring
vertices in successive levels
Attributes
----------
mask : dict
For a given level (integer key), contains an array with mask
indices : dict
For a given level (integer key), contains an array with indices of mask that are True
"""
def __init__(self, hexBins, minLon, maxLon, minLat, maxLat, bleed = 0):
"""
Parameters
----------
hexBins : community.hexCountBins
Hexagonal counting bins to create the mask for
minLon : float
Minimum longitude of generating vertices
maxLon : float
Maximum longitude of generating vertices
minLat : float
Minimum latitude of generating vertices
maxLat : float
Maximum latitude of generating vertices
bleed : float
Increase the constraints in latitude and longitude by this margin
"""
if hasattr(hexBins, "neighbors"):
self.hexBins = hexBins
else:
hexBins.calculate_neighbors()
self.hexBins = hexBins
self.minLon = minLon
self.maxLon = maxLon
self.minLat = minLat
self.maxLat = maxLat
self.mask = {}
self.indices = {}
self.mask[0] = np.logical_and(np.logical_and(self.hexBins.lons > minLon - bleed,
self.hexBins.lons < maxLon + bleed),
np.logical_and(self.hexBins.lats > minLat - bleed,
self.hexBins.lats < maxLat + bleed))
# Determine indices of mask
self.indices[0] = np.array([self.hexBins.vertexIndices[i] for i in np.arange(len(self.mask[0])) if self.mask[0][i]])
def growLevel(self):
"""
Grow the mask by one level: mark neighboring vertices as True
"""
currMax = max(self.mask.keys())
self.mask[currMax + 1] = np.copy(self.mask[currMax])
for i in self.indices[currMax]:
for j in self.hexBins.neighbors[i]:
self.mask[currMax + 1][j] = True
self.indices[currMax + 1] = np.array([self.hexBins.vertexIndices[i] for i in np.arange(len(self.mask[currMax + 1])) if self.mask[currMax + 1][i]])
def growToLevel(self, toLevel):
"""
Grow mask to the desired level
"""
currMax = max(self.mask.keys())
if toLevel <= currMax:
print("Level already reached")
else:
while toLevel > currMax:
self.growLevel()
currMax = max(self.mask.keys())
def __getitem__(self,index):
"""
When mask gets indexed, return the mask array for that level.
"""
try:
return self.mask[index]
        except KeyError:
raise IndexError("Mask growth level not available")
class transMat:
"""
Basic instance of transition matrix object
Attributes
----------
counter : np.array
Square matrix with [i,j] indicating number particles from bin i to bin j
sums : np.array
Square tiled matrix, with all values in row i equal to the number of particles leaving bin i
data : np.array
Actual transition matrix, with [i,j] indicating probability for a particle
from bin i to bin j (`counter` divided by `sums`)
"""
def __init__(self, counter):
"""
Initialize Transition Matrix using a counter matrix.
Counter is a symmetric matrix with [i,j] corresponding to particles from bin i to bin j
Parameters
----------
counter : np.array
Square matrix with [i,j] indicating number particles from bin i to bin j
"""
self.counter = counter
self.sums = np.tile(self.counter.sum(axis=1), (self.counter.shape[1],1)).T
self.data = np.divide(self.counter, self.sums, out=np.zeros_like(self.sums), where=self.sums!=0)
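    # Hedged worked example (not part of the original class): for a hand-written counter
    #   counter = np.array([[8., 2.],
    #                       [1., 9.]])
    # transMat(counter).data gives the row-normalised probabilities [[0.8, 0.2], [0.1, 0.9]],
    # since each row of `sums` holds the total number of particles leaving that bin.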
@classmethod
def from_lonlat(cls, lonlatInit, lonlatFinal, countBins, timedelta64 = None, mask = None, **kwargs):
"""
Create transition matrix from initial and final coordinate pairs, given a `countBins`
Parameters
----------
lonlatInit : (n x 2) np.array
Array containing n initial longitude-latitude pairs
lonlatFinal : np.array
Array containing n final longitude-latitude pairs
countBins : comtools.countBins
Grid containing cells on which the transition matrix is to be created.
timedelta64 : np.timedelta64
Timedelta relating to the elapsed time of the particle run for which the transition
matrix is to be determined. Example: np.timedelta64(30,'D') for 30 days.
mask : np.array with booleans
Array that selects bins to include.
Returns
-------
comtools.transmat
            Transition matrix object, including attributes `counter` containing particle
            transitions, and `sums` used for normalization.
"""
try:
assert mask.shape == countBins.bindex.shape, "oceanMask must have same shape as bindex"
        except AttributeError:
pass
# Find initial and final counting bin index for each particle
if countBins.binType == 'regular':
# Search for insertion bindex for initial and final lon and lat. -1 because we are using bounds
# so particles will be inserted on the next bindex.
bindexInit = np.dstack((np.searchsorted(countBins.lonBounds, lonlatInit[0,:,0]),
np.searchsorted(countBins.latBounds, lonlatInit[0,:,1])))[0]-1
bindexFinal = np.dstack((np.searchsorted(countBins.lonBounds, lonlatFinal[0,:,0]),
np.searchsorted(countBins.latBounds, lonlatFinal[0,:,1])))[0]-1
elif countBins.binType == 'hexagonal':
if not hasattr(countBins, "tree"):
countBins.create_KDTree()
if hasattr(countBins, "rimBindex"):
outOfBounds = lonlatFinal[0,:,1] < countBins.rimBound
# Convert spherical coordinates to cartesian
xInit, yInit, zInit = get_cartesian(lonlatInit[0,:,0][~outOfBounds], lonlatInit[0,:,1][~outOfBounds], mode='deg')
xFinal, yFinal, zFinal = get_cartesian(lonlatFinal[0,:,0][~outOfBounds], lonlatFinal[0,:,1][~outOfBounds], mode='deg')
# Check which indices are non NaNs (checking only for x, since NaNs in lonlat propagate in x,y,z equally)
noNaNIndices = np.logical_and(~np.isnan(xInit), ~np.isnan(xFinal))
# Find index of containing Voronoi region by querying tree
bindexInitMain = countBins.tree.query(np.dstack((xInit, yInit, zInit))[0][noNaNIndices])[1]
bindexFinalMain = countBins.tree.query(np.dstack((xFinal, yFinal, zFinal))[0][noNaNIndices])[1]
bindexInitRim = np.searchsorted(countBins.rimLonBounds, lonlatInit[0,:,0][outOfBounds])-1 + countBins.hexBindex.shape[0]
bindexFinalRim = np.searchsorted(countBins.rimLonBounds, lonlatFinal[0,:,0][outOfBounds])-1 + countBins.hexBindex.shape[0]
bindexInit = np.concatenate((bindexInitMain, bindexInitRim))
bindexFinal = np.concatenate((bindexFinalMain, bindexFinalRim))
else:
# Convert spherical coordinates to cartesian
xInit, yInit, zInit = get_cartesian(lonlatInit[0,:,0], lonlatInit[0,:,1], mode='deg')
xFinal, yFinal, zFinal = get_cartesian(lonlatFinal[0,:,0], lonlatFinal[0,:,1], mode='deg')
# Check which indices are non NaNs (checking only for x, since NaNs in lonlat propagate in x,y,z equally)
                noNaNIndices = np.logical_and(~np.isnan(xInit), ~np.isnan(xFinal))
#!/usr/bin/env python3
"""this python file contains the implementation of the code of the paper
'Evolved interactions stabilize many coexisting phases in multicomponent fluids'
The modules contains a few global constants, which set parameters of the algorithm as
described in the paper. They typically do not need to be changed. A good entry point
into the code might be to create a random interaction matrix and a random initial
composition using `random_interaction_matrix` and `get_uniform_random_composition`,
respectively. The function `evolve_dynamics` can then be used to evolve Eq. 4 in the
paper to its stationary state, whose composition the function returns. The returned
composition matrix can be fed into `count_phases` to obtain the number of distinct
phases. An ensemble average over initial conditions is demonstrated in the function
`estimate_performance`, which also uses Eq. 5 of the paper to estimate how well the
particular interaction matrix obtains a given target number of phases. Finally,
`run_evolution` demonstrates the evolutionary optimization over multiple generations.
"""
from typing import List, Tuple
import numpy as np
from numba import njit
from scipy import cluster, spatial
DT_INITIAL: float = 1.0 # initial time step for the relaxation dynamics
TRACKER_INTERVAL: float = 10.0 # interval for convergence check
TOLERANCE: float = 1e-4 # tolerance used to decide when stationary state is reached
CLUSTER_DISTANCE: float = 1e-2 # cutoff value for determining composition clusters
PERFORMANCE_TOLERANCE: float = 0.5 # tolerance used when calculating performance
KILL_FRACTION: float = 0.3 # fraction of population that is replaced each generation
REPETITIONS: int = 64 # number of samples used to estimate the performance
def random_interaction_matrix(
num_comp: int, chi_mean: float = None, chi_std: float = 1
) -> np.ndarray:
"""create a random interaction matrix
Args:
num_comp (int): The component count
chi_mean (float): The mean interaction strength
chi_std (float): The standard deviation of the interactions
Returns:
The full, symmetric interaction matrix
"""
if chi_mean is None:
chi_mean = 3 + 0.4 * num_comp
# initialize interaction matrix
chis = np.zeros((num_comp, num_comp))
# determine random entries
num_entries = num_comp * (num_comp - 1) // 2
chi_vals = np.random.normal(chi_mean, chi_std, num_entries)
# build symmetric matrix from this
i, j = np.triu_indices(num_comp, 1)
chis[i, j] = chi_vals
chis[j, i] = chi_vals
return chis
def mutate(
population: List[np.ndarray], mutation_size: float = 0.1, norm_max: float = np.inf
) -> None:
"""mutate all interaction matrices in a population
Args:
population (list): The interaction matrices of all individuals
mutation_size (float): Magnitude of the perturbation
norm_max (float): The maximal norm the matrix may attain
"""
for chis in population:
num_comp = len(chis)
# add normally distributed random number to independent entries
Δchi = np.zeros((num_comp, num_comp))
num_entries = num_comp * (num_comp - 1) // 2
idx = np.triu_indices_from(Δchi, k=1)
Δchi[idx] = np.random.normal(0, mutation_size, size=num_entries)
chis += Δchi + Δchi.T # preserve symmetry
if np.isfinite(norm_max):
# rescale entries to obey limit
norm = np.mean(np.abs(chis[idx]))
if norm > norm_max:
chis *= norm_max / norm
@njit
def get_uniform_random_composition(num_phases: int, num_comps: int) -> np.ndarray:
"""pick concentrations uniform from allowed simplex (sum of fractions < 1)
Args:
num_phases (int): the number of phases to pick concentrations for
num_comps (int): the number of components to use
Returns:
The fractions of num_comps components in num_phases phases
"""
phis = np.empty((num_phases, num_comps))
for n in range(num_phases):
phi_max = 1.0
for d in range(num_comps):
x = np.random.beta(1, num_comps - d) * phi_max
phi_max -= x
phis[n, d] = x
return phis
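# Added note: drawing x ~ Beta(1, num_comps - d) of the remaining budget phi_max is the
# stick-breaking construction of a flat Dirichlet over the num_comps fractions plus the
# implicit solvent fraction, i.e. the compositions are uniform on the allowed simplex.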
@njit
def calc_diffs(phis: np.ndarray, chis: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""calculates chemical potential and pressure
Note that we only calculate the parts that matter in the difference
Args:
phis: The composition of all phases
chis: The interaction matrix
Returns:
The chemical potentials and pressures in all phases
"""
phi_sol = 1 - phis.sum()
if phi_sol < 0:
raise RuntimeError("Solvent has negative concentration")
log_phi_sol = np.log(phi_sol)
mu = np.log(phis)
p = -log_phi_sol
for i in range(len(phis)): # iterate over components
val = chis[i] @ phis
mu[i] += val - log_phi_sol
p += 0.5 * val * phis[i]
return mu, p
@njit
def evolution_rate(phis: np.ndarray, chis: np.ndarray = None) -> np.ndarray:
"""calculates the evolution rate of a system with given interactions
Args:
phis: The composition of all phases
chis: The interaction matrix
Returns:
The rate of change of the composition (Eq. 4)
"""
num_phases, num_comps = phis.shape
# get chemical potential and pressure for all components and phases
mus = np.empty((num_phases, num_comps))
ps = np.empty(num_phases)
for n in range(num_phases): # iterate over phases
mu, p = calc_diffs(phis[n], chis)
mus[n, :] = mu
ps[n] = p
# calculate rate of change of the composition in all phases
dc = np.zeros((num_phases, num_comps))
for n in range(num_phases):
for m in range(num_phases):
delta_p = ps[n] - ps[m]
for i in range(num_comps):
delta_mu = mus[m, i] - mus[n, i]
dc[n, i] += phis[n, i] * (phis[m, i] * delta_mu - delta_p)
return dc
@njit
def iterate_inner(phis: np.ndarray, chis: np.ndarray, dt: float, steps: int) -> None:
"""iterates a system with given interactions
Args:
phis: The composition of all phases
chis: The interaction matrix
dt (float): The time step
steps (int): The step count
"""
for _ in range(steps):
# make a step
phis += dt * evolution_rate(phis, chis)
# check validity of the result
if np.any(np.isnan(phis)):
raise RuntimeError("Encountered NaN")
elif np.any(phis <= 0):
raise RuntimeError("Non-positive concentrations")
elif np.any(phis.sum(axis=-1) <= 0):
raise RuntimeError("Non-positive solvent concentrations")
def evolve_dynamics(chis: np.ndarray, phis_init: np.ndarray) -> np.ndarray:
"""evolve a particular system governed by a specific interaction matrix
Args:
chis: The interaction matrix
phis_init: The initial composition of all phases
Returns:
phis: The final composition of all phases
"""
phis = phis_init.copy()
phis_last = np.zeros_like(phis)
dt = DT_INITIAL
steps_inner = max(1, int(np.ceil(TRACKER_INTERVAL / dt)))
# run until convergence
while not np.allclose(phis, phis_last, rtol=TOLERANCE, atol=TOLERANCE):
phis_last = phis.copy()
# do the inner steps and reduce dt if necessary
while True:
try:
iterate_inner(phis, chis, dt=dt, steps=steps_inner)
except RuntimeError as err:
# problems in the simulation => reduced dt and reset phis
dt /= 2
steps_inner *= 2
phis[:] = phis_last
if dt < 1e-7:
raise RuntimeError(f"{err}\nReached minimal time step.")
else:
break
return phis
def count_phases(phis: np.ndarray) -> int:
"""calculate the number of distinct phases
Args:
phis: The composition of all phases
Returns:
int: The number of phases with distinct composition
"""
# calculate distances between compositions
dists = spatial.distance.pdist(phis)
# obtain hierarchy structure
links = cluster.hierarchy.linkage(dists, method="centroid")
# flatten the hierarchy by clustering
clusters = cluster.hierarchy.fcluster(links, CLUSTER_DISTANCE, criterion="distance")
return int(clusters.max())
def estimate_performance(chis: np.ndarray, target_phase_count: float) -> float:
"""estimate the performance of a given interaction matrix
Args:
chis: The interaction matrix
target_phase_count (float): The targeted phase count
Returns:
float: The estimated performance (between 0 and 1)
"""
num_comp = len(chis)
num_phases = num_comp + 2 # number of initial phases
phase_counts = np.zeros(num_phases + 1)
for _ in range(REPETITIONS):
# choose random initial condition
phis = get_uniform_random_composition(num_phases, num_comp)
# run relaxation dynamics again
try:
phis_final = evolve_dynamics(chis, phis_init=phis)
except RuntimeError as err:
# simulation could not finish
print(f"Simulation failed: {err}")
else:
# determine number of clusters
phase_counts[count_phases(phis_final)] += 1
# determine the phase count weights
sizes = np.arange(num_phases + 1)
arg = (sizes - target_phase_count) / PERFORMANCE_TOLERANCE
weights = np.exp(-0.5 * arg ** 2)
# calculate the performance
return phase_counts @ weights / phase_counts.sum()
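# Added note: the Gaussian weighting above implements the performance estimate of Eq. 5,
#   g(chi) = sum_K P(K) * exp(-(K - K_target)^2 / (2 * sigma^2)) / sum_K P(K),
# with sigma = PERFORMANCE_TOLERANCE and P(K) the empirical histogram of phase counts.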
def replace_unfit_fraction(
population: List[np.ndarray], performances: np.ndarray
) -> None:
"""replace the individuals with the lowest performance
Args:
population: The individual interaction matrices
performances: The performances of all individuals
"""
pop_size = len(population)
# determine the number of individuals that need to be replaced
kill_count = round(KILL_FRACTION * pop_size)
# kill least fit individuals
    kill_idx = np.argsort(performances)[:kill_count]
"""
Mix between a Feedforward Neural Network and Restricted Boltzmann Machine.
Inputs and Outputs are all consolidated and training is a 1-step Gibbs
sample where the error is the difference between the Input/Output feed
and their reconstruction after they bounced back (Gibbs' sample)
"""
# TODO: Profile and optimize performance
import time
import copy
import numpy as np
import sklearn.metrics as mt
from sklearn.preprocessing import MinMaxScaler
__version__ = '1.0'
UNCLAMPED_VALUE = 0.0 # DONE: Tested 0 and 0.5
def relu(input_value, minimum=0, maximum=1):
"""
Apply RELU activation function with option to clip values
:param input_value: Numpy array with input values
:param minimum: Minimum value to clip (default 0)
:param maximum: Maximum value to clip (default 1)
:return: Numpy array with RELU function applied
"""
return np.clip(input_value, minimum, maximum)
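# Added example: relu(np.array([-0.7, 0.3, 1.8])) -> array([0. , 0.3, 1. ]); the
# activation is a ReLU that is additionally clipped at `maximum`, keeping values in [0, 1].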
class MirNet(object):
"""
Mirror Network that consolidates input and output together
Training is done similarly to Boltzmann machine with
a 1-step Gibbs' sampling (deterministic network)
"""
def __init__(self, hidden_layers=(100,), type='classifier', seed=None,
verbose=False):
"""
        Build MirNet basic structure. Loosely structured like Sklearn's MLP
:param hidden_layers: Tuple describing the architecture
and number of neurons present in each layer
:param type: Network type: 'classifier' (default), 'regressor'
:param seed: Random seed to initialize the network
:param verbose: Verbose mode
"""
if type == "classifier":
self.loss = mt.log_loss
self.activation = relu
elif type == "regressor":
self.loss = mt.mean_squared_error
self.activation = relu
else:
raise Exception("Type %s not recognized" % type)
self.type = type
np.random.seed(seed)
self.epochs = 0
self.hidden_layers = hidden_layers
self.weights = []
self.scaler = MinMaxScaler() # TESTED: self.scaler = StandardScaler()
self.verbose = verbose
def sample(self, input_value, weights):
"""
Calculate 1-step Gibbs sample of the input data vector
:param input_value: Numpy array with values for all first level neurons (including output)
:param weights: List of Numpy arrays with network weights
:return: Two Numpy arrays with neurons value calculated for the positive and negative phase
"""
# Positive phase, from input to last layer
pos_phase = [input_value]
for w in weights:
neurons_input = np.dot(pos_phase[-1], w)
neurons_output = self.activation(neurons_input)
pos_phase = pos_phase + [neurons_output]
# Negative phase, from last to input layer
neg_phase = [pos_phase[-1]]
for w in weights[::-1]:
            neurons_input = np.dot(neg_phase[0], np.transpose(w))
import os
import pickle
import matplotlib.pyplot as plt
import os.path as osp
import numpy as np
from gym import Env
from gym import utils
from gym.spaces import Box
from mujoco_py import load_model_from_path, MjSim
import cv2
"""
Constants associated with the Maze env.
"""
HORIZON = 100
MAX_FORCE = 0.1
FAILURE_COST = 0
GOAL_THRESH = 3e-2
GT_STATE = True
DENSE_REWARD = True
def process_action(a):
return np.clip(a, -MAX_FORCE, MAX_FORCE)
def process_obs(obs):
im = np.transpose(obs, (2, 0, 1))
return im
def get_offline_data(num_transitions, images=False, save_rollouts=False):
env = MazeNavigation()
transitions = []
num_constraints = 0
total = 0
rollouts = []
for i in range(1 * num_transitions // 2):
if i % 20 == 0:
            sample = np.random.uniform(0, 1, 1)
import sys,os
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import numpy as np
import numpy.linalg as nplin
import scipy as sp
from scipy.linalg import pinvh as spinv
from scipy.sparse import csr_matrix,csc_matrix,random
from sklearn.preprocessing import OneHotEncoder as OneHotEncoder
from sklearn.linear_model import lasso_path
import Bio
# Code to test sparse inference with FEM
"""
Function to generate sequences for testing. Generate a lot of sequences,
then keep only sequences that are more likely `energetically'.
Expect that this is more biologically realistic than assuming that all possible sequences are part of the sample space.
"""
# In[3]:
def try_1hot():
n_seq,n_positions,n_residues = 5,5,4
np.random.seed(7)
seqs = np.random.randint(0,n_residues,size=(n_seq,n_positions))
enc = OneHotEncoder(n_values=n_residues)
onehot = csr_matrix(enc.fit_transform(seqs)).toarray()
print(onehot)
onehot = csr_matrix(enc.transform(seqs[:3])).toarray()
print(onehot)
onehot = csr_matrix(enc.transform(seqs[3:])).toarray()
print(onehot)
try_1hot()
# In[4]:
def normalize_w(w,n_positions,n_residues):
n_size = n_residues*n_positions
wr_1 = np.mean(w.reshape((n_positions,n_residues,n_size)),axis=1) #w(i,A,jB)
w = (w.reshape((n_positions,n_residues,n_size)) - wr_1[:,np.newaxis,:]).reshape((n_size,n_size)) #w(iA,jB)
return w
def zero_out(w,index,n_residues):
w[index*n_residues:(index+1)*n_residues,index*n_residues:(index+1)*n_residues]=0.0
def split_seqs(seqs,index): #remove column i in the sequence
return np.copy(np.hstack([seqs[:,:index],seqs[:,index+1:]]))
def split_couplings(w,index,n_residues): #remove row block i in the coupling matrix when we're using only column i
return np.copy(np.vstack([w[:index*n_residues],w[(index+1)*n_residues:]]))
print(split_couplings(np.arange(24).reshape((6,4)),1,2))
# In[5]:
def nrgy(onehot,w,b):
nrgy = onehot.multiply(onehot.dot(w) + b).toarray()
# print(nrgy - np.log(2*np.cosh(nrgy)))
return np.sum(nrgy - np.log(2*np.cosh(nrgy)),axis=1) #ln prob
def generate_sequences(n_residues,n_positions,n_seq):
n_size = n_residues*n_positions
n_trial = 10*(n_size) #monte carlo steps to find the right sequences
res_interactions = np.sign(random(n_positions,n_positions,density=0.3).A)
res_interactions = np.kron(res_interactions,np.ones((n_residues,n_residues)))
w = res_interactions*(np.random.rand(n_size,n_size)-0.5)
b = np.zeros((n_size))
#different versions of random matrices
# w = random(n_size,n_size,density=0.3).A -random(n_size,n_size,density=0.3).A
# w /= np.sqrt(float(n_positions))
# w = ((np.random.rand(n_size,n_size))-0.5)/np.sqrt(float(n_positions))#(float(n_positions*n_residues))##*float(n_residues))
# w = (np.random.normal(size=(n_size,n_size)))/(float(n_positions))#*(float(n_positions)))#*float(n_residues))
# b = (np.random.rand(n_size)-0.5)/float(n_residues)
# w = w+w.T #symmetric
for indx in range(n_positions): #no terms W_iA,iB for B != A
zero_out(w,indx,n_residues)
#w[indx*n_residues:(indx+1)*n_residues,indx*n_residues:(indx+1)*n_residues]=0.0
# trial_seq = np.random.randint(0,n_residues,size=(n_seq,n_positions)) #X(s,i)
trial_seq = np.tile(np.random.randint(0,n_residues,size=(n_positions)),(n_seq,1))
print(trial_seq[0])
enc = OneHotEncoder(n_values=n_residues)
onehot = csr_matrix(enc.fit_transform(trial_seq))
old_nrgy = nrgy(onehot,w,b) + n_positions*(n_residues-1)*np.log(2)
for trial in range(n_trial):
# print('before',np.mean(old_nrgy))
index_array = np.random.choice(range(n_positions),size=2,replace=False)
index,index1 = index_array[0],index_array[1]
r_trial = np.random.randint(0,n_residues,size=(n_seq))
r_trial1 = np.random.randint(0,n_residues,size=(n_seq))
mod_seq = np.copy(trial_seq)
mod_seq[:,index] = r_trial
mod_seq[:,index1] = r_trial1
mod_nrgy = nrgy(csr_matrix(enc.fit_transform(mod_seq)),w,b) + n_positions*(n_residues-1)*np.log(2)
# if trial%1000==0: print(seq_change)
seq_change = mod_nrgy-old_nrgy > np.log(np.random.rand(n_seq))
if trial>n_size:
trial_seq[seq_change,index] = r_trial[seq_change]
trial_seq[seq_change,index1] = r_trial1[seq_change]
old_nrgy[seq_change] = mod_nrgy[seq_change]
else:
best_seq = np.argmax(mod_nrgy-old_nrgy)
trial_seq = np.tile(mod_seq[best_seq],(n_seq,1))
old_nrgy = np.tile(mod_nrgy[best_seq],(n_seq))
#
        if trial%n_size == 0: print('after', np.mean(old_nrgy))
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
#the function expects documents to be a list of documents. To use just one document, pass [[document]]
def transformTextForTraining(embed_dictionary, length_threshold, documents, y_O, y_C, y_E, y_A, y_N, operation, FastText, friends=None):
vectorizer = CountVectorizer(stop_words="english", analyzer="word")
analyzer = vectorizer.build_analyzer()
tokenizer = vectorizer.build_tokenizer()
string = False
deleted = 0
if type(documents) is str: #single post
string = True
documents = [documents]
text_embeddings = []
i = 0
for document in documents:
words = analyzer(document)
#words = tokenizer(document)
if len(words) < length_threshold and not string:
deleted += 1
            y_O = np.delete(y_O, i)
#!/usr/bin/env python
import argparse
import ast
import numpy as np
import re
import torch
import torch.nn as nn
import torch.utils.data as data
from pathlib import Path
from torchvision import datasets, transforms
from typing import Dict, List, Optional
ParamDict = Dict[str, np.ndarray]
class PytorchReshape(nn.Module):
def __init__(self, shape):
super().__init__()
self.shape = (-1,) + tuple(shape)
def forward(self, x):
return x.contiguous().view(self.shape)
class PytorchTranspose(nn.Module):
def __init__(self, *dims):
super().__init__()
self.dims = (0,) + tuple(d + 1 for d in dims)
def forward(self, x):
return x.permute(self.dims)
def _parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"eran_network", type=Path, help="path to the ERAN network to convert"
)
parser.add_argument(
"-o",
"--output",
type=Path,
default=Path("model.onnx"),
help="path to save the ONNX model",
)
parser.add_argument(
"--input_shape",
type=int,
nargs="+",
default=[1, 28, 28],
help="the input shape to the network (in CHW format)",
)
parser.add_argument(
"--drop_normalization",
action="store_true",
help="do not include any input normalization in the converted model",
)
parser.add_argument(
"--check_cifar_accuracy",
action="store_true",
help="evaluate the converted model on the CIFAR10 test set",
)
parser.add_argument(
"--check_mnist_accuracy",
action="store_true",
help="evaluate the converted model on the MNIST test set",
)
return parser.parse_args()
def parse_layer_params(param_str: str) -> ParamDict:
params = []
pattern = re.compile(r"([a-zA-Z_]+?=.+?),? [a-zA-Z]")
while True:
param_match = re.match(pattern, param_str)
if param_match is None:
params.append(param_str)
break
params.append(param_match.group(1))
param_str = param_str[param_match.end() - 1 :]
param_dict = {}
for param in params:
key, value = param.split("=")
param_dict[key] = np.array(ast.literal_eval(value))
return param_dict
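# Added illustration (hypothetical layer description, not taken from an actual ERAN file):
#   parse_layer_params("filters=16, kernel_size=[4, 4], stride=[2, 2]")
# should yield roughly {'filters': array(16), 'kernel_size': array([4, 4]),
# 'stride': array([2, 2])}, since each "key=value" chunk is split off by the regex and
# parsed with ast.literal_eval into a numpy array.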
def build_normalize(
parameters: ParamDict, input_shape: List[int], output_shape: List[int]
) -> nn.Module:
output_shape.extend(input_shape)
num_c = input_shape[0]
weights = np.diag(1.0 / parameters["std"])
bias = -parameters["mean"] / parameters["std"]
norm_layer = nn.Conv2d(num_c, num_c, 1, 1)
norm_layer.weight.data = torch.from_numpy(
weights.reshape(num_c, num_c, 1, 1)
).float()
norm_layer.bias.data = torch.from_numpy(bias).float()
return norm_layer
def build_linear(
weights: np.ndarray,
bias: np.ndarray,
activation: str,
input_shape: List[int],
output_shape: List[int],
) -> nn.Module:
flat_input_size = np.product(input_shape)
output_shape.append(bias.shape[0])
flat_output_size = np.product(output_shape)
linear_layer = nn.Linear(flat_input_size, flat_output_size)
linear_layer.weight.data = torch.from_numpy(weights).float()
linear_layer.bias.data = torch.from_numpy(bias).float()
activation_layer: Optional[nn.Module] = None
if activation == "relu":
activation_layer = nn.ReLU()
elif activation == "sigmoid":
activation_layer = nn.Sigmoid()
elif activation == "tanh":
activation_layer = nn.Tanh()
elif activation == "affine":
return linear_layer
else:
raise ValueError(f"Unknown activation type: {activation}")
return nn.Sequential(linear_layer, activation_layer)
def build_conv(
weights: np.ndarray,
bias: np.ndarray,
activation: str,
parameters: ParamDict,
input_shape: List[int],
output_shape: List[int],
) -> nn.Module:
k = parameters["kernel_size"]
if not k.shape or len(k) == 1:
k_h = k_w = k.item()
else:
assert len(k) == 2
k_h, k_w = k
p = parameters.get("padding", np.array([0]))
if not p.shape or len(p) == 1:
p_top = p_left = p_bottom = p_right = p.item()
elif len(p) == 2:
p_top = p_bottom = p[0]
p_left = p_right = p[1]
else:
assert len(p) == 4, f"len(p) = {len(p)} != 4"
p_top, p_left, p_bottom, p_right = p
assert p_top == p_bottom, f"unsupported padding: {p}"
assert p_left == p_right, f"unsupported padding: {p}"
s = parameters.get("stride", np.array([1, 1]))
if not s.shape or len(s) == 1:
s_h = s_w = s.item()
else:
assert len(s) == 2
s_h, s_w = s
in_c, in_h, in_w = input_shape
out_c = parameters["filters"].item()
out_h = int(np.floor(float(in_h - k_h + p_top + p_bottom) / s_h + 1))
out_w = int(np.floor(float(in_w - k_w + p_left + p_right) / s_w + 1))
output_shape.extend([out_c, out_h, out_w])
conv_layer = nn.Conv2d(
input_shape[0],
output_shape[0],
(k_h, k_w),
(s_h, s_w),
(min(p_top, p_bottom), min(p_left, p_right)),
)
conv_layer.weight.data = torch.from_numpy(weights).float().permute(3, 2, 0, 1)
conv_layer.bias.data = torch.from_numpy(bias).float()
activation_layer: Optional[nn.Module] = None
if activation == "relu":
activation_layer = nn.ReLU()
elif activation == "sigmoid":
activation_layer = nn.Sigmoid()
elif activation == "tanh":
activation_layer = nn.Tanh()
elif activation == "affine":
return conv_layer
else:
raise ValueError(f"Unknown activation type: {activation}")
return nn.Sequential(conv_layer, activation_layer)
def build_maxpool(
parameters: ParamDict, input_shape: List[int], output_shape: List[int]
) -> nn.Module:
k = parameters["pool_size"]
if not k.shape or len(k) == 1:
k_h = k_w = k.item()
else:
assert len(k) == 2
k_h, k_w = k
if "padding" in parameters:
raise ValueError("Padding for MaxPool is not currently supported")
p_top = p_left = p_bottom = p_right = 0
    s = parameters.get("stride", np.array([k_h, k_w]))
# coding: utf-8
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Dataset for 3D object detection on SUN RGB-D (with support of vote supervision).
A sunrgbd oriented bounding box is parameterized by (cx,cy,cz), (l,w,h) -- (dx,dy,dz) in upright depth coord
(Z is up, Y is forward, X is right ward), heading angle (from +X rotating to -Y) and semantic class
Point clouds are in **upright_depth coordinate (X right, Y forward, Z upward)**
Return heading class, heading residual, size class and size residual for 3D bounding boxes.
Oriented bounding box is parameterized by (cx,cy,cz), (l,w,h), heading_angle and semantic class label.
(cx,cy,cz) is in upright depth coordinate
(l,h,w) are the lengths of the object's bounding box edges
The heading angle is a rotation rad from +X rotating towards -Y. (+X is 0, -Y is pi/2)
Author: <NAME>
Date: 2019
"""
import os
import sys
import numpy as np
from torch.utils.data import Dataset
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import pc_util
import sunrgbd_utils
from sunrgbd_utils import extract_pc_in_box3d
from model_util_sunrgbd import SunrgbdDatasetConfig
DC = SunrgbdDatasetConfig() # dataset specific config
MAX_NUM_OBJ = 64 # maximum number of objects allowed per scene
MEAN_COLOR_RGB = np.array([0.5,0.5,0.5]) # sunrgbd color is in 0~1
DIST_THRESH = 0.1#0.2
VAR_THRESH = 5e-3
CENTER_THRESH = 0.1
LOWER_THRESH = 1e-6
NUM_POINT = 50
NUM_POINT_LINE = 10
LINE_THRESH = 0.1#0.2
MIND_THRESH = 0.1
NUM_POINT_SEM_THRESHOLD = 1
def check_upright(para_points):
return (para_points[0][-1] == para_points[1][-1]) and (para_points[1][-1] == para_points[2][-1]) and (para_points[2][-1] == para_points[3][-1])
def check_z(plane_equ, para_points):
return np.sum(para_points[:,2] + plane_equ[-1]) / 4.0 < LOWER_THRESH
def clockwise2counter(angle):
'''
@Args:
angle: clockwise from x axis, from 0 to 2*pi,
@Returns:
theta: counter clockwise, -pi / 2 ~ pi / 2, +x~+y: (0, pi/2), +x~-y: (0, -pi/2)
'''
return -((angle + np.pi / 2) % np.pi) + np.pi / 2;
def point2line_dist(points, a, b):
'''
@Args:
points: (N, 3)
a / b: (3,)
@Returns:
distance: (N,)
'''
x = b - a
t = np.dot(points - a, x) / np.dot(x, x)
c = a + t[:, None] * np.tile(x, (t.shape[0], 1))
return np.linalg.norm(points - c, axis=1)
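# Added sanity check: point2line_dist(np.array([[0., 1., 0.]]), np.array([0., 0., 0.]),
# np.array([1., 0., 0.])) returns array([1.]), the perpendicular distance from the point
# (0, 1, 0) to the line through a and b (here, the x-axis).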
def get_linesel(points, corners, direction):
''' corners:
[[xmin, ymin, zmin], [xmin, ymin, zmax], [xmin, ymax, zmin], [xmin, ymax, zmax],
[xmax, ymin, zmin], [xmax, ymin, zmax], [xmax, ymax, zmin], [xmax, ymax, zmax]]
'''
if direction == 'lower':
sel1 = point2line_dist(points, corners[0], corners[2]) < LINE_THRESH
sel2 = point2line_dist(points, corners[4], corners[6]) < LINE_THRESH
sel3 = point2line_dist(points, corners[0], corners[4]) < LINE_THRESH
sel4 = point2line_dist(points, corners[2], corners[6]) < LINE_THRESH
return sel1, sel2, sel3, sel4
elif direction == 'upper':
sel1 = point2line_dist(points, corners[1], corners[3]) < LINE_THRESH
sel2 = point2line_dist(points, corners[5], corners[7]) < LINE_THRESH
sel3 = point2line_dist(points, corners[1], corners[5]) < LINE_THRESH
sel4 = point2line_dist(points, corners[3], corners[7]) < LINE_THRESH
return sel1, sel2, sel3, sel4
elif direction == 'left':
sel1 = point2line_dist(points, corners[0], corners[1]) < LINE_THRESH
sel2 = point2line_dist(points, corners[2], corners[3]) < LINE_THRESH
return sel1, sel2
elif direction == 'right':
sel1 = point2line_dist(points, corners[4], corners[5]) < LINE_THRESH
sel2 = point2line_dist(points, corners[6], corners[7]) < LINE_THRESH
return sel1, sel2
else:
        raise AssertionError('direction must be lower / upper / left / right')
def get_linesel2(points, ymin, ymax, zmin, zmax, axis=0):
#sel3 = sweep(points, axis, ymax, 2, zmin, zmax)
#sel4 = sweep(points, axis, ymax, 2, zmin, zmax)
sel3 = np.abs(points[:,axis] - ymin) < LINE_THRESH
sel4 = np.abs(points[:,axis] - ymax) < LINE_THRESH
return sel3, sel4
''' ATTENTION: SUNRGBD, size_label is only half the actual size
'''
def params2bbox(center, size, angle):
''' from bbox_center, angle and size to bbox
@Args:
center: (3,)
size: (3,)
angle: -pi ~ pi, +x~+y: (0, pi/2), +x~-y: (0, -pi/2)
@Returns:
bbox: 8 x 3, order:
[[xmin, ymin, zmin], [xmin, ymin, zmax], [xmin, ymax, zmin], [xmin, ymax, zmax],
[xmax, ymin, zmin], [xmax, ymin, zmax], [xmax, ymax, zmin], [xmax, ymax, zmax]]
'''
xsize = size[0]
ysize = size[1]
zsize = size[2]
vx = np.array([np.cos(angle), np.sin(angle), 0])
vy = np.array([-np.sin(angle), np.cos(angle), 0])
vx = vx * np.abs(xsize) / 2
vy = vy * np.abs(ysize) / 2
vz = np.array([0, 0, np.abs(zsize) / 2])
bbox = np.array([\
center - vx - vy - vz, center - vx - vy + vz,
center - vx + vy - vz, center - vx + vy + vz,
center + vx - vy - vz, center + vx - vy + vz,
center + vx + vy - vz, center + vx + vy + vz])
return bbox
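# Added example (axis-aligned case): params2bbox(np.zeros(3), np.array([2., 2., 2.]), 0.0)
# returns the eight corners (+-1, +-1, +-1) in the order documented above, i.e. the cube
# [-1, 1]^3 centred at the origin; a non-zero angle rotates the box's x/y axes in-plane.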
class SunrgbdDetectionVotesDataset(Dataset):
def __init__(self, data_path=None, split_set='train', num_points=20000,
use_color=False, use_height=False, use_v1=False,
augment=False, scan_idx_list=None):
assert(num_points<=50000)
self.use_v1 = use_v1
if use_v1:
self.data_path = os.path.join(data_path, 'sunrgbd_pc_bbox_votes_50k_v1_' + split_set)
# self.data_path = os.path.join('/scratch/cluster/yanght/Dataset/sunrgbd/sunrgbd_pc_bbox_votes_50k_v1_' + split_set)
else:
AssertionError("v2 data is not prepared")
self.raw_data_path = os.path.join(ROOT_DIR, 'sunrgbd/sunrgbd_trainval')
self.scan_names = sorted(list(set([os.path.basename(x)[0:6] \
for x in os.listdir(self.data_path)])))
if scan_idx_list is not None:
self.scan_names = [self.scan_names[i] for i in scan_idx_list]
self.num_points = num_points
self.augment = augment
self.use_color = use_color
self.use_height = use_height
def __len__(self):
return len(self.scan_names)
def __getitem__(self, idx):
"""
Returns a dict with following keys:
point_clouds: (N,3+C)
center_label: (MAX_NUM_OBJ,3) for GT box center XYZ
heading_class_label: (MAX_NUM_OBJ,) with int values in 0,...,NUM_HEADING_BIN-1
heading_residual_label: (MAX_NUM_OBJ,)
size_classe_label: (MAX_NUM_OBJ,) with int values in 0,...,NUM_SIZE_CLUSTER
size_residual_label: (MAX_NUM_OBJ,3)
sem_cls_label: (MAX_NUM_OBJ,) semantic class index
box_label_mask: (MAX_NUM_OBJ) as 0/1 with 1 indicating a unique box
vote_label: (N,9) with votes XYZ (3 votes: X1Y1Z1, X2Y2Z2, X3Y3Z3)
if there is only one vote than X1==X2==X3 etc.
vote_label_mask: (N,) with 0/1 with 1 indicating the point
is in one of the object's OBB.
scan_idx: int scan index in scan_names list
max_gt_bboxes: unused
"""
scan_name = self.scan_names[idx]
point_color_sem = np.load(os.path.join(self.data_path, scan_name)+'_pc.npz')['pc'] # Nx6
bboxes = np.load(os.path.join(self.data_path, scan_name)+'_bbox.npy') # K,8
point_votes = np.load(os.path.join(self.data_path, scan_name)+'_votes.npz')['point_votes'] # Nx10
semantics37 = point_color_sem[:, 6]
semantics10 = np.array([DC.class37_2_class10[k] for k in semantics37])
semantics10_multi = [DC.class37_2_class10_multi[k] for k in semantics37]
if not self.use_color:
point_cloud = point_color_sem[:, 0:3]
else:
point_cloud = point_color_sem[:,0:6]
point_cloud[:,3:6] = (point_color_sem[:,3:6]-MEAN_COLOR_RGB)
if self.use_height:
floor_height = np.percentile(point_cloud[:,2],0.99)
height = point_cloud[:,2] - floor_height
point_cloud = np.concatenate([point_cloud, np.expand_dims(height, 1)],1) # (N,4) or (N,7)
# ------------------------------- DATA AUGMENTATION ------------------------------
if self.augment:
if np.random.random() > 0.5:
# Flipping along the YZ plane
point_cloud[:,0] = -1 * point_cloud[:,0]
bboxes[:,0] = -1 * bboxes[:,0]
bboxes[:,6] = np.pi - bboxes[:,6]
point_votes[:,[1,4,7]] = -1 * point_votes[:,[1,4,7]]
# Rotation along up-axis/Z-axis
rot_angle = (np.random.random()*np.pi/3) - np.pi/6 # -30 ~ +30 degree
rot_mat = sunrgbd_utils.rotz(rot_angle)
point_votes_end = np.zeros_like(point_votes)
point_votes_end[:,1:4] = np.dot(point_cloud[:,0:3] + point_votes[:,1:4], np.transpose(rot_mat))
point_votes_end[:,4:7] = np.dot(point_cloud[:,0:3] + point_votes[:,4:7], np.transpose(rot_mat))
point_votes_end[:,7:10] = np.dot(point_cloud[:,0:3] + point_votes[:,7:10], np.transpose(rot_mat))
point_cloud[:,0:3] = np.dot(point_cloud[:,0:3], np.transpose(rot_mat))
bboxes[:,0:3] = np.dot(bboxes[:,0:3], np.transpose(rot_mat))
bboxes[:,6] -= rot_angle
point_votes[:,1:4] = point_votes_end[:,1:4] - point_cloud[:,0:3]
point_votes[:,4:7] = point_votes_end[:,4:7] - point_cloud[:,0:3]
point_votes[:,7:10] = point_votes_end[:,7:10] - point_cloud[:,0:3]
# Augment RGB color
if self.use_color:
rgb_color = point_cloud[:,3:6] + MEAN_COLOR_RGB
rgb_color *= (1+0.4*np.random.random(3)-0.2) # brightness change for each channel
rgb_color += (0.1*np.random.random(3)-0.05) # color shift for each channel
rgb_color += np.expand_dims((0.05*np.random.random(point_cloud.shape[0])-0.025), -1) # jittering on each pixel
rgb_color = np.clip(rgb_color, 0, 1)
# randomly drop out 30% of the points' colors
rgb_color *= np.expand_dims(np.random.random(point_cloud.shape[0])>0.3,-1)
point_cloud[:,3:6] = rgb_color - MEAN_COLOR_RGB
# Augment point cloud scale: 0.85x-1.15x
scale_ratio = np.random.random()*0.3+0.85
scale_ratio = np.expand_dims(np.tile(scale_ratio,3),0)
point_cloud[:,0:3] *= scale_ratio
bboxes[:,0:3] *= scale_ratio
bboxes[:,3:6] *= scale_ratio
point_votes[:,1:4] *= scale_ratio
point_votes[:,4:7] *= scale_ratio
point_votes[:,7:10] *= scale_ratio
if self.use_height:
point_cloud[:,-1] *= scale_ratio[0,0]
# ------------------------------- LABELS ------------------------------
box3d_centers = np.zeros((MAX_NUM_OBJ, 3))
box3d_sizes = np.zeros((MAX_NUM_OBJ, 3))
angle_classes = np.zeros((MAX_NUM_OBJ,))
angle_residuals = np.zeros((MAX_NUM_OBJ,))
size_classes = np.zeros((MAX_NUM_OBJ,))
size_residuals = np.zeros((MAX_NUM_OBJ, 3))
label_mask = np.zeros((MAX_NUM_OBJ))
label_mask[0:bboxes.shape[0]] = 1
max_bboxes = np.zeros((MAX_NUM_OBJ, 8))
max_bboxes[0:bboxes.shape[0],:] = bboxes
# new items
box3d_angles = np.zeros((MAX_NUM_OBJ,))
point_boundary_mask_z = np.zeros(self.num_points)
point_boundary_mask_xy = np.zeros(self.num_points)
point_boundary_offset_z = np.zeros([self.num_points, 3])
point_boundary_offset_xy = np.zeros([self.num_points, 3])
point_boundary_sem_z = np.zeros([self.num_points, 3+2+1])
point_boundary_sem_xy = np.zeros([self.num_points, 3+1+1])
point_line_mask = np.zeros(self.num_points)
point_line_offset = np.zeros([self.num_points, 3])
point_line_sem = np.zeros([self.num_points, 3+1])
for i in range(bboxes.shape[0]):
bbox = bboxes[i]
semantic_class = bbox[7]
box3d_center = bbox[0:3]
angle_class, angle_residual = DC.angle2class(bbox[6])
# NOTE: The mean size stored in size2class is of full length of box edges,
            # while in sunrgbd_data.py data dumping we dumped *half* lengths l,w,h, so we have to multiply by 2 here
box3d_size = bbox[3:6]*2
size_class, size_residual = DC.size2class(box3d_size, DC.class2type[semantic_class])
box3d_centers[i,:] = box3d_center
angle_classes[i] = angle_class
angle_residuals[i] = angle_residual
size_classes[i] = size_class
size_residuals[i] = size_residual
box3d_sizes[i,:] = box3d_size
box3d_angles[i] = bbox[6]
target_bboxes_mask = label_mask
target_bboxes = np.zeros((MAX_NUM_OBJ, 6))
for i in range(bboxes.shape[0]):
bbox = bboxes[i]
corners_3d = sunrgbd_utils.my_compute_box_3d(bbox[0:3], bbox[3:6], bbox[6])
# compute axis aligned box
xmin = np.min(corners_3d[:,0])
ymin = np.min(corners_3d[:,1])
zmin = np.min(corners_3d[:,2])
xmax = np.max(corners_3d[:,0])
ymax = np.max(corners_3d[:,1])
zmax = np.max(corners_3d[:,2])
target_bbox = np.array([(xmin+xmax)/2, (ymin+ymax)/2, (zmin+zmax)/2, xmax-xmin, ymax-ymin, zmax-zmin])
target_bboxes[i,:] = target_bbox
point_cloud, choices = pc_util.random_sampling(point_cloud, self.num_points, return_choices=True)
semantics37 = semantics37[choices]
semantics10 = semantics10[choices]
semantics10_multi = [semantics10_multi[i] for i in choices]
point_votes_mask = point_votes[choices,0]
point_votes = point_votes[choices,1:]
# box angle is -pi to pi
for i in range(bboxes.shape[0]):
bbox = bboxes[i]
corners = params2bbox(bbox[:3], 2 * bbox[3:6], clockwise2counter(bbox[6]))
# corners_votenet = sunrgbd_utils.my_compute_box_3d(bbox[:3], bbox[3:6], bbox[6])
try:
x_all_cls, ind_all_cls = extract_pc_in_box3d(point_cloud, corners)
except:
continue
ind_all_cls = np.where(ind_all_cls)[0] # T/F to index
# find point with same semantic as bbox, note semantics is 37 cls in sunrgbd
# ind = ind_all_cls[np.where(semantics10[ind_all_cls] == bbox[7])[0]]
ind = []
for j in ind_all_cls:
if bbox[7] in semantics10_multi[j]:
ind.append(j)
ind = np.array(ind)
if ind.shape[0] < NUM_POINT_SEM_THRESHOLD:
pass
else:
x = point_cloud[ind, :3]
###Get bb planes and boundary points
plane_lower_temp = np.array([0,0,1,-corners[6,-1]])
para_points = np.array([corners[1], corners[3], corners[5], corners[7]])
newd = np.sum(para_points * plane_lower_temp[:3], 1)
if check_upright(para_points) and plane_lower_temp[0]+plane_lower_temp[1] < LOWER_THRESH:
plane_lower = np.array([0,0,1,plane_lower_temp[-1]])
plane_upper = np.array([0,0,1,-np.mean(newd)])
else:
import pdb;pdb.set_trace()
print ("error with upright")
if check_z(plane_upper, para_points) == False:
import pdb;pdb.set_trace()
### Get the boundary points here
#alldist = np.abs(np.sum(point_cloud[:,:3]*plane_lower[:3], 1) + plane_lower[-1])
alldist = np.abs(np.sum(x*plane_lower[:3], 1) + plane_lower[-1])
mind = np.min(alldist)
#[count, val] = np.histogram(alldist, bins=20)
#mind = val[np.argmax(count)]
sel = np.abs(alldist - mind) < DIST_THRESH
#sel = (np.abs(alldist - mind) < DIST_THRESH) & (point_cloud[:,0] >= xmin) & (point_cloud[:,0] <= xmax) & (point_cloud[:,1] >= ymin) & (point_cloud[:,1] <= ymax)
## Get lower four lines
line_sel1, line_sel2, line_sel3, line_sel4 = get_linesel(x[sel], corners, 'lower')
if np.sum(line_sel1) > NUM_POINT_LINE:
point_line_mask[ind[sel][line_sel1]] = 1.0
linecenter = (corners[0] + corners[2]) / 2.0
point_line_offset[ind[sel][line_sel1]] = linecenter - x[sel][line_sel1]
point_line_sem[ind[sel][line_sel1]] = np.array([linecenter[0], linecenter[1], linecenter[2], bbox[7]])
if np.sum(line_sel2) > NUM_POINT_LINE:
point_line_mask[ind[sel][line_sel2]] = 1.0
linecenter = (corners[4] + corners[6]) / 2.0
point_line_offset[ind[sel][line_sel2]] = linecenter - x[sel][line_sel2]
point_line_sem[ind[sel][line_sel2]] = np.array([linecenter[0], linecenter[1], linecenter[2], bbox[7]])
if np.sum(line_sel3) > NUM_POINT_LINE:
point_line_mask[ind[sel][line_sel3]] = 1.0
linecenter = (corners[0] + corners[4]) / 2.0
point_line_offset[ind[sel][line_sel3]] = linecenter - x[sel][line_sel3]
point_line_sem[ind[sel][line_sel3]] = np.array([linecenter[0], linecenter[1], linecenter[2], bbox[7]])
if np.sum(line_sel4) > NUM_POINT_LINE:
point_line_mask[ind[sel][line_sel4]] = 1.0
linecenter = (corners[2] + corners[6]) / 2.0
point_line_offset[ind[sel][line_sel4]] = linecenter - x[sel][line_sel4]
point_line_sem[ind[sel][line_sel4]] = np.array([linecenter[0], linecenter[1], linecenter[2], bbox[7]])
if np.sum(sel) > NUM_POINT and np.var(alldist[sel]) < VAR_THRESH:
# center = np.array([(xmin+xmax)/2.0, (ymin+ymax)/2.0, np.mean(x[sel][:,2])])
center = (corners[0] + corners[6]) / 2.0
center[2] = np.mean(x[sel][:,2])
sel_global = ind[sel]
point_boundary_mask_z[sel_global] = 1.0
point_boundary_sem_z[sel_global] = np.array([center[0], center[1], center[2], np.linalg.norm(corners[4] - corners[0]), np.linalg.norm(corners[2] - corners[0]), bbox[7]])
point_boundary_offset_z[sel_global] = center - x[sel]
'''
### Check for middle z surfaces
[count, val] = np.histogram(alldist, bins=20)
mind_middle = val[np.argmax(count)]
sel_pre = np.copy(sel)
sel = np.abs(alldist - mind_middle) < DIST_THRESH
if np.abs(np.mean(x[sel_pre][:,2]) - np.mean(x[sel][:,2])) > MIND_THRESH:
### Do not use line for middle surfaces
if np.sum(sel) > NUM_POINT and np.var(alldist[sel]) < VAR_THRESH:
center = (corners[0] + corners[6]) / 2.0
center[2] = np.mean(x[sel][:,2])
# center = np.array([(xmin+xmax)/2.0, (ymin+ymax)/2.0, np.mean(x[sel][:,2])])
sel_global = ind[sel]
point_boundary_mask_z[sel_global] = 1.0
point_boundary_sem_z[sel_global] = np.array([center[0], center[1], center[2], np.linalg.norm(corners[4] - corners[0]), np.linalg.norm(corners[2] - corners[0]), bbox[7]])
point_boundary_offset_z[sel_global] = center - x[sel]
'''
### Get the boundary points here
alldist = np.abs(np.sum(x*plane_upper[:3], 1) + plane_upper[-1])
mind = np.min(alldist)
#[count, val] = np.histogram(alldist, bins=20)
#mind = val[np.argmax(count)]
sel = np.abs(alldist - mind) < DIST_THRESH
#sel = (np.abs(alldist - mind) < DIST_THRESH) & (point_cloud[:,0] >= xmin) & (point_cloud[:,0] <= xmax) & (point_cloud[:,1] >= ymin) & (point_cloud[:,1] <= ymax)
## Get upper four lines
line_sel1, line_sel2, line_sel3, line_sel4 = get_linesel(x[sel], corners, 'upper')
if np.sum(line_sel1) > NUM_POINT_LINE:
point_line_mask[ind[sel][line_sel1]] = 1.0
linecenter = (corners[1] + corners[3]) / 2.0
point_line_offset[ind[sel][line_sel1]] = linecenter - x[sel][line_sel1]
                    point_line_sem[ind[sel][line_sel1]] = np.array([linecenter[0], linecenter[1], linecenter[2], bbox[7]])
import numpy as np
from .kernels import GWRKernel, GTWRKernel
from scipy import linalg
def _compute_betas_gwr(y, x, wi):
"""
compute MLE coefficients using iwls routine
Methods: p189, Iteratively (Re)weighted Least Squares (IWLS),
<NAME>., <NAME>., & <NAME>. (2002).
Geographically weighted regression: the analysis of spatially varying relationships.
"""
xT = (x * wi).T
xtx = np.dot(xT, x)
xtx_inv_xt = linalg.solve(xtx, xT)
betas = np.dot(xtx_inv_xt, y)
return betas, xtx_inv_xt
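# Added note: with W_i = diag(wi), the routine above solves the locally weighted
# least-squares problem beta_i = (X^T W_i X)^{-1} X^T W_i y and also returns the factor
# (X^T W_i X)^{-1} X^T W_i, which the callers use for leverages (influ) and CCT.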
class GWR(object):
"""
Geographically Weighted Regression
Parameters
----------
coords : array-like
n*2, collection of n sets of (x,y) coordinates of
                    observations
y : array
n*1, dependent variable
X : array
                    n*k, independent variable, excluding the constant
bw : scalar
bandwidth value consisting of either a distance or N
nearest neighbors; user specified or obtained using
sel
kernel : string
type of kernel function used to weight observations;
available options:
'gaussian'
'bisquare'
'exponential'
fixed : boolean
True for distance based kernel function and False for
adaptive (nearest neighbor) kernel function (default)
constant : boolean
True to include intercept (default) in model and False to exclude
intercept.
Attributes
----------
coords : array-like
n*2, collection of n sets of (x,y) coordinates used for
calibration locations
t : array
n*1, time location
y : array
n*1, dependent variable
X : array
                    n*k, independent variable, excluding the constant
bw : scalar
bandwidth value consisting of either a distance or N
nearest neighbors; user specified or obtained using
sel
kernel : string
type of kernel function used to weight observations;
available options:
'gaussian'
'bisquare'
'exponential'
fixed : boolean
True for distance based kernel function and False for
adaptive (nearest neighbor) kernel function (default)
constant : boolean
True to include intercept (default) in model and False to exclude
intercept
n : integer
number of observations
k : integer
number of independent variables
Examples
--------
>>> import numpy as np
>>> from gtwr.model import GWR
>>> np.random.seed(1)
>>> u = np.array([(i-1)%12 for i in range(1,1729)]).reshape(-1,1)
>>> v = np.array([((i-1)%144)//12 for i in range(1,1729)]).reshape(-1,1)
>>> t = np.array([(i-1)//144 for i in range(1,1729)]).reshape(-1,1)
>>> x1 = np.random.uniform(0,1,(1728,1))
>>> x2 = np.random.uniform(0,1,(1728,1))
>>> epsilon = np.random.randn(1728,1)
>>> beta0 = 5
>>> beta1 = 3 + (u + v + t)/6
>>> beta2 = 3+((36-(6-u)**2)*(36-(6-v)**2)*(36-(6-t)**2))/128
>>> y = beta0 + beta1 * x1 + beta2 * x2 + epsilon
>>> coords = np.hstack([u,v])
>>> X = np.hstack([x1,x2])
>>> gwr = GWR(coords, y, X, 0.8, kernel='gaussian', fixed=True).fit()
    >>> print(gwr.R2)
0.9143147048821345
"""
def __init__(self, coords, y, X, bw, kernel = 'gaussian',
fixed = False, constant = True):
self.coords = coords
self.y = y
self.n = X.shape[0]
self.bw = bw
self.kernel = kernel
self.fixed = fixed
self.constant = constant
if self.constant:
self.X = np.hstack([np.ones((self.n, 1)), X])
else:
self.X = X
self.k = self.X.shape[1]
def _build_wi(self, i, bw):
try:
wi = GWRKernel(i, self.coords, bw, fixed=self.fixed,
function=self.kernel).kernel
except BaseException:
raise # TypeError('Unsupported kernel function ', kernel)
return wi
def _local_fit(self, i, final = True):
wi = self._build_wi(i, self.bw).reshape(-1, 1)
betas, xtx_inv_xt = _compute_betas_gwr(self.y, self.X, wi)
predy = np.dot(self.X[i], betas)[0]
resid = self.y[i] - predy
influ = np.dot(self.X[i], xtx_inv_xt[:, i])
if not final:
return resid * resid, influ
else:
Si = np.dot(self.X[i], xtx_inv_xt).reshape(-1)
CCT = np.diag(np.dot(xtx_inv_xt, xtx_inv_xt.T)).reshape(-1)
Si2 = np.sum(Si**2)
return influ, resid, predy, betas.reshape(-1), CCT, Si2
def fit(self, final = True):
"""
fit GWR models
Attributes
----------
coords : array-like
n*2, collection of n sets of (x,y) coordinates used for
calibration locations
y : array
n*1, dependent variable
X : array
                        n*k, independent variable, excluding the constant
bw : scalar
bandwidth value consisting of either a distance or N
nearest neighbors; user specified or obtained using
sel
kernel : string
type of kernel function used to weight observations;
available options:
'gaussian'
'bisquare'
'exponential'
fixed : boolean
True for distance based kernel function and False for
adaptive (nearest neighbor) kernel function (default)
constant : boolean
True to include intercept (default) in model and False to exclude
intercept
n : integer
number of observations
k : integer
number of independent variables
betas : array
n*k, estimated coefficients
predy : array
n*1, predicted y values
CCT : array
n*k, scaled variance-covariance matrix
df_model : integer
model degrees of freedom
df_resid : integer
residual degrees of freedom
resid : array
                        n*1, residuals of the response
RSS : scalar
                        residual sum of squares
CCT : array
n*k, scaled variance-covariance matrix
ENP : scalar
                        effective number of parameters, which depends on
sigma2
tr_S : float
trace of S (hat) matrix
tr_STS : float
trace of STS matrix
R2 : float
R-squared for the entire model (1- RSS/TSS)
adj_R2 : float
adjusted R-squared for the entire model
aic : float
Akaike information criterion
aicc : float
corrected Akaike information criterion to account
                        for model complexity (smaller
bandwidths)
bic : float
                        Bayesian information criterion
sigma2 : float
sigma squared (residual variance) that has been
corrected to account for the ENP
std_res : array
n*1, standardised residuals
bse : array
n*k, standard errors of parameters (betas)
influ : array
n*1, leading diagonal of S matrix
CooksD : array
n*1, Cook's D
tvalues : array
n*k, local t-statistics
llf : scalar
log-likelihood of the full model; see
                        pysal.contrib.glm.family for family-specific
log-likelihoods
"""
if not final:
RSS = 0
tr_S = 0
aa = 0
for i in range(self.n):
err2, hat = self._local_fit(i, final = False)
aa += err2 / ((1 - hat) ** 2)
RSS += err2
tr_S += hat
llf = -np.log(RSS) * self.n / \
2 - (1 + np.log(np.pi / self.n * 2)) * self.n / 2
return Notfinal(float(RSS), tr_S, float(llf), float(aa), self.n)
else:
influ, resid, predy = np.empty((self.n, 1)), \
np.empty((self.n, 1)), np.empty((self.n, 1))
betas, CCT = np.empty((self.n, self.k)), np.empty((self.n, self.k))
tr_STS = 0
for i in range(self.n):
influi, residi, predyi, betasi, CCTi, tr_STSi = self._local_fit(i)
influ[i] = influi
resid[i] = residi
predy[i] = predyi
betas[i] = betasi
CCT[i] = CCTi
tr_STS += tr_STSi
return Results(self.coords, None, self.y, self.X, self.bw, None,
self.kernel, self.fixed, self.constant, influ, resid, predy,
betas, CCT, tr_STS, model = 'GWR')
class TWR(object):
"""
Temporally Weighted Regression
Parameters
----------
t : array
n*1, time location
y : array
n*1, dependent variable
X : array
                    n*k, independent variable, excluding the constant
h : scalar
bandwidth value consisting of either a distance or N
nearest neighbors; user specified or obtained using
Sel
kernel : string
type of kernel function used to weight observations;
available options:
'gaussian'
'bisquare'
'exponential'
fixed : boolean
True for distance based kernel function and False for
adaptive (nearest neighbor) kernel function (default)
constant : boolean
True to include intercept (default) in model and False to exclude
intercept.
Attributes
----------
coords : array-like
n*2, collection of n sets of (x,y) coordinates used for
calibration locations
t : array
n*1, time location
y : array
n*1, dependent variable
X : array
                    n*k, independent variable, excluding the constant
h : scalar
bandwidth value consisting of either a distance or N
nearest neighbors; user specified or obtained using
Sel
kernel : string
type of kernel function used to weight observations;
available options:
'gaussian'
'bisquare'
'exponential'
fixed : boolean
True for distance based kernel function and False for
adaptive (nearest neighbor) kernel function (default)
constant : boolean
True to include intercept (default) in model and False to exclude
intercept
n : integer
number of observations
k : integer
number of independent variables
Examples
--------
>>> import numpy as np
>>> from gtwr.model import TWR
>>> np.random.seed(1)
>>> t = np.array([(i-1)//144 for i in range(1,1729)]).reshape(-1,1)
>>> x1 = np.random.uniform(0,1,(1728,1))
>>> x2 = np.random.uniform(0,1,(1728,1))
>>> epsilon = np.random.randn(1728,1)
>>> beta0 = 5
>>> beta1 = 3 + t/6
>>> beta2 = 3+(36-(6-t)**2)/128
>>> y = beta0 + beta1 * x1 + beta2 * x2 + epsilon
>>> X = np.hstack([x1,x2])
>>> twr = TWR(t, y, X, 0.8, kernel='gaussian', fixed=True).fit()
>>> print(twr.R2)
0.697504378621436
"""
def __init__(self, t, y, X, h, kernel = 'gaussian',
fixed = False, constant = True):
self.t = t
self.y = y
self.n = X.shape[0]
self.h = h
self.kernel = kernel
self.fixed = fixed
self.constant = constant
if self.constant:
self.X = np.hstack([np.ones((self.n, 1)), X])
else:
self.X = X
self.k = self.X.shape[1]
def _build_wi(self, i, h):
try:
wi = GWRKernel(i, self.t, h, fixed=self.fixed,
function=self.kernel).kernel
except BaseException:
raise # TypeError('Unsupported kernel function ', kernel)
return wi
def _local_fit(self, i, final = True):
wi = self._build_wi(i, self.h).reshape(-1, 1)
X_derivative = self.X * (self.t-self.t[i])
X_new = np.hstack([self.X, X_derivative])
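        # Added note (interpretation): stacking X with X * (t - t_i) is a local first-order
        # (Taylor) expansion of the time-varying coefficients around t_i, so the first k
        # entries of the weighted least-squares solution below estimate beta(t_i) itself.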
xT = (X_new * wi).T
xtx_inv_xt = np.dot(np.linalg.inv(np.dot(xT, X_new)), xT)
xstack = np.hstack([self.X[i].reshape(1,self.k),np.zeros((1,self.k))])
predy = (np.dot(np.dot(xstack, xtx_inv_xt), self.y))[0]
resid = self.y[i] - predy
influ = np.dot(xstack, xtx_inv_xt[:,i])[0]
if not final:
return resid * resid, influ
else:
betas = np.dot(xtx_inv_xt, self.y)[:self.k]
zeros = np.zeros((1,self.k))
Si = np.dot(np.hstack([self.X[i].reshape(1,self.k),zeros]), xtx_inv_xt).reshape(-1)
Si2 = np.sum(Si**2)
return influ, resid, predy, betas.reshape(-1), Si2
def fit(self, final = True):
"""
        fit TWR models
Attributes
----------
coords : array-like
n*2, collection of n sets of (x,y) coordinates used for
calibration locations
y : array
n*1, dependent variable
X : array
                        n*k, independent variable, excluding the constant
h : scalar
bandwidth value consisting of either a distance or N
nearest neighbors; user specified or obtained using
sel
kernel : string
type of kernel function used to weight observations;
available options:
'gaussian'
'bisquare'
'exponential'
fixed : boolean
True for distance based kernel function and False for
adaptive (nearest neighbor) kernel function (default)
constant : boolean
True to include intercept (default) in model and False to exclude
intercept
n : integer
number of observations
k : integer
number of independent variables
betas : array
n*k, estimated coefficients
predy : array
n*1, predicted y values
CCT : array
n*k, scaled variance-covariance matrix
df_model : integer
model degrees of freedom
df_resid : integer
residual degrees of freedom
resid : array
                        n*1, residuals of the response
RSS : scalar
                        residual sum of squares
CCT : array
n*k, scaled variance-covariance matrix
ENP : scalar
                        effective number of parameters, which depends on
sigma2
tr_S : float
trace of S (hat) matrix
tr_STS : float
trace of STS matrix
R2 : float
R-squared for the entire model (1- RSS/TSS)
adj_R2 : float
adjusted R-squared for the entire model
aic : float
Akaike information criterion
aicc : float
corrected Akaike information criterion to account
                        for model complexity (smaller
bandwidths)
bic : float
                        Bayesian information criterion
sigma2 : float
sigma squared (residual variance) that has been
corrected to account for the ENP
std_res : array
n*1, standardised residuals
bse : array
n*k, standard errors of parameters (betas)
influ : array
n*1, leading diagonal of S matrix
CooksD : array
n*1, Cook's D
tvalues : array
n*k, local t-statistics
llf : scalar
log-likelihood of the full model; see
                        pysal.contrib.glm.family for family-specific
log-likelihoods
"""
if not final:
RSS = 0
tr_S = 0
aa = 0
for i in range(self.n):
err2, hat = self._local_fit(i, final = False)
aa += err2 / ((1 - hat) ** 2)
RSS += err2
tr_S += hat
            llf = -np.log(RSS) * self.n / 2 - (1 + np.log(np.pi / self.n * 2)) * self.n / 2
import os
import numpy as np
import pandas as pd
import pickle
import glob
import shutil
import logging
import re, sys, joblib, bz2
import multiprocessing as mp
import tensorflow as tf
from joblib import Parallel, delayed
from Fuzzy_clustering.ver_tf2.CNN_tf_core_3d import CNN_3d
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
# from scipy.interpolate import interp2d
# from util_database import write_database
# from Fuzzy_clustering.ver_tf2.Forecast_model import forecast_model
from Fuzzy_clustering.ver_tf2.utils_for_forecast import split_continuous
from Fuzzy_clustering.ver_tf2.CNN_predict_3d import CNN_3d_predict
def optimize_cnn(cnn, kernels, hsize, cnn_max_iterations, cnn_learning_rate, gpu, filters):
flag = False
for _ in range(3):
try:
acc_old_cnn, scale_cnn, model_cnn = cnn.train_cnn(max_iterations=cnn_max_iterations,
learning_rate=cnn_learning_rate, kernels=kernels, h_size=hsize, gpu_id=gpu,filters=filters)
flag=True
except:
filters = int(filters/2)
pass
if not flag:
acc_old_cnn=np.inf
scale_cnn=None
model_cnn=None
return acc_old_cnn, kernels, hsize, scale_cnn, model_cnn, cnn.pool_size, cnn.trial, cnn_learning_rate
def predict(q, H, model):
tf.config.set_soft_device_placement(True)
pred = model.predict(H)
q.put((pred[0]))
class cnn_3d_model():
def __init__(self, static_data, rated, cluster_dir):
self.static_data_all = static_data
self.static_data = static_data['CNN']
self.rated = rated
self.cluster = os.path.basename(cluster_dir)
self.cluster_cnn_dir = os.path.join(cluster_dir, 'CNN_3d')
self.model_dir = os.path.join(self.cluster_cnn_dir, 'model')
self.cluster_dir = cluster_dir
self.istrained = False
if not os.path.exists(self.model_dir):
os.makedirs(self.model_dir)
try:
self.load(self.model_dir)
except:
pass
def train_cnn(self, X, y):
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.FileHandler(os.path.join(self.model_dir, 'log_train_' + self.cluster + '.log'), 'a')
handler.setLevel(logging.INFO)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
print('CNN training...begin for %s ', self.cluster)
logger.info('CNN training...begin for %s ', self.cluster)
if len(y.shape)==1:
y = y.reshape(-1, 1)
X_train, X_test, y_train, y_test = split_continuous(X, y, test_size=0.15, random_state=42)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.15, random_state=42)
results =[]
for trial in [0, 3]:
if trial != 0:
pool_size = [1, 2, 2]
else:
pool_size = [2, 1]
cnn = CNN_3d(self.static_data, self.rated, X_train, y_train, X_val, y_val, X_test, y_test, pool_size, trial=trial)
self.acc_cnn = np.inf
gpus = np.tile(self.static_data['gpus'], 4)
if trial==0:
kernels=[
# [2, 2],
[2, 4],
[4, 2],
# [4, 4]
]
else:
kernels = [
[2, 4, 4],
# [2, 2, 2],
[3, 2, 2],
# [3, 4, 4]
]
# res = optimize_cnn(cnn, kernels[0], self.static_data['h_size'],
# self.static_data['max_iterations'],
# self.static_data['learning_rate'],
# gpus[0],int(self.static_data['filters']))
res = Parallel(n_jobs=len(self.static_data['gpus']))(
delayed(optimize_cnn)(cnn, kernels[k], self.static_data['h_size'],
self.static_data['max_iterations'],
self.static_data['learning_rate'],
gpus[int(k)], int(self.static_data['filters'])) for k in range(2))
results += res
for r in results:
logger.info("kernel: %s accuracy cnn: %s", r[1], r[0])
acc_cnn = np.array([r[0] for r in results])
self.acc_cnn, self.best_kernel, hsize, self.scale_cnn, model_cnn, self.pool_size, self.trial, lr= results[acc_cnn.argmin()]
self.model = model_cnn
train_res = pd.DataFrame.from_dict(model_cnn['error_func'], orient='index')
train_res.to_csv(os.path.join(self.model_dir, 'train_result.csv'), header=None)
cnn = CNN_3d(self.static_data, self.rated, X_train, y_train, X_val, y_val, X_test, y_test, self.pool_size,
trial=self.trial)
self.acc_cnn = np.inf
gpus = np.tile(self.static_data['gpus'], 4)
h_size = [
[1024, 256],
[512, 128],
]
results1 = Parallel(n_jobs=len(self.static_data['gpus']))(
delayed(optimize_cnn)(cnn, self.best_kernel, h_size[k],
self.static_data['max_iterations'],
self.static_data['learning_rate'],
gpus[int(k)], int(self.static_data['filters'])) for k in range(2))
for r in results1:
logger.info("num neurons: 1st %s and 2nd %s with accuracy cnn: %s",
*r[2], r[0])
results +=results1
acc_cnn = np.array([r[0] for r in results])
self.acc_cnn, self.best_kernel, self.best_h_size, self.scale_cnn, model_cnn, self.pool_size, self.trial, self.lr= results[acc_cnn.argmin()]
self.model = model_cnn
train_res = pd.DataFrame.from_dict(model_cnn['error_func'], orient='index')
train_res.to_csv(os.path.join(self.model_dir, 'train_result_hsize.csv'), header=None)
self.save(self.model_dir)
# self.acc_cnn = np.inf
# gpus = np.tile(self.static_data['gpus'], 4)
# lrs = [0.5e-5, 1e-4]
#
#
# results1 = Parallel(n_jobs=len(self.static_data['gpus']))(
# delayed(optimize_cnn)(cnn, self.best_kernel, self.best_h_size,
# self.static_data['max_iterations'],
# lrs[k],
# gpus[k], int(self.static_data['filters'])) for k in [0, 1])
# for r in results1:
# logger.info("Learning rate: %s accuracy cnn: %s", r[7], r[0])
#
# results +=results1
# acc_cnn = np.array([r[0] for r in results])
# self.acc_cnn, self.best_kernel, self.best_h_size, self.scale_cnn, model_cnn, self.pool_size, self.trial, self.lr = results[acc_cnn.argmin()]
# self.model = model_cnn
# self.save(self.model_dir)
logger.info("Best kernel: %s", self.best_kernel)
logger.info("accuracy cnn: %s", self.acc_cnn)
logger.info("num neurons: 1st %s and 2nd %s", *self.best_h_size)
logger.info("with accuracy cnn: %s", self.acc_cnn)
logger.info("Best learning rate: %s", self.lr)
logger.info("Total accuracy cnn: %s", self.acc_cnn)
logger.info('\n')
self.istrained = True
self.save(self.model_dir)
return self.to_dict()
def to_dict(self):
dict = {}
for k in self.__dict__.keys():
if k not in ['logger', 'static_data', 'model_dir', 'temp_dir', 'cluster_cnn_dir', 'cluster_dir', 'model']:
dict[k] = self.__dict__[k]
return dict
def train_cnn_TL(self, X, y, model, gpu):
if len(y.shape)==1:
y = y.reshape(-1, 1)
print('CNN training...begin for %s' % self.cluster)
X_train, X_test, y_train, y_test = split_continuous(X, y, test_size=0.15, random_state=42)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.15, random_state=42)
cnn = CNN_3d(self.static_data, self.rated, X_train, y_train, X_val, y_val, X_test, y_test, model['pool_size'], model['trial'])
self.acc_cnn = np.inf
gpus = np.tile(gpu, 2)
if not 'lr' in model.keys():
model['lr'] = 5e-5
results = Parallel(n_jobs=len(self.static_data['gpus']))(
delayed(optimize_cnn)(cnn, model['best_kernel'], model['best_h_size'],
self.static_data['max_iterations'],
model['lr'],
gpus[k], int(self.static_data['filters'])) for k in [0])
self.acc_cnn, self.best_kernel, self.best_h_size, self.scale_cnn, model_cnn, self.pool_size, self.trial, self.lr = results[0]
self.model = model_cnn
self.save(self.model_dir)
self.istrained = True
self.save(self.model_dir)
return self.to_dict()
def predict(self, X):
cnn = CNN_3d_predict(self.static_data_all, self.rated, self.cluster_dir)
return cnn.predict(X)
def move_files(self, path1, path2):
for filename in glob.glob(os.path.join(path1, '*.*')):
shutil.copy(filename, path2)
def compute_metrics(self, pred, y, rated):
if rated is None:
rated = y.ravel()
else:
rated = 1
err = np.abs(pred.ravel() - y.ravel()) / rated
sse = np.sum(np.square(pred.ravel() - y.ravel()))
rms = np.sqrt(np.mean(np.square(err)))
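# The method is cut off here in this copy; a minimal sketch of a typical completion
# (an assumption, not the original code) would summarise the error and return it:
#
#     mae = np.mean(err)
#     mse = np.mean(np.square(err))
#     return np.array([sse, rms, mae, mse])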
# Copyright (c) 2003-2019 by <NAME>
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import numpy as np
import treecorr
import os
import coord
import fitsio
from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog, timer
from test_helper import is_ccw, is_ccw_3d
@timer
def test_log_binning():
import math
# Test some basic properties of the base class
def check_arrays(nnn):
np.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
np.testing.assert_almost_equal(nnn.ubin_size * nnn.nubins, nnn.max_u-nnn.min_u)
np.testing.assert_almost_equal(nnn.vbin_size * nnn.nvbins, nnn.max_v-nnn.min_v)
#print('logr = ',nnn.logr1d)
np.testing.assert_equal(nnn.logr1d.shape, (nnn.nbins,) )
np.testing.assert_almost_equal(nnn.logr1d[0], math.log(nnn.min_sep) + 0.5*nnn.bin_size)
np.testing.assert_almost_equal(nnn.logr1d[-1], math.log(nnn.max_sep) - 0.5*nnn.bin_size)
np.testing.assert_equal(nnn.logr.shape, (nnn.nbins, nnn.nubins, 2*nnn.nvbins) )
np.testing.assert_almost_equal(nnn.logr[:,0,0], nnn.logr1d)
np.testing.assert_almost_equal(nnn.logr[:,-1,-1], nnn.logr1d)
assert len(nnn.logr) == nnn.nbins
#print('u = ',nnn.u1d)
np.testing.assert_equal(nnn.u1d.shape, (nnn.nubins,) )
np.testing.assert_almost_equal(nnn.u1d[0], nnn.min_u + 0.5*nnn.ubin_size)
np.testing.assert_almost_equal(nnn.u1d[-1], nnn.max_u - 0.5*nnn.ubin_size)
np.testing.assert_equal(nnn.u.shape, (nnn.nbins, nnn.nubins, 2*nnn.nvbins) )
np.testing.assert_almost_equal(nnn.u[0,:,0], nnn.u1d)
np.testing.assert_almost_equal(nnn.u[-1,:,-1], nnn.u1d)
#print('v = ',nnn.v1d)
np.testing.assert_equal(nnn.v1d.shape, (2*nnn.nvbins,) )
np.testing.assert_almost_equal(nnn.v1d[0], -nnn.max_v + 0.5*nnn.vbin_size)
np.testing.assert_almost_equal(nnn.v1d[-1], nnn.max_v - 0.5*nnn.vbin_size)
np.testing.assert_almost_equal(nnn.v1d[nnn.nvbins], nnn.min_v + 0.5*nnn.vbin_size)
np.testing.assert_almost_equal(nnn.v1d[nnn.nvbins-1], -nnn.min_v - 0.5*nnn.vbin_size)
np.testing.assert_equal(nnn.v.shape, (nnn.nbins, nnn.nubins, 2*nnn.nvbins) )
np.testing.assert_almost_equal(nnn.v[0,0,:], nnn.v1d)
np.testing.assert_almost_equal(nnn.v[-1,-1,:], nnn.v1d)
def check_defaultuv(nnn):
assert nnn.min_u == 0.
assert nnn.max_u == 1.
assert nnn.nubins == np.ceil(1./nnn.ubin_size)
assert nnn.min_v == 0.
assert nnn.max_v == 1.
assert nnn.nvbins == np.ceil(1./nnn.vbin_size)
# Check the different ways to set up the binning:
# Omit bin_size
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, bin_type='LogRUV')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.nbins == 20
check_defaultuv(nnn)
check_arrays(nnn)
# Specify min, max, n for u,v too.
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20,
min_u=0.2, max_u=0.9, nubins=12,
min_v=0., max_v=0.2, nvbins=2)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.nbins == 20
assert nnn.min_u == 0.2
assert nnn.max_u == 0.9
assert nnn.nubins == 12
assert nnn.min_v == 0.
assert nnn.max_v == 0.2
assert nnn.nvbins == 2
check_arrays(nnn)
# Omit min_sep
nnn = treecorr.NNNCorrelation(max_sep=20, nbins=20, bin_size=0.1)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size == 0.1
assert nnn.max_sep == 20.
assert nnn.nbins == 20
check_defaultuv(nnn)
check_arrays(nnn)
# Specify max, n, bs for u,v too.
nnn = treecorr.NNNCorrelation(max_sep=20, nbins=20, bin_size=0.1,
max_u=0.9, nubins=3, ubin_size=0.05,
max_v=0.4, nvbins=4, vbin_size=0.05)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size == 0.1
assert nnn.max_sep == 20.
assert nnn.nbins == 20
assert np.isclose(nnn.ubin_size, 0.05)
assert np.isclose(nnn.min_u, 0.75)
assert nnn.max_u == 0.9
assert nnn.nubins == 3
assert np.isclose(nnn.vbin_size, 0.05)
assert np.isclose(nnn.min_v, 0.2)
assert nnn.max_v == 0.4
assert nnn.nvbins == 4
check_arrays(nnn)
# Omit max_sep
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=20, bin_size=0.1)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size == 0.1
assert nnn.min_sep == 5.
assert nnn.nbins == 20
check_defaultuv(nnn)
check_arrays(nnn)
# Specify min, n, bs for u,v too.
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=20, bin_size=0.1,
min_u=0.7, nubins=4, ubin_size=0.05,
min_v=0.2, nvbins=4, vbin_size=0.05)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_sep == 5.
assert nnn.bin_size == 0.1
assert nnn.nbins == 20
assert nnn.min_u == 0.7
assert np.isclose(nnn.ubin_size, 0.05)
assert nnn.nubins == 4
assert nnn.min_v == 0.2
assert nnn.max_v == 0.4
assert np.isclose(nnn.vbin_size, 0.05)
assert nnn.nvbins == 4
check_arrays(nnn)
# Omit nbins
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size <= 0.1
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
check_defaultuv(nnn)
check_arrays(nnn)
# Specify min, max, bs for u,v too.
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
min_u=0.2, max_u=0.9, ubin_size=0.03,
min_v=0.1, max_v=0.3, vbin_size=0.07)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.bin_size <= 0.1
assert nnn.min_u == 0.2
assert nnn.max_u == 0.9
assert nnn.nubins == 24
assert np.isclose(nnn.ubin_size, 0.7/24)
assert nnn.min_v == 0.1
assert nnn.max_v == 0.3
assert nnn.nvbins == 3
assert np.isclose(nnn.vbin_size, 0.2/3)
check_arrays(nnn)
# If only one of min/max v are set, respect that
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
min_u=0.2, ubin_size=0.03,
min_v=0.2, vbin_size=0.07)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_u == 0.2
assert nnn.max_u == 1.
assert nnn.nubins == 27
assert np.isclose(nnn.ubin_size, 0.8/27)
assert nnn.min_v == 0.2
assert nnn.max_v == 1.
assert nnn.nvbins == 12
assert np.isclose(nnn.vbin_size, 0.8/12)
check_arrays(nnn)
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
max_u=0.2, ubin_size=0.03,
max_v=0.2, vbin_size=0.07)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_u == 0.
assert nnn.max_u == 0.2
assert nnn.nubins == 7
assert np.isclose(nnn.ubin_size, 0.2/7)
assert nnn.min_v == 0.
assert nnn.max_v == 0.2
assert nnn.nvbins == 3
assert np.isclose(nnn.vbin_size, 0.2/3)
check_arrays(nnn)
# If only vbin_size is set for v, automatically figure out others.
# (And if necessary adjust the bin_size down a bit.)
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
ubin_size=0.3, vbin_size=0.3)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size <= 0.1
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.min_u == 0.
assert nnn.max_u == 1.
assert nnn.nubins == 4
assert np.isclose(nnn.ubin_size, 0.25)
assert nnn.min_v == 0.
assert nnn.max_v == 1.
assert nnn.nvbins == 4
assert np.isclose(nnn.vbin_size, 0.25)
check_arrays(nnn)
# If only nvbins is set for v, automatically figure out others.
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
nubins=5, nvbins=5)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size <= 0.1
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.min_u == 0.
assert nnn.max_u == 1.
assert nnn.nubins == 5
assert np.isclose(nnn.ubin_size,0.2)
assert nnn.min_v == 0.
assert nnn.max_v == 1.
assert nnn.nvbins == 5
assert np.isclose(nnn.vbin_size,0.2)
check_arrays(nnn)
# If both nvbins and vbin_size are set, set min/max automatically
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
ubin_size=0.1, nubins=5,
vbin_size=0.1, nvbins=5)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size <= 0.1
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.ubin_size == 0.1
assert nnn.nubins == 5
assert nnn.max_u == 1.
assert np.isclose(nnn.min_u,0.5)
assert nnn.vbin_size == 0.1
assert nnn.nvbins == 5
assert nnn.min_v == 0.
assert np.isclose(nnn.max_v,0.5)
check_arrays(nnn)
assert_raises(TypeError, treecorr.NNNCorrelation)
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5)
assert_raises(TypeError, treecorr.NNNCorrelation, max_sep=20)
assert_raises(TypeError, treecorr.NNNCorrelation, bin_size=0.1)
assert_raises(TypeError, treecorr.NNNCorrelation, nbins=20)
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, max_sep=20)
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, bin_size=0.1)
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, nbins=20)
assert_raises(TypeError, treecorr.NNNCorrelation, max_sep=20, bin_size=0.1)
assert_raises(TypeError, treecorr.NNNCorrelation, max_sep=20, nbins=20)
assert_raises(TypeError, treecorr.NNNCorrelation, bin_size=0.1, nbins=20)
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1, nbins=20)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, bin_size=0.1)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20,
bin_type='Log')
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20,
bin_type='Linear')
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20,
bin_type='TwoD')
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20,
bin_type='Invalid')
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_u=0.3, max_u=0.9, ubin_size=0.1, nubins=6)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_u=0.9, max_u=0.3)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_u=-0.1, max_u=0.3)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_u=0.1, max_u=1.3)
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_v=0.1, max_v=0.9, vbin_size=0.1, nvbins=9)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_v=0.9, max_v=0.3)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_v=-0.1, max_v=0.3)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_v=0.1, max_v=1.3)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20,
split_method='invalid')
# Check the use of sep_units
# radians
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='radians')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
np.testing.assert_almost_equal(nnn.min_sep, 5.)
np.testing.assert_almost_equal(nnn.max_sep, 20.)
np.testing.assert_almost_equal(nnn._min_sep, 5.)
np.testing.assert_almost_equal(nnn._max_sep, 20.)
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.nbins == 20
check_defaultuv(nnn)
check_arrays(nnn)
# arcsec
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='arcsec')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
np.testing.assert_almost_equal(nnn.min_sep, 5.)
np.testing.assert_almost_equal(nnn.max_sep, 20.)
np.testing.assert_almost_equal(nnn._min_sep, 5. * math.pi/180/3600)
np.testing.assert_almost_equal(nnn._max_sep, 20. * math.pi/180/3600)
assert nnn.nbins == 20
np.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
# Note that logr is in the separation units, not radians.
np.testing.assert_almost_equal(nnn.logr[0], math.log(5) + 0.5*nnn.bin_size)
np.testing.assert_almost_equal(nnn.logr[-1], math.log(20) - 0.5*nnn.bin_size)
assert len(nnn.logr) == nnn.nbins
check_defaultuv(nnn)
# arcmin
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='arcmin')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
np.testing.assert_almost_equal(nnn.min_sep, 5.)
np.testing.assert_almost_equal(nnn.max_sep, 20.)
np.testing.assert_almost_equal(nnn._min_sep, 5. * math.pi/180/60)
np.testing.assert_almost_equal(nnn._max_sep, 20. * math.pi/180/60)
assert nnn.nbins == 20
np.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
np.testing.assert_almost_equal(nnn.logr[0], math.log(5) + 0.5*nnn.bin_size)
np.testing.assert_almost_equal(nnn.logr[-1], math.log(20) - 0.5*nnn.bin_size)
assert len(nnn.logr) == nnn.nbins
check_defaultuv(nnn)
# degrees
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='degrees')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
np.testing.assert_almost_equal(nnn.min_sep, 5.)
np.testing.assert_almost_equal(nnn.max_sep, 20.)
np.testing.assert_almost_equal(nnn._min_sep, 5. * math.pi/180)
np.testing.assert_almost_equal(nnn._max_sep, 20. * math.pi/180)
assert nnn.nbins == 20
np.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
np.testing.assert_almost_equal(nnn.logr[0], math.log(5) + 0.5*nnn.bin_size)
np.testing.assert_almost_equal(nnn.logr[-1], math.log(20) - 0.5*nnn.bin_size)
assert len(nnn.logr) == nnn.nbins
check_defaultuv(nnn)
# hours
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='hours')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
np.testing.assert_almost_equal(nnn.min_sep, 5.)
np.testing.assert_almost_equal(nnn.max_sep, 20.)
np.testing.assert_almost_equal(nnn._min_sep, 5. * math.pi/12)
np.testing.assert_almost_equal(nnn._max_sep, 20. * math.pi/12)
assert nnn.nbins == 20
np.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
np.testing.assert_almost_equal(nnn.logr[0], math.log(5) + 0.5*nnn.bin_size)
np.testing.assert_almost_equal(nnn.logr[-1], math.log(20) - 0.5*nnn.bin_size)
assert len(nnn.logr) == nnn.nbins
check_defaultuv(nnn)
# Check bin_slop
# Start with default behavior
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 1.0
assert nnn.bin_size == 0.1
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.1)
np.testing.assert_almost_equal(nnn.bu, 0.03)
np.testing.assert_almost_equal(nnn.bv, 0.07)
# Explicitly set bin_slop=1.0 does the same thing.
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1, bin_slop=1.0,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 1.0
assert nnn.bin_size == 0.1
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.1)
np.testing.assert_almost_equal(nnn.bu, 0.03)
np.testing.assert_almost_equal(nnn.bv, 0.07)
# Use a smaller bin_slop
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1, bin_slop=0.2,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 0.2
assert nnn.bin_size == 0.1
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.02)
np.testing.assert_almost_equal(nnn.bu, 0.006)
np.testing.assert_almost_equal(nnn.bv, 0.014)
# Use bin_slop == 0
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1, bin_slop=0.0,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 0.0
assert nnn.bin_size == 0.1
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.0)
np.testing.assert_almost_equal(nnn.bu, 0.0)
np.testing.assert_almost_equal(nnn.bv, 0.0)
# Bigger bin_slop
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1, bin_slop=2.0,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07, verbose=0)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 2.0
assert nnn.bin_size == 0.1
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.2)
np.testing.assert_almost_equal(nnn.bu, 0.06)
np.testing.assert_almost_equal(nnn.bv, 0.14)
# With bin_size > 0.1, explicit bin_slop=1.0 is accepted.
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.4, bin_slop=1.0,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07, verbose=0)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 1.0
assert nnn.bin_size == 0.4
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.4)
np.testing.assert_almost_equal(nnn.bu, 0.03)
np.testing.assert_almost_equal(nnn.bv, 0.07)
# But implicit bin_slop is reduced so that b = 0.1
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.4,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_size == 0.4
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.1)
np.testing.assert_almost_equal(nnn.bu, 0.03)
np.testing.assert_almost_equal(nnn.bv, 0.07)
np.testing.assert_almost_equal(nnn.bin_slop, 0.25)
# Separately for each of the three parameters
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.05,
min_u=0., max_u=0.9, ubin_size=0.3,
min_v=0., max_v=0.17, vbin_size=0.17)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_size == 0.05
assert np.isclose(nnn.ubin_size, 0.3)
assert np.isclose(nnn.vbin_size, 0.17)
np.testing.assert_almost_equal(nnn.b, 0.05)
np.testing.assert_almost_equal(nnn.bu, 0.1)
np.testing.assert_almost_equal(nnn.bv, 0.1)
np.testing.assert_almost_equal(nnn.bin_slop, 1.0) # The stored bin_slop is just for lnr
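# The direct-count tests below repeatedly map a triangle's sorted side lengths
# d1 >= d2 >= d3 onto the (r, u, v) parameterisation used by the LogRUV binning.
# A minimal standalone sketch of that mapping, assuming the sides are already sorted:
def triangle_ruv(d1, d2, d3):
    """Return (r, u, v) for side lengths sorted so that d1 >= d2 >= d3 > 0."""
    r = d2                 # the middle side sets the scale
    u = d3 / d2            # shortest over middle, 0 <= u <= 1
    v = (d1 - d2) / d3     # 0 <= v <= 1 before the orientation sign is applied
    return r, u, v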
@timer
def test_direct_count_auto():
# If the catalogs are small enough, we can do a direct count of the number of triangles
# to see if it comes out right. This should exactly match the treecorr code if bin_slop=0.
ngal = 50
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) )
cat = treecorr.Catalog(x=x, y=y)
min_sep = 1.
max_sep = 50.
nbins = 50
min_u = 0.13
max_u = 0.89
nubins = 10
min_v = 0.13
max_v = 0.59
nvbins = 10
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=1)
ddd.process(cat)
log_min_sep = np.log(min_sep)
log_max_sep = np.log(max_sep)
true_ntri = np.zeros( (nbins, nubins, 2*nvbins) )
bin_size = (log_max_sep - log_min_sep) / nbins
ubin_size = (max_u-min_u) / nubins
vbin_size = (max_v-min_v) / nvbins
for i in range(ngal):
for j in range(i+1,ngal):
for k in range(j+1,ngal):
dij = np.sqrt((x[i]-x[j])**2 + (y[i]-y[j])**2)
dik = np.sqrt((x[i]-x[k])**2 + (y[i]-y[k])**2)
djk = np.sqrt((x[j]-x[k])**2 + (y[j]-y[k])**2)
if dij == 0.: continue
if dik == 0.: continue
if djk == 0.: continue
if dij < dik:
if dik < djk:
d3 = dij; d2 = dik; d1 = djk
ccw = is_ccw(x[i],y[i],x[j],y[j],x[k],y[k])
elif dij < djk:
d3 = dij; d2 = djk; d1 = dik
ccw = is_ccw(x[j],y[j],x[i],y[i],x[k],y[k])
else:
d3 = djk; d2 = dij; d1 = dik
ccw = is_ccw(x[j],y[j],x[k],y[k],x[i],y[i])
else:
if dij < djk:
d3 = dik; d2 = dij; d1 = djk
ccw = is_ccw(x[i],y[i],x[k],y[k],x[j],y[j])
elif dik < djk:
d3 = dik; d2 = djk; d1 = dij
ccw = is_ccw(x[k],y[k],x[i],y[i],x[j],y[j])
else:
d3 = djk; d2 = dik; d1 = dij
ccw = is_ccw(x[k],y[k],x[j],y[j],x[i],y[i])
r = d2
u = d3/d2
v = (d1-d2)/d3
if r < min_sep or r >= max_sep: continue
if u < min_u or u >= max_u: continue
if v < min_v or v >= max_v: continue
if not ccw:
v = -v
kr = int(np.floor( (np.log(r)-log_min_sep) / bin_size ))
ku = int(np.floor( (u-min_u) / ubin_size ))
if v > 0:
kv = int(np.floor( (v-min_v) / vbin_size )) + nvbins
else:
kv = int(np.floor( (v-(-max_v)) / vbin_size ))
assert 0 <= kr < nbins
assert 0 <= ku < nubins
assert 0 <= kv < 2*nvbins
true_ntri[kr,ku,kv] += 1
nz = np.where((ddd.ntri > 0) | (true_ntri > 0))
print('non-zero at:')
print(nz)
print('d1 = ',ddd.meand1[nz])
print('d2 = ',ddd.meand2[nz])
print('d3 = ',ddd.meand3[nz])
print('rnom = ',ddd.rnom[nz])
print('u = ',ddd.u[nz])
print('v = ',ddd.v[nz])
print('ddd.ntri = ',ddd.ntri[nz])
print('true_ntri = ',true_ntri[nz])
print('diff = ',ddd.ntri[nz] - true_ntri[nz])
np.testing.assert_array_equal(ddd.ntri, true_ntri)
# Check that running via the corr3 script works correctly.
file_name = os.path.join('data','nnn_direct_data.dat')
with open(file_name, 'w') as fid:
for i in range(ngal):
fid.write(('%.20f %.20f\n')%(x[i],y[i]))
L = 10*s
nrand = ngal
rx = (rng.random_sample(nrand)-0.5) * L
ry = (rng.random_sample(nrand)-0.5) * L
rcat = treecorr.Catalog(x=rx, y=ry)
rand_file_name = os.path.join('data','nnn_direct_rand.dat')
with open(rand_file_name, 'w') as fid:
for i in range(nrand):
fid.write(('%.20f %.20f\n')%(rx[i],ry[i]))
rrr = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=0, rng=rng)
rrr.process(rcat)
zeta, varzeta = ddd.calculateZeta(rrr)
# Semi-gratuitous check of BinnedCorr3.rng access.
assert rrr.rng is rng
assert ddd.rng is not rng
# First do this via the corr3 function.
config = treecorr.config.read_config('configs/nnn_direct.yaml')
logger = treecorr.config.setup_logger(0)
treecorr.corr3(config, logger)
corr3_output = np.genfromtxt(os.path.join('output','nnn_direct.out'), names=True,
skip_header=1)
print('corr3_output = ',corr3_output)
print('corr3_output.dtype = ',corr3_output.dtype)
print('rnom = ',ddd.rnom.flatten())
print(' ',corr3_output['r_nom'])
np.testing.assert_allclose(corr3_output['r_nom'], ddd.rnom.flatten(), rtol=1.e-3)
print('unom = ',ddd.u.flatten())
print(' ',corr3_output['u_nom'])
np.testing.assert_allclose(corr3_output['u_nom'], ddd.u.flatten(), rtol=1.e-3)
print('vnom = ',ddd.v.flatten())
print(' ',corr3_output['v_nom'])
np.testing.assert_allclose(corr3_output['v_nom'], ddd.v.flatten(), rtol=1.e-3)
print('DDD = ',ddd.ntri.flatten())
print(' ',corr3_output['DDD'])
np.testing.assert_allclose(corr3_output['DDD'], ddd.ntri.flatten(), rtol=1.e-3)
np.testing.assert_allclose(corr3_output['ntri'], ddd.ntri.flatten(), rtol=1.e-3)
print('RRR = ',rrr.ntri.flatten())
print(' ',corr3_output['RRR'])
np.testing.assert_allclose(corr3_output['RRR'], rrr.ntri.flatten(), rtol=1.e-3)
print('zeta = ',zeta.flatten())
print('from corr3 output = ',corr3_output['zeta'])
print('diff = ',corr3_output['zeta']-zeta.flatten())
diff_index = np.where(np.abs(corr3_output['zeta']-zeta.flatten()) > 1.e-5)[0]
print('different at ',diff_index)
print('zeta[diffs] = ',zeta.flatten()[diff_index])
print('corr3.zeta[diffs] = ',corr3_output['zeta'][diff_index])
print('diff[diffs] = ',zeta.flatten()[diff_index] - corr3_output['zeta'][diff_index])
np.testing.assert_allclose(corr3_output['zeta'], zeta.flatten(), rtol=1.e-3)
np.testing.assert_allclose(corr3_output['sigma_zeta'], np.sqrt(varzeta).flatten(), rtol=1.e-3)
# Now calling out to the external corr3 executable.
# This is the only time we test the corr3 executable. All other tests use the corr3 function.
import subprocess
corr3_exe = get_script_name('corr3')
p = subprocess.Popen( [corr3_exe,"configs/nnn_direct.yaml","verbose=0"] )
p.communicate()
corr3_output = np.genfromtxt(os.path.join('output','nnn_direct.out'), names=True,
skip_header=1)
np.testing.assert_allclose(corr3_output['zeta'], zeta.flatten(), rtol=1.e-3)
# Also check compensated
drr = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=0)
rdd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=0)
drr.process(cat, rcat)
rdd.process(rcat, cat)
zeta, varzeta = ddd.calculateZeta(rrr,drr,rdd)
config['nnn_statistic'] = 'compensated'
treecorr.corr3(config, logger)
corr3_output = np.genfromtxt(os.path.join('output','nnn_direct.out'), names=True, skip_header=1)
np.testing.assert_allclose(corr3_output['r_nom'], ddd.rnom.flatten(), rtol=1.e-3)
np.testing.assert_allclose(corr3_output['u_nom'], ddd.u.flatten(), rtol=1.e-3)
np.testing.assert_allclose(corr3_output['v_nom'], ddd.v.flatten(), rtol=1.e-3)
np.testing.assert_allclose(corr3_output['DDD'], ddd.ntri.flatten(), rtol=1.e-3)
np.testing.assert_allclose(corr3_output['ntri'], ddd.ntri.flatten(), rtol=1.e-3)
print('rrr.tot = ',rrr.tot)
print('ddd.tot = ',ddd.tot)
print('drr.tot = ',drr.tot)
print('rdd.tot = ',rdd.tot)
rrrf = ddd.tot / rrr.tot
drrf = ddd.tot / drr.tot
rddf = ddd.tot / rdd.tot
np.testing.assert_allclose(corr3_output['RRR'], rrr.ntri.flatten() * rrrf, rtol=1.e-3)
np.testing.assert_allclose(corr3_output['DRR'], drr.ntri.flatten() * drrf, rtol=1.e-3)
np.testing.assert_allclose(corr3_output['RDD'], rdd.ntri.flatten() * rddf, rtol=1.e-3)
np.testing.assert_allclose(corr3_output['zeta'], zeta.flatten(), rtol=1.e-3)
np.testing.assert_allclose(corr3_output['sigma_zeta'], np.sqrt(varzeta).flatten(), rtol=1.e-3)
# Repeat with bin_slop=0, since the code flow is different from brute=True
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1)
ddd.process(cat)
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
# And again with no top-level recursion
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1, max_top=0)
ddd.process(cat)
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
# And compare to the cross correlation
# Here, we get 6x as much, since each triangle is discovered 6 times.
ddd.clear()
ddd.process(cat,cat,cat, num_threads=2)
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(ddd.ntri, 6*true_ntri)
# With the real CrossCorrelation class, each of the 6 correlations should end up being
# the same thing (without the extra factor of 6).
dddc = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1, max_top=0)
dddc.process(cat,cat,cat, num_threads=2)
# All 6 correlations are equal.
for d in [dddc.n1n2n3, dddc.n1n3n2, dddc.n2n1n3, dddc.n2n3n1, dddc.n3n1n2, dddc.n3n2n1]:
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(d.ntri, true_ntri)
# Or with 2 argument version, finds each triangle 3 times.
ddd.process(cat,cat, num_threads=2)
np.testing.assert_array_equal(ddd.ntri, 3*true_ntri)
# Again, NNNCrossCorrelation gets it right in each permutation.
dddc.process(cat,cat, num_threads=2)
for d in [dddc.n1n2n3, dddc.n1n3n2, dddc.n2n1n3, dddc.n2n3n1, dddc.n3n1n2, dddc.n3n2n1]:
np.testing.assert_array_equal(d.ntri, true_ntri)
# Invalid to omit file_name
config['verbose'] = 0
del config['file_name']
with assert_raises(TypeError):
treecorr.corr3(config)
config['file_name'] = 'data/nnn_direct_data.dat'
# OK to not have rand_file_name
# Also, check the automatic setting of output_dots=True when verbose=2.
# It's not too annoying if we also set max_top = 0.
del config['rand_file_name']
config['verbose'] = 2
config['max_top'] = 0
treecorr.corr3(config)
data = np.genfromtxt(config['nnn_file_name'], names=True, skip_header=1)
np.testing.assert_array_equal(data['ntri'], true_ntri.flatten())
assert 'zeta' not in data.dtype.names
# Check a few basic operations with a NNNCorrelation object.
do_pickle(ddd)
ddd2 = ddd.copy()
ddd2 += ddd
np.testing.assert_allclose(ddd2.ntri, 2*ddd.ntri)
np.testing.assert_allclose(ddd2.weight, 2*ddd.weight)
np.testing.assert_allclose(ddd2.meand1, 2*ddd.meand1)
np.testing.assert_allclose(ddd2.meand2, 2*ddd.meand2)
np.testing.assert_allclose(ddd2.meand3, 2*ddd.meand3)
np.testing.assert_allclose(ddd2.meanlogd1, 2*ddd.meanlogd1)
np.testing.assert_allclose(ddd2.meanlogd2, 2*ddd.meanlogd2)
np.testing.assert_allclose(ddd2.meanlogd3, 2*ddd.meanlogd3)
np.testing.assert_allclose(ddd2.meanu, 2*ddd.meanu)
np.testing.assert_allclose(ddd2.meanv, 2*ddd.meanv)
ddd2.clear()
ddd2 += ddd
np.testing.assert_allclose(ddd2.ntri, ddd.ntri)
np.testing.assert_allclose(ddd2.weight, ddd.weight)
np.testing.assert_allclose(ddd2.meand1, ddd.meand1)
np.testing.assert_allclose(ddd2.meand2, ddd.meand2)
np.testing.assert_allclose(ddd2.meand3, ddd.meand3)
np.testing.assert_allclose(ddd2.meanlogd1, ddd.meanlogd1)
np.testing.assert_allclose(ddd2.meanlogd2, ddd.meanlogd2)
np.testing.assert_allclose(ddd2.meanlogd3, ddd.meanlogd3)
np.testing.assert_allclose(ddd2.meanu, ddd.meanu)
np.testing.assert_allclose(ddd2.meanv, ddd.meanv)
ascii_name = 'output/nnn_ascii.txt'
ddd.write(ascii_name, precision=16)
ddd3 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
ddd3.read(ascii_name)
np.testing.assert_allclose(ddd3.ntri, ddd.ntri)
np.testing.assert_allclose(ddd3.weight, ddd.weight)
np.testing.assert_allclose(ddd3.meand1, ddd.meand1)
np.testing.assert_allclose(ddd3.meand2, ddd.meand2)
np.testing.assert_allclose(ddd3.meand3, ddd.meand3)
np.testing.assert_allclose(ddd3.meanlogd1, ddd.meanlogd1)
np.testing.assert_allclose(ddd3.meanlogd2, ddd.meanlogd2)
np.testing.assert_allclose(ddd3.meanlogd3, ddd.meanlogd3)
np.testing.assert_allclose(ddd3.meanu, ddd.meanu)
np.testing.assert_allclose(ddd3.meanv, ddd.meanv)
with assert_raises(TypeError):
ddd2 += config
ddd4 = treecorr.NNNCorrelation(min_sep=min_sep/2, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd4
ddd5 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep*2, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd5
ddd6 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins*2,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd6
ddd7 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u-0.1, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd7
ddd8 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u+0.1, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd8
ddd9 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins*2,
min_v=min_v, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd9
ddd10 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v-0.1, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd10
ddd11 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v+0.1, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd11
ddd12 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins*2)
with assert_raises(ValueError):
ddd2 += ddd12
# Check that adding results with different coords or metric emits a warning.
cat2 = treecorr.Catalog(x=x, y=y, z=x)
with CaptureLog() as cl:
ddd13 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
logger=cl.logger)
ddd13.process_auto(cat2)
ddd13 += ddd2
print(cl.output)
assert "Detected a change in catalog coordinate systems" in cl.output
with CaptureLog() as cl:
ddd14 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
logger=cl.logger)
ddd14.process_auto(cat2, metric='Arc')
ddd14 += ddd2
assert "Detected a change in metric" in cl.output
fits_name = 'output/nnn_fits.fits'
ddd.write(fits_name)
ddd15 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
ddd15.read(fits_name)
np.testing.assert_allclose(ddd15.ntri, ddd.ntri)
np.testing.assert_allclose(ddd15.weight, ddd.weight)
np.testing.assert_allclose(ddd15.meand1, ddd.meand1)
np.testing.assert_allclose(ddd15.meand2, ddd.meand2)
np.testing.assert_allclose(ddd15.meand3, ddd.meand3)
np.testing.assert_allclose(ddd15.meanlogd1, ddd.meanlogd1)
np.testing.assert_allclose(ddd15.meanlogd2, ddd.meanlogd2)
np.testing.assert_allclose(ddd15.meanlogd3, ddd.meanlogd3)
np.testing.assert_allclose(ddd15.meanu, ddd.meanu)
np.testing.assert_allclose(ddd15.meanv, ddd.meanv)
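# With only a random catalog, calculateZeta(rrr) in the test above reduces to the simple
# estimator zeta = DDD/RRR - 1, with the random triangle counts rescaled by the ratio of
# total triangle counts.  A rough sketch of that calculation for unit weights (an
# illustration, not the library's implementation):
def simple_zeta(ddd_ntri, ddd_tot, rrr_ntri, rrr_tot):
    rrrf = ddd_tot / rrr_tot            # scale randoms to the data normalisation
    return ddd_ntri / (rrr_ntri * rrrf) - 1.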
@timer
def test_direct_count_cross():
# If the catalogs are small enough, we can do a direct count of the number of triangles
# to see if it comes out right. This should exactly match the treecorr code if brute=True
ngal = 50
s = 10.
rng = np.random.RandomState(8675309)
x1 = rng.normal(0,s, (ngal,) )
y1 = rng.normal(0,s, (ngal,) )
cat1 = treecorr.Catalog(x=x1, y=y1)
x2 = rng.normal(0,s, (ngal,) )
y2 = rng.normal(0,s, (ngal,) )
cat2 = treecorr.Catalog(x=x2, y=y2)
x3 = rng.normal(0,s, (ngal,) )
y3 = rng.normal(0,s, (ngal,) )
cat3 = treecorr.Catalog(x=x3, y=y3)
min_sep = 1.
max_sep = 50.
nbins = 50
min_u = 0.13
max_u = 0.89
nubins = 10
min_v = 0.13
max_v = 0.59
nvbins = 10
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=1)
ddd.process(cat1, cat2, cat3)
#print('ddd.ntri = ',ddd.ntri)
log_min_sep = np.log(min_sep)
log_max_sep = np.log(max_sep)
true_ntri_123 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_132 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_213 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_231 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_312 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_321 = np.zeros( (nbins, nubins, 2*nvbins) )
bin_size = (log_max_sep - log_min_sep) / nbins
ubin_size = (max_u-min_u) / nubins
vbin_size = (max_v-min_v) / nvbins
for i in range(ngal):
for j in range(ngal):
for k in range(ngal):
dij = np.sqrt((x1[i]-x2[j])**2 + (y1[i]-y2[j])**2)
dik = np.sqrt((x1[i]-x3[k])**2 + (y1[i]-y3[k])**2)
djk = np.sqrt((x2[j]-x3[k])**2 + (y2[j]-y3[k])**2)
if dij == 0.: continue
if dik == 0.: continue
if djk == 0.: continue
if dij < dik:
if dik < djk:
d3 = dij; d2 = dik; d1 = djk
ccw = is_ccw(x1[i],y1[i],x2[j],y2[j],x3[k],y3[k])
true_ntri = true_ntri_123
elif dij < djk:
d3 = dij; d2 = djk; d1 = dik
ccw = is_ccw(x2[j],y2[j],x1[i],y1[i],x3[k],y3[k])
true_ntri = true_ntri_213
else:
d3 = djk; d2 = dij; d1 = dik
ccw = is_ccw(x2[j],y2[j],x3[k],y3[k],x1[i],y1[i])
true_ntri = true_ntri_231
else:
if dij < djk:
d3 = dik; d2 = dij; d1 = djk
ccw = is_ccw(x1[i],y1[i],x3[k],y3[k],x2[j],y2[j])
true_ntri = true_ntri_132
elif dik < djk:
d3 = dik; d2 = djk; d1 = dij
ccw = is_ccw(x3[k],y3[k],x1[i],y1[i],x2[j],y2[j])
true_ntri = true_ntri_312
else:
d3 = djk; d2 = dik; d1 = dij
ccw = is_ccw(x3[k],y3[k],x2[j],y2[j],x1[i],y1[i])
true_ntri = true_ntri_321
r = d2
u = d3/d2
v = (d1-d2)/d3
if r < min_sep or r >= max_sep: continue
if u < min_u or u >= max_u: continue
if v < min_v or v >= max_v: continue
if not ccw:
v = -v
kr = int(np.floor( (np.log(r)-log_min_sep) / bin_size ))
ku = int(np.floor( (u-min_u) / ubin_size ))
if v > 0:
kv = int(np.floor( (v-min_v) / vbin_size )) + nvbins
else:
kv = int(np.floor( (v-(-max_v)) / vbin_size ))
assert 0 <= kr < nbins
assert 0 <= ku < nubins
assert 0 <= kv < 2*nvbins
true_ntri[kr,ku,kv] += 1
# With the regular NNNCorrelation class, we end up with the sum of all permutations.
true_ntri_sum = true_ntri_123 + true_ntri_132 + true_ntri_213 + true_ntri_231 +\
true_ntri_312 + true_ntri_321
#print('true_ntri = ',true_ntri_sum)
#print('diff = ',ddd.ntri - true_ntri_sum)
np.testing.assert_array_equal(ddd.ntri, true_ntri_sum)
# Now repeat with the full CrossCorrelation class, which distinguishes the permutations.
dddc = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=1)
dddc.process(cat1, cat2, cat3)
#print('true_ntri_123 = ',true_ntri_123)
#print('diff = ',dddc.n1n2n3.ntri - true_ntri_123)
np.testing.assert_array_equal(dddc.n1n2n3.ntri, true_ntri_123)
np.testing.assert_array_equal(dddc.n1n3n2.ntri, true_ntri_132)
np.testing.assert_array_equal(dddc.n2n1n3.ntri, true_ntri_213)
np.testing.assert_array_equal(dddc.n2n3n1.ntri, true_ntri_231)
np.testing.assert_array_equal(dddc.n3n1n2.ntri, true_ntri_312)
np.testing.assert_array_equal(dddc.n3n2n1.ntri, true_ntri_321)
# Repeat with binslop = 0
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1)
ddd.process(cat1, cat2, cat3)
#print('bin_slop=0: ddd.ntri = ',ddd.ntri)
#print('diff = ',ddd.ntri - true_ntri_sum)
np.testing.assert_array_equal(ddd.ntri, true_ntri_sum)
# And again with no top-level recursion
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1, max_top=0)
ddd.process(cat1, cat2, cat3)
#print('max_top = 0: ddd.ntri = ',ddd.ntri)
#print('true_ntri = ',true_ntri_sum)
#print('diff = ',ddd.ntri - true_ntri_sum)
np.testing.assert_array_equal(ddd.ntri, true_ntri_sum)
# Error to have cat3, but not cat2
with assert_raises(ValueError):
ddd.process(cat1, cat3=cat3)
# Check a few basic operations with an NNNCrossCorrelation object.
do_pickle(dddc)
dddc2 = dddc.copy()
dddc2 += dddc
for perm in ['n1n2n3', 'n1n3n2', 'n2n1n3', 'n2n3n1', 'n3n1n2', 'n3n2n1']:
d2 = getattr(dddc2, perm)
d1 = getattr(dddc, perm)
np.testing.assert_allclose(d2.ntri, 2*d1.ntri)
np.testing.assert_allclose(d2.meand1, 2*d1.meand1)
np.testing.assert_allclose(d2.meand2, 2*d1.meand2)
np.testing.assert_allclose(d2.meand3, 2*d1.meand3)
np.testing.assert_allclose(d2.meanlogd1, 2*d1.meanlogd1)
np.testing.assert_allclose(d2.meanlogd2, 2*d1.meanlogd2)
np.testing.assert_allclose(d2.meanlogd3, 2*d1.meanlogd3)
np.testing.assert_allclose(d2.meanu, 2*d1.meanu)
np.testing.assert_allclose(d2.meanv, 2*d1.meanv)
dddc2.clear()
dddc2 += dddc
for perm in ['n1n2n3', 'n1n3n2', 'n2n1n3', 'n2n3n1', 'n3n1n2', 'n3n2n1']:
d2 = getattr(dddc2, perm)
d1 = getattr(dddc, perm)
np.testing.assert_allclose(d2.ntri, d1.ntri)
np.testing.assert_allclose(d2.meand1, d1.meand1)
np.testing.assert_allclose(d2.meand2, d1.meand2)
np.testing.assert_allclose(d2.meand3, d1.meand3)
np.testing.assert_allclose(d2.meanlogd1, d1.meanlogd1)
np.testing.assert_allclose(d2.meanlogd2, d1.meanlogd2)
np.testing.assert_allclose(d2.meanlogd3, d1.meanlogd3)
np.testing.assert_allclose(d2.meanu, d1.meanu)
np.testing.assert_allclose(d2.meanv, d1.meanv)
with assert_raises(TypeError):
dddc2 += {} # not an NNNCrossCorrelation
with assert_raises(TypeError):
dddc2 += ddd # not an NNNCrossCorrelation
dddc4 = treecorr.NNNCrossCorrelation(min_sep=min_sep/2, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
dddc2 += dddc4 # binning doesn't match
# Test I/O
ascii_name = 'output/nnnc_ascii.txt'
dddc.write(ascii_name, precision=16)
dddc3 = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
dddc3.read(ascii_name)
for perm in ['n1n2n3', 'n1n3n2', 'n2n1n3', 'n2n3n1', 'n3n1n2', 'n3n2n1']:
d2 = getattr(dddc3, perm)
d1 = getattr(dddc, perm)
np.testing.assert_allclose(d2.ntri, d1.ntri)
np.testing.assert_allclose(d2.meand1, d1.meand1)
np.testing.assert_allclose(d2.meand2, d1.meand2)
np.testing.assert_allclose(d2.meand3, d1.meand3)
np.testing.assert_allclose(d2.meanlogd1, d1.meanlogd1)
np.testing.assert_allclose(d2.meanlogd2, d1.meanlogd2)
np.testing.assert_allclose(d2.meanlogd3, d1.meanlogd3)
np.testing.assert_allclose(d2.meanu, d1.meanu)
np.testing.assert_allclose(d2.meanv, d1.meanv)
fits_name = 'output/nnnc_fits.fits'
dddc.write(fits_name)
dddc4 = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
dddc4.read(fits_name)
for perm in ['n1n2n3', 'n1n3n2', 'n2n1n3', 'n2n3n1', 'n3n1n2', 'n3n2n1']:
d2 = getattr(dddc4, perm)
d1 = getattr(dddc, perm)
np.testing.assert_allclose(d2.ntri, d1.ntri)
np.testing.assert_allclose(d2.meand1, d1.meand1)
np.testing.assert_allclose(d2.meand2, d1.meand2)
np.testing.assert_allclose(d2.meand3, d1.meand3)
np.testing.assert_allclose(d2.meanlogd1, d1.meanlogd1)
np.testing.assert_allclose(d2.meanlogd2, d1.meanlogd2)
np.testing.assert_allclose(d2.meanlogd3, d1.meanlogd3)
np.testing.assert_allclose(d2.meanu, d1.meanu)
np.testing.assert_allclose(d2.meanv, d1.meanv)
try:
import h5py
except ImportError:
print('Skipping hdf5 output file, since h5py not installed.')
return
hdf5_name = 'output/nnnc_hdf5.hdf5'
dddc.write(hdf5_name)
dddc5 = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
dddc5.read(hdf5_name)
for perm in ['n1n2n3', 'n1n3n2', 'n2n1n3', 'n2n3n1', 'n3n1n2', 'n3n2n1']:
d2 = getattr(dddc5, perm)
d1 = getattr(dddc, perm)
np.testing.assert_allclose(d2.ntri, d1.ntri)
np.testing.assert_allclose(d2.meand1, d1.meand1)
np.testing.assert_allclose(d2.meand2, d1.meand2)
np.testing.assert_allclose(d2.meand3, d1.meand3)
np.testing.assert_allclose(d2.meanlogd1, d1.meanlogd1)
np.testing.assert_allclose(d2.meanlogd2, d1.meanlogd2)
np.testing.assert_allclose(d2.meanlogd3, d1.meanlogd3)
np.testing.assert_allclose(d2.meanu, d1.meanu)
np.testing.assert_allclose(d2.meanv, d1.meanv)
@timer
def test_direct_count_cross12():
# Check the 1-2 cross correlation
ngal = 50
s = 10.
rng = np.random.RandomState(8675309)
x1 = rng.normal(0,s, (ngal,) )
y1 = rng.normal(0,s, (ngal,) )
cat1 = treecorr.Catalog(x=x1, y=y1)
x2 = rng.normal(0,s, (ngal,) )
y2 = rng.normal(0,s, (ngal,) )
cat2 = treecorr.Catalog(x=x2, y=y2)
min_sep = 1.
max_sep = 50.
nbins = 50
min_u = 0.13
max_u = 0.89
nubins = 10
min_v = 0.13
max_v = 0.59
nvbins = 10
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=1)
ddd.process(cat1, cat2)
log_min_sep = np.log(min_sep)
log_max_sep = np.log(max_sep)
true_ntri_122 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_212 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_221 = np.zeros( (nbins, nubins, 2*nvbins) )
bin_size = (log_max_sep - log_min_sep) / nbins
ubin_size = (max_u-min_u) / nubins
vbin_size = (max_v-min_v) / nvbins
for i in range(ngal):
for j in range(ngal):
for k in range(j+1,ngal):
dij = np.sqrt((x1[i]-x2[j])**2 + (y1[i]-y2[j])**2)
dik = np.sqrt((x1[i]-x2[k])**2 + (y1[i]-y2[k])**2)
djk = np.sqrt((x2[j]-x2[k])**2 + (y2[j]-y2[k])**2)
if dij == 0.: continue
if dik == 0.: continue
if djk == 0.: continue
if dij < dik:
if dik < djk:
d3 = dij; d2 = dik; d1 = djk
ccw = is_ccw(x1[i],y1[i],x2[j],y2[j],x2[k],y2[k])
true_ntri = true_ntri_122
elif dij < djk:
d3 = dij; d2 = djk; d1 = dik
ccw = is_ccw(x2[j],y2[j],x1[i],y1[i],x2[k],y2[k])
true_ntri = true_ntri_212
else:
d3 = djk; d2 = dij; d1 = dik
ccw = is_ccw(x2[j],y2[j],x2[k],y2[k],x1[i],y1[i])
true_ntri = true_ntri_221
else:
if dij < djk:
d3 = dik; d2 = dij; d1 = djk
ccw = is_ccw(x1[i],y1[i],x2[k],y2[k],x2[j],y2[j])
true_ntri = true_ntri_122
elif dik < djk:
d3 = dik; d2 = djk; d1 = dij
ccw = is_ccw(x2[k],y2[k],x1[i],y1[i],x2[j],y2[j])
true_ntri = true_ntri_212
else:
d3 = djk; d2 = dik; d1 = dij
ccw = is_ccw(x2[k],y2[k],x2[j],y2[j],x1[i],y1[i])
true_ntri = true_ntri_221
r = d2
u = d3/d2
v = (d1-d2)/d3
if r < min_sep or r >= max_sep: continue
if u < min_u or u >= max_u: continue
if v < min_v or v >= max_v: continue
if not ccw:
v = -v
kr = int(np.floor( (np.log(r)-log_min_sep) / bin_size ))
ku = int(np.floor( (u-min_u) / ubin_size ))
if v > 0:
kv = int(np.floor( (v-min_v) / vbin_size )) + nvbins
else:
kv = int(np.floor( (v-(-max_v)) / vbin_size ))
assert 0 <= kr < nbins
assert 0 <= ku < nubins
assert 0 <= kv < 2*nvbins
true_ntri[kr,ku,kv] += 1
# With the regular NNNCorrelation class, we end up with the sum of all permutations.
true_ntri_sum = true_ntri_122 + true_ntri_212 + true_ntri_221
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri = ',true_ntri_sum)
#print('diff = ',ddd.ntri - true_ntri_sum)
np.testing.assert_array_equal(ddd.ntri, true_ntri_sum)
# Now repeat with the full CrossCorrelation class, which distinguishes the permutations.
dddc = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=1)
dddc.process(cat1, cat2)
#print('true_ntri_122 = ',true_ntri_122)
#print('diff = ',dddc.n1n2n3.ntri - true_ntri_122)
np.testing.assert_array_equal(dddc.n1n2n3.ntri, true_ntri_122)
np.testing.assert_array_equal(dddc.n1n3n2.ntri, true_ntri_122)
np.testing.assert_array_equal(dddc.n2n1n3.ntri, true_ntri_212)
np.testing.assert_array_equal(dddc.n2n3n1.ntri, true_ntri_221)
np.testing.assert_array_equal(dddc.n3n1n2.ntri, true_ntri_212)
np.testing.assert_array_equal(dddc.n3n2n1.ntri, true_ntri_221)
# Repeat with binslop = 0
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1)
ddd.process(cat1, cat2)
#print('bin_slop=0: ddd.ntri = ',ddd.ntri)
#print('diff = ',ddd.ntri - true_ntri_sum)
np.testing.assert_array_equal(ddd.ntri, true_ntri_sum)
# And again with no top-level recursion
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1, max_top=0)
ddd.process(cat1, cat2)
#print('max_top = 0: ddd.ntri = ',ddd.ntri)
#print('true_ntri = ',true_ntri_sum)
#print('diff = ',ddd.ntri - true_ntri_sum)
np.testing.assert_array_equal(ddd.ntri, true_ntri_sum)
# Split into patches to test the list-based version of the code.
cat1 = treecorr.Catalog(x=x1, y=y1, npatch=10)
cat2 = treecorr.Catalog(x=x2, y=y2, npatch=10)
ddd.process(cat1, cat2)
np.testing.assert_array_equal(ddd.ntri, true_ntri_sum)
dddc.process(cat1, cat2)
np.testing.assert_array_equal(dddc.n1n2n3.ntri, true_ntri_122)
np.testing.assert_array_equal(dddc.n1n3n2.ntri, true_ntri_122)
np.testing.assert_array_equal(dddc.n2n1n3.ntri, true_ntri_212)
np.testing.assert_array_equal(dddc.n2n3n1.ntri, true_ntri_221)
np.testing.assert_array_equal(dddc.n3n1n2.ntri, true_ntri_212)
np.testing.assert_array_equal(dddc.n3n2n1.ntri, true_ntri_221)
@timer
def test_direct_spherical():
# Repeat in spherical coords
ngal = 50
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) ) + 200 # Put everything at large y, so small angle on sky
z = rng.normal(0,s, (ngal,) )
w = rng.random_sample(ngal)
ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w)
min_sep = 1.
bin_size = 0.2
nrbins = 10
nubins = 5
nvbins = 5
ddd = treecorr.NNNCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins,
sep_units='deg', brute=True)
ddd.process(cat, num_threads=2)
r = np.sqrt(x**2 + y**2 + z**2)
x /= r; y /= r; z /= r
true_ntri = np.zeros((nrbins, nubins, 2*nvbins), dtype=int)
true_weight = np.zeros((nrbins, nubins, 2*nvbins), dtype=float)
rad_min_sep = min_sep * coord.degrees / coord.radians
for i in range(ngal):
for j in range(i+1,ngal):
for k in range(j+1,ngal):
d12 = np.sqrt((x[i]-x[j])**2 + (y[i]-y[j])**2 + (z[i]-z[j])**2)
d23 = np.sqrt((x[j]-x[k])**2 + (y[j]-y[k])**2 + (z[j]-z[k])**2)
d31 = np.sqrt((x[k]-x[i])**2 + (y[k]-y[i])**2 + (z[k]-z[i])**2)
d3, d2, d1 = sorted([d12, d23, d31])
rindex = np.floor(np.log(d2/rad_min_sep) / bin_size).astype(int)
if rindex < 0 or rindex >= nrbins: continue
if [d1, d2, d3] == [d23, d31, d12]: ii,jj,kk = i,j,k
elif [d1, d2, d3] == [d23, d12, d31]: ii,jj,kk = i,k,j
elif [d1, d2, d3] == [d31, d12, d23]: ii,jj,kk = j,k,i
elif [d1, d2, d3] == [d31, d23, d12]: ii,jj,kk = j,i,k
elif [d1, d2, d3] == [d12, d23, d31]: ii,jj,kk = k,i,j
elif [d1, d2, d3] == [d12, d31, d23]: ii,jj,kk = k,j,i
else: assert False
# Now use ii, jj, kk rather than i,j,k, to get the indices
# that correspond to the points in the right order.
u = d3/d2
v = (d1-d2)/d3
if ( ((x[jj]-x[ii])*(y[kk]-y[ii]) - (x[kk]-x[ii])*(y[jj]-y[ii])) * z[ii] +
((y[jj]-y[ii])*(z[kk]-z[ii]) - (y[kk]-y[ii])*(z[jj]-z[ii])) * x[ii] +
((z[jj]-z[ii])*(x[kk]-x[ii]) - (z[kk]-z[ii])*(x[jj]-x[ii])) * y[ii] ) > 0:
v = -v
uindex = np.floor(u / bin_size).astype(int)
assert 0 <= uindex < nubins
vindex = np.floor((v+1) / bin_size).astype(int)
assert 0 <= vindex < 2*nvbins
www = w[i] * w[j] * w[k]
true_ntri[rindex,uindex,vindex] += 1
true_weight[rindex,uindex,vindex] += www
np.testing.assert_array_equal(ddd.ntri, true_ntri)
np.testing.assert_allclose(ddd.weight, true_weight, rtol=1.e-5, atol=1.e-8)
# Check that running via the corr3 script works correctly.
config = treecorr.config.read_config('configs/nnn_direct_spherical.yaml')
cat.write(config['file_name'])
treecorr.corr3(config)
data = fitsio.read(config['nnn_file_name'])
np.testing.assert_allclose(data['r_nom'], ddd.rnom.flatten())
np.testing.assert_allclose(data['u_nom'], ddd.u.flatten())
np.testing.assert_allclose(data['v_nom'], ddd.v.flatten())
np.testing.assert_allclose(data['ntri'], ddd.ntri.flatten())
np.testing.assert_allclose(data['DDD'], ddd.weight.flatten())
# Repeat with binslop = 0
# And don't do any top-level recursion so we actually test not going to the leaves.
ddd = treecorr.NNNCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins,
sep_units='deg', bin_slop=0, max_top=0)
ddd.process(cat)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
np.testing.assert_allclose(ddd.weight, true_weight, rtol=1.e-5, atol=1.e-8)
@timer
def test_direct_arc():
# Repeat the spherical test with metric='Arc'
ngal = 5
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) ) + 200 # Large angles this time.
z = rng.normal(0,s, (ngal,) )
w = rng.random_sample(ngal)
ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w)
min_sep = 1.
max_sep = 180.
nrbins = 50
nubins = 5
nvbins = 5
bin_size = np.log((max_sep / min_sep)) / nrbins
ubin_size = 0.2
vbin_size = 0.2
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nrbins,
nubins=nubins, ubin_size=ubin_size,
nvbins=nvbins, vbin_size=vbin_size,
sep_units='deg', brute=True)
ddd.process(cat, metric='Arc')
r = np.sqrt(x**2 + y**2 + z**2)
x /= r; y /= r; z /= r
true_ntri = np.zeros((nrbins, nubins, 2*nvbins), dtype=int)
true_weight = np.zeros((nrbins, nubins, 2*nvbins), dtype=float)
c = [coord.CelestialCoord(r*coord.radians, d*coord.radians) for (r,d) in zip(ra, dec)]
for i in range(ngal):
for j in range(i+1,ngal):
for k in range(j+1,ngal):
d12 = c[i].distanceTo(c[j]) / coord.degrees
d23 = c[j].distanceTo(c[k]) / coord.degrees
d31 = c[k].distanceTo(c[i]) / coord.degrees
d3, d2, d1 = sorted([d12, d23, d31])
rindex = np.floor(np.log(d2/min_sep) / bin_size).astype(int)
if rindex < 0 or rindex >= nrbins: continue
if [d1, d2, d3] == [d23, d31, d12]: ii,jj,kk = i,j,k
elif [d1, d2, d3] == [d23, d12, d31]: ii,jj,kk = i,k,j
elif [d1, d2, d3] == [d31, d12, d23]: ii,jj,kk = j,k,i
elif [d1, d2, d3] == [d31, d23, d12]: ii,jj,kk = j,i,k
elif [d1, d2, d3] == [d12, d23, d31]: ii,jj,kk = k,i,j
elif [d1, d2, d3] == [d12, d31, d23]: ii,jj,kk = k,j,i
else: assert False
# Now use ii, jj, kk rather than i,j,k, to get the indices
# that correspond to the points in the right order.
u = d3/d2
v = (d1-d2)/d3
if ( ((x[jj]-x[ii])*(y[kk]-y[ii]) - (x[kk]-x[ii])*(y[jj]-y[ii])) * z[ii] +
((y[jj]-y[ii])*(z[kk]-z[ii]) - (y[kk]-y[ii])*(z[jj]-z[ii])) * x[ii] +
((z[jj]-z[ii])*(x[kk]-x[ii]) - (z[kk]-z[ii])*(x[jj]-x[ii])) * y[ii] ) > 0:
v = -v
uindex = np.floor(u / ubin_size).astype(int)
assert 0 <= uindex < nubins
vindex = np.floor((v+1) / vbin_size).astype(int)
assert 0 <= vindex < 2*nvbins
www = w[i] * w[j] * w[k]
true_ntri[rindex,uindex,vindex] += 1
true_weight[rindex,uindex,vindex] += www
np.testing.assert_array_equal(ddd.ntri, true_ntri)
np.testing.assert_allclose(ddd.weight, true_weight, rtol=1.e-5, atol=1.e-8)
# Check that running via the corr3 script works correctly.
config = treecorr.config.read_config('configs/nnn_direct_arc.yaml')
cat.write(config['file_name'])
treecorr.corr3(config)
data = fitsio.read(config['nnn_file_name'])
np.testing.assert_allclose(data['r_nom'], ddd.rnom.flatten())
np.testing.assert_allclose(data['u_nom'], ddd.u.flatten())
np.testing.assert_allclose(data['v_nom'], ddd.v.flatten())
np.testing.assert_allclose(data['ntri'], ddd.ntri.flatten())
np.testing.assert_allclose(data['DDD'], ddd.weight.flatten())
# Repeat with binslop = 0
# And don't do any top-level recursion so we actually test not going to the leaves.
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nrbins,
nubins=nubins, ubin_size=ubin_size,
nvbins=nvbins, vbin_size=vbin_size,
sep_units='deg', bin_slop=0, max_top=0)
ddd.process(cat)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
np.testing.assert_allclose(ddd.weight, true_weight, rtol=1.e-5, atol=1.e-8)
@timer
def test_direct_partial():
# Test the two ways to only use parts of a catalog:
ngal = 100
s = 10.
rng = np.random.RandomState(8675309)
x1 = rng.normal(0,s, (ngal,) )
y1 = rng.normal(0,s, (ngal,) )
cat1a = treecorr.Catalog(x=x1, y=y1, first_row=28, last_row=84)
x2 = rng.normal(0,s, (ngal,) )
y2 = rng.normal(0,s, (ngal,) )
cat2a = treecorr.Catalog(x=x2, y=y2, first_row=48, last_row=99)
x3 = rng.normal(0,s, (ngal,) )
y3 = rng.normal(0,s, (ngal,) )
cat3a = treecorr.Catalog(x=x3, y=y3, first_row=22, last_row=67)
min_sep = 1.
max_sep = 50.
nbins = 50
min_u = 0.13
max_u = 0.89
nubins = 10
min_v = 0.13
max_v = 0.59
nvbins = 10
ddda = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True)
ddda.process(cat1a, cat2a, cat3a)
#print('ddda.ntri = ',ddda.ntri)
log_min_sep = np.log(min_sep)
log_max_sep = np.log(max_sep)
true_ntri_123 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_132 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_213 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_231 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_312 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_321 = np.zeros( (nbins, nubins, 2*nvbins) )
bin_size = (log_max_sep - log_min_sep) / nbins
ubin_size = (max_u-min_u) / nubins
vbin_size = (max_v-min_v) / nvbins
for i in range(27,84):
for j in range(47,99):
for k in range(21,67):
dij = np.sqrt((x1[i]-x2[j])**2 + (y1[i]-y2[j])**2)
dik = np.sqrt((x1[i]-x3[k])**2 + (y1[i]-y3[k])**2)
djk = np.sqrt((x2[j]-x3[k])**2 + (y2[j]-y3[k])**2)
if dij == 0.: continue
if dik == 0.: continue
if djk == 0.: continue
if dij < dik:
if dik < djk:
d3 = dij; d2 = dik; d1 = djk
ccw = is_ccw(x1[i],y1[i],x2[j],y2[j],x3[k],y3[k])
true_ntri = true_ntri_123
elif dij < djk:
d3 = dij; d2 = djk; d1 = dik
ccw = is_ccw(x2[j],y2[j],x1[i],y1[i],x3[k],y3[k])
true_ntri = true_ntri_213
else:
d3 = djk; d2 = dij; d1 = dik
ccw = is_ccw(x2[j],y2[j],x3[k],y3[k],x1[i],y1[i])
true_ntri = true_ntri_231
else:
if dij < djk:
d3 = dik; d2 = dij; d1 = djk
ccw = is_ccw(x1[i],y1[i],x3[k],y3[k],x2[j],y2[j])
true_ntri = true_ntri_132
elif dik < djk:
d3 = dik; d2 = djk; d1 = dij
ccw = is_ccw(x3[k],y3[k],x1[i],y1[i],x2[j],y2[j])
true_ntri = true_ntri_312
else:
d3 = djk; d2 = dik; d1 = dij
ccw = is_ccw(x3[k],y3[k],x2[j],y2[j],x1[i],y1[i])
true_ntri = true_ntri_321
assert d1 >= d2 >= d3
r = d2
u = d3/d2
v = (d1-d2)/d3
if r < min_sep or r >= max_sep: continue
if u < min_u or u >= max_u: continue
if v < min_v or v >= max_v: continue
if not ccw:
v = -v
kr = int(np.floor( (np.log(r)-log_min_sep) / bin_size ))
ku = int(np.floor( (u-min_u) / ubin_size ))
if v > 0:
kv = int(np.floor( (v-min_v) / vbin_size )) + nvbins
else:
kv = int(np.floor( (v-(-max_v)) / vbin_size ))
assert 0 <= kr < nbins
assert 0 <= ku < nubins
assert 0 <= kv < 2*nvbins
true_ntri[kr,ku,kv] += 1
true_ntri_sum = true_ntri_123 + true_ntri_132 + true_ntri_213 + true_ntri_231 +\
true_ntri_312 + true_ntri_321
print('true_ntri = ',true_ntri_sum)
print('diff = ',ddda.ntri - true_ntri_sum)
np.testing.assert_array_equal(ddda.ntri, true_ntri_sum)
# Now with real CrossCorrelation
ddda = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True)
ddda.process(cat1a, cat2a, cat3a)
#print('132 = ',ddda.n1n3n2.ntri)
#print('true 132 = ',true_ntri_132)
#print('213 = ',ddda.n2n1n3.ntri)
#print('true 213 = ',true_ntri_213)
#print('231 = ',ddda.n2n3n1.ntri)
#print('true 231 = ',true_ntri_231)
    #print('312 = ',ddda.n3n1n2.ntri)
#print('true 312 = ',true_ntri_312)
#print('321 = ',ddda.n3n2n1.ntri)
#print('true 321 = ',true_ntri_321)
np.testing.assert_array_equal(ddda.n1n2n3.ntri, true_ntri_123)
np.testing.assert_array_equal(ddda.n1n3n2.ntri, true_ntri_132)
np.testing.assert_array_equal(ddda.n2n1n3.ntri, true_ntri_213)
np.testing.assert_array_equal(ddda.n2n3n1.ntri, true_ntri_231)
np.testing.assert_array_equal(ddda.n3n1n2.ntri, true_ntri_312)
np.testing.assert_array_equal(ddda.n3n2n1.ntri, true_ntri_321)
# Now check that we get the same thing with all the points, but with w=0 for the ones
# we don't want.
w1 = np.zeros(ngal)
w1[27:84] = 1.
w2 = np.zeros(ngal)
w2[47:99] = 1.
w3 = np.zeros(ngal)
w3[21:67] = 1.
cat1b = treecorr.Catalog(x=x1, y=y1, w=w1)
cat2b = treecorr.Catalog(x=x2, y=y2, w=w2)
cat3b = treecorr.Catalog(x=x3, y=y3, w=w3)
dddb = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True)
dddb.process(cat1b, cat2b, cat3b)
#print('dddb.ntri = ',dddb.ntri)
#print('diff = ',dddb.ntri - true_ntri_sum)
np.testing.assert_array_equal(dddb.ntri, true_ntri_sum)
dddb = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True)
dddb.process(cat1b, cat2b, cat3b)
#print('dddb.n1n2n3.ntri = ',dddb.n1n2n3.ntri)
#print('diff = ',dddb.n1n2n3.ntri - true_ntri)
np.testing.assert_array_equal(dddb.n1n2n3.ntri, true_ntri_123)
np.testing.assert_array_equal(dddb.n1n3n2.ntri, true_ntri_132)
np.testing.assert_array_equal(dddb.n2n1n3.ntri, true_ntri_213)
np.testing.assert_array_equal(dddb.n2n3n1.ntri, true_ntri_231)
np.testing.assert_array_equal(dddb.n3n1n2.ntri, true_ntri_312)
np.testing.assert_array_equal(dddb.n3n2n1.ntri, true_ntri_321)
@timer
def test_direct_3d_auto():
# This is the same as test_direct_count_auto, but using the 3d correlations
ngal = 50
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(312, s, (ngal,) )
y = rng.normal(728, s, (ngal,) )
z = rng.normal(-932, s, (ngal,) )
r = np.sqrt( x*x + y*y + z*z )
dec = np.arcsin(z/r)
ra = np.arctan2(y,x)
cat = treecorr.Catalog(ra=ra, dec=dec, r=r, ra_units='rad', dec_units='rad')
min_sep = 1.
max_sep = 50.
nbins = 50
min_u = 0.13
max_u = 0.89
nubins = 10
min_v = 0.13
max_v = 0.59
nvbins = 10
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=1)
ddd.process(cat)
#print('ddd.ntri = ',ddd.ntri)
log_min_sep = np.log(min_sep)
log_max_sep = np.log(max_sep)
true_ntri = np.zeros( (nbins, nubins, 2*nvbins) )
bin_size = (log_max_sep - log_min_sep) / nbins
ubin_size = (max_u-min_u) / nubins
vbin_size = (max_v-min_v) / nvbins
for i in range(ngal):
for j in range(i+1,ngal):
for k in range(j+1,ngal):
dij = np.sqrt((x[i]-x[j])**2 + (y[i]-y[j])**2 + (z[i]-z[j])**2)
dik = np.sqrt((x[i]-x[k])**2 + (y[i]-y[k])**2 + (z[i]-z[k])**2)
djk = np.sqrt((x[j]-x[k])**2 + (y[j]-y[k])**2 + (z[j]-z[k])**2)
if dij == 0.: continue
if dik == 0.: continue
if djk == 0.: continue
if dij < dik:
if dik < djk:
d3 = dij; d2 = dik; d1 = djk
ccw = is_ccw_3d(x[i],y[i],z[i],x[j],y[j],z[j],x[k],y[k],z[k])
elif dij < djk:
d3 = dij; d2 = djk; d1 = dik
ccw = is_ccw_3d(x[j],y[j],z[j],x[i],y[i],z[i],x[k],y[k],z[k])
else:
d3 = djk; d2 = dij; d1 = dik
ccw = is_ccw_3d(x[j],y[j],z[j],x[k],y[k],z[k],x[i],y[i],z[i])
else:
if dij < djk:
d3 = dik; d2 = dij; d1 = djk
ccw = is_ccw_3d(x[i],y[i],z[i],x[k],y[k],z[k],x[j],y[j],z[j])
elif dik < djk:
d3 = dik; d2 = djk; d1 = dij
ccw = is_ccw_3d(x[k],y[k],z[k],x[i],y[i],z[i],x[j],y[j],z[j])
else:
d3 = djk; d2 = dik; d1 = dij
ccw = is_ccw_3d(x[k],y[k],z[k],x[j],y[j],z[j],x[i],y[i],z[i])
r = d2
u = d3/d2
v = (d1-d2)/d3
if r < min_sep or r >= max_sep: continue
if u < min_u or u >= max_u: continue
if v < min_v or v >= max_v: continue
if not ccw:
v = -v
kr = int(np.floor( (np.log(r)-log_min_sep) / bin_size ))
ku = int(np.floor( (u-min_u) / ubin_size ))
if v > 0:
kv = int(np.floor( (v-min_v) / vbin_size )) + nvbins
else:
kv = int(np.floor( (v-(-max_v)) / vbin_size ))
assert 0 <= kr < nbins
assert 0 <= ku < nubins
assert 0 <= kv < 2*nvbins
true_ntri[kr,ku,kv] += 1
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
# Repeat with binslop = 0
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1)
ddd.process(cat)
#print('ddd.ntri = ',ddd.ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
# And again with no top-level recursion
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1, max_top=0)
ddd.process(cat)
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
# And compare to the cross correlation
# Here, we get 6x as much, since each triangle is discovered 6 times.
ddd.clear()
ddd.process(cat,cat,cat)
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(ddd.ntri, 6*true_ntri)
dddc = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1, max_top=0)
dddc.process(cat,cat,cat)
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(dddc.n1n2n3.ntri, true_ntri)
np.testing.assert_array_equal(dddc.n1n3n2.ntri, true_ntri)
np.testing.assert_array_equal(dddc.n2n1n3.ntri, true_ntri)
np.testing.assert_array_equal(dddc.n2n3n1.ntri, true_ntri)
np.testing.assert_array_equal(dddc.n3n1n2.ntri, true_ntri)
np.testing.assert_array_equal(dddc.n3n2n1.ntri, true_ntri)
# Also compare to using x,y,z rather than ra,dec,r
cat = treecorr.Catalog(x=x, y=y, z=z)
ddd.process(cat)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
@timer
def test_direct_3d_cross():
# This is the same as test_direct_count_cross, but using the 3d correlations
ngal = 50
s = 10.
rng = np.random.RandomState(8675309)
x1 = rng.normal(312, s, (ngal,) )
y1 = rng.normal(728, s, (ngal,) )
z1 = rng.normal(-932, s, (ngal,) )
r1 = np.sqrt( x1*x1 + y1*y1 + z1*z1 )
dec1 = np.arcsin(z1/r1)
ra1 = np.arctan2(y1,x1)
cat1 = treecorr.Catalog(ra=ra1, dec=dec1, r=r1, ra_units='rad', dec_units='rad')
x2 = rng.normal(312, s, (ngal,) )
y2 = rng.normal(728, s, (ngal,) )
z2 = rng.normal(-932, s, (ngal,) )
r2 = np.sqrt( x2*x2 + y2*y2 + z2*z2 )
dec2 = np.arcsin(z2/r2)
ra2 = np.arctan2(y2,x2)
cat2 = treecorr.Catalog(ra=ra2, dec=dec2, r=r2, ra_units='rad', dec_units='rad')
x3 = rng.normal(312, s, (ngal,) )
y3 = rng.normal(728, s, (ngal,) )
z3 = rng.normal(-932, s, (ngal,) )
r3 = np.sqrt( x3*x3 + y3*y3 + z3*z3 )
dec3 = np.arcsin(z3/r3)
ra3 = np.arctan2(y3,x3)
cat3 = treecorr.Catalog(ra=ra3, dec=dec3, r=r3, ra_units='rad', dec_units='rad')
min_sep = 1.
max_sep = 50.
nbins = 50
min_u = 0.13
max_u = 0.89
nubins = 10
min_v = 0.13
max_v = 0.59
nvbins = 10
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=1)
ddd.process(cat1, cat2, cat3)
#print('ddd.ntri = ',ddd.ntri)
log_min_sep = np.log(min_sep)
log_max_sep = np.log(max_sep)
true_ntri_123 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_132 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_213 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_231 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_312 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_321 = np.zeros( (nbins, nubins, 2*nvbins) )
bin_size = (log_max_sep - log_min_sep) / nbins
ubin_size = (max_u-min_u) / nubins
vbin_size = (max_v-min_v) / nvbins
for i in range(ngal):
for j in range(ngal):
for k in range(ngal):
djk = np.sqrt((x2[j]-x3[k])**2 + (y2[j]-y3[k])**2 + (z2[j]-z3[k])**2)
dik = np.sqrt((x1[i]-x3[k])**2 + (y1[i]-y3[k])**2 + (z1[i]-z3[k])**2)
dij = np.sqrt((x1[i]-x2[j])**2 + (y1[i]-y2[j])**2 + (z1[i]-z2[j])**2)
if dij == 0.: continue
if dik == 0.: continue
if djk == 0.: continue
if dij < dik:
if dik < djk:
d3 = dij; d2 = dik; d1 = djk
ccw = is_ccw_3d(x1[i],y1[i],z1[i],x2[j],y2[j],z2[j],x3[k],y3[k],z3[k])
true_ntri = true_ntri_123
elif dij < djk:
d3 = dij; d2 = djk; d1 = dik
ccw = is_ccw_3d(x2[j],y2[j],z2[j],x1[i],y1[i],z1[i],x3[k],y3[k],z3[k])
true_ntri = true_ntri_213
else:
d3 = djk; d2 = dij; d1 = dik
ccw = is_ccw_3d(x2[j],y2[j],z2[j],x3[k],y3[k],z3[k],x1[i],y1[i],z1[i])
true_ntri = true_ntri_231
else:
if dij < djk:
d3 = dik; d2 = dij; d1 = djk
ccw = is_ccw_3d(x1[i],y1[i],z1[i],x3[k],y3[k],z3[k],x2[j],y2[j],z2[j])
true_ntri = true_ntri_132
elif dik < djk:
d3 = dik; d2 = djk; d1 = dij
ccw = is_ccw_3d(x3[k],y3[k],z3[k],x1[i],y1[i],z1[i],x2[j],y2[j],z2[j])
true_ntri = true_ntri_312
else:
d3 = djk; d2 = dik; d1 = dij
ccw = is_ccw_3d(x3[k],y3[k],z3[k],x2[j],y2[j],z2[j],x1[i],y1[i],z1[i])
true_ntri = true_ntri_321
r = d2
u = d3/d2
v = (d1-d2)/d3
if r < min_sep or r >= max_sep: continue
if u < min_u or u >= max_u: continue
if v < min_v or v >= max_v: continue
if not ccw:
v = -v
kr = int(np.floor( (np.log(r)-log_min_sep) / bin_size ))
ku = int(np.floor( (u-min_u) / ubin_size ))
if v > 0:
kv = int(np.floor( (v-min_v) / vbin_size )) + nvbins
else:
kv = int(np.floor( (v-(-max_v)) / vbin_size ))
assert 0 <= kr < nbins
assert 0 <= ku < nubins
assert 0 <= kv < 2*nvbins
true_ntri[kr,ku,kv] += 1
# With the regular NNNCorrelation class, we end up with the sum of all permutations.
true_ntri_sum = true_ntri_123 + true_ntri_132 + true_ntri_213 + true_ntri_231 +\
true_ntri_312 + true_ntri_321
#print('true_ntri = ',true_ntri_sum)
#print('diff = ',ddd.ntri - true_ntri_sum)
np.testing.assert_array_equal(ddd.ntri, true_ntri_sum)
# Now repeat with the full CrossCorrelation class, which distinguishes the permutations.
ddd = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=1)
ddd.process(cat1, cat2, cat3)
#print('true_ntri = ',true_ntri_123)
#print('diff = ',ddd.n1n2n3.ntri - true_ntri_123)
np.testing.assert_array_equal(ddd.n1n2n3.ntri, true_ntri_123)
np.testing.assert_array_equal(ddd.n1n3n2.ntri, true_ntri_132)
np.testing.assert_array_equal(ddd.n2n1n3.ntri, true_ntri_213)
np.testing.assert_array_equal(ddd.n2n3n1.ntri, true_ntri_231)
np.testing.assert_array_equal(ddd.n3n1n2.ntri, true_ntri_312)
np.testing.assert_array_equal(ddd.n3n2n1.ntri, true_ntri_321)
# Repeat with binslop = 0
ddd = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1)
ddd.process(cat1, cat2, cat3)
#print('binslop = 0: ddd.n1n2n3.ntri = ',ddd.n1n2n3.ntri)
#print('diff = ',ddd.n1n2n3.ntri - true_ntri_123)
np.testing.assert_array_equal(ddd.n1n2n3.ntri, true_ntri_123)
np.testing.assert_array_equal(ddd.n1n3n2.ntri, true_ntri_132)
np.testing.assert_array_equal(ddd.n2n1n3.ntri, true_ntri_213)
np.testing.assert_array_equal(ddd.n2n3n1.ntri, true_ntri_231)
np.testing.assert_array_equal(ddd.n3n1n2.ntri, true_ntri_312)
np.testing.assert_array_equal(ddd.n3n2n1.ntri, true_ntri_321)
# And again with no top-level recursion
ddd = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1, max_top=0)
ddd.process(cat1, cat2, cat3)
    #print('max_top = 0: ddd.n1n2n3.ntri = ',ddd.n1n2n3.ntri)
#print('true_ntri = ',true_ntri_123)
#print('diff = ',ddd.n1n2n3.ntri - true_ntri_123)
np.testing.assert_array_equal(ddd.n1n2n3.ntri, true_ntri_123)
np.testing.assert_array_equal(ddd.n1n3n2.ntri, true_ntri_132)
np.testing.assert_array_equal(ddd.n2n1n3.ntri, true_ntri_213)
np.testing.assert_array_equal(ddd.n2n3n1.ntri, true_ntri_231)
np.testing.assert_array_equal(ddd.n3n1n2.ntri, true_ntri_312)
np.testing.assert_array_equal(ddd.n3n2n1.ntri, true_ntri_321)
# Also compare to using x,y,z rather than ra,dec,r
cat1 = treecorr.Catalog(x=x1, y=y1, z=z1)
cat2 = treecorr.Catalog(x=x2, y=y2, z=z2)
cat3 = treecorr.Catalog(x=x3, y=y3, z=z3)
ddd.process(cat1, cat2, cat3)
np.testing.assert_array_equal(ddd.n1n2n3.ntri, true_ntri_123)
| np.testing.assert_array_equal(ddd.n1n3n2.ntri, true_ntri_132) | numpy.testing.assert_array_equal |
import pdb
import numpy as np
import scipy as sp
import scipy.optimize as op
import util
import matplotlib.pyplot as plt
import time
# Laplace Inference -----------------------------------------------------------
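# The three functions below evaluate the unnormalized negative log posterior of a
# latent trajectory xbar under a Poisson observation model with exponential link and a
# Gaussian prior with precision matrix K_bigInv (constant terms in y are dropped):
#     L(x) = 1^T exp(C^T x + d) - y^T (C^T x + d) + 0.5 * x^T K^{-1} x,
# together with its gradient and Hessian with respect to x, as used for Laplace inference.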
def negLogPosteriorUnNorm(xbar, ybar, C_big, d_big, K_bigInv, xdim, ydim):
xbar = np.ndarray.flatten(np.asarray(xbar))
ybar = np.ndarray.flatten(np.asarray(ybar))
T = int(len(d_big)/ydim)
C_big = np.asarray(C_big)
d_big = np.asarray(d_big)
K_bigInv = np.asarray(K_bigInv)
A = np.dot(C_big.T, xbar) + d_big
Aexp = np.exp(A)
L1 = np.dot(Aexp, np.ones(ydim*T))
L2 = - np.dot(ybar, A.T)
L3 = 0.5*np.dot(xbar,np.dot(K_bigInv,xbar))
L = L1 + L2 + L3
# pdb.set_trace()
return L
def negLogPosteriorUnNorm_grad(xbar, ybar, C_big, d_big, K_bigInv, xdim, ydim):
xbar = np.asarray(xbar)
ybar = np.asarray(ybar)
A = np.dot(C_big.T, xbar) + d_big
A = np.float64(A)
Aexp = np.exp(A)
dL1 = np.dot(Aexp,C_big.T)
dL2 = - np.dot(ybar, C_big.T)
dL3 = np.dot(xbar, K_bigInv)
dL = dL1 + dL2 + dL3
return dL
def negLogPosteriorUnNorm_hess(xbar, ybar, C_big, d_big, K_bigInv, xdim, ydim):
    xbar = np.asarray(xbar)
    ybar = np.asarray(ybar)
    # Reconstructed body (the source is cut off here); this is the analytic Hessian of
    # the objective above:  H = C_big diag(exp(C_big^T x + d)) C_big^T + K_bigInv
    A = np.dot(C_big.T, xbar) + d_big
    Aexp = np.exp(A)
    H = np.dot(C_big * Aexp, C_big.T) + K_bigInv
    return H
#!/usr/bin/env python
__author__ = "<NAME>"
__email__ = "mncosta(at)isr(dot)tecnico(dot)ulisboa(dot)pt"
import numpy as np
from sklearn.linear_model import HuberRegressor
import math
from random import randint
import cvxopt as cvx
from RiskPerception.OpticalFlow import getWeightFromOFDistance, calcDistance
from RiskPerception.Objects import getOFWeightFromObjects
from RiskPerception.CONFIG import CVX_SUPRESS_PRINT,\
HUBER_LOSS_EPSILON,\
RANSAC_MINIMUM_DATAPOINTS,\
RANSAC_NUMBER_ITERATIONS, \
RANSAC_MINIMUM_RATIO_INLIERS,\
RANSAC_MINIMUM_ERROR_ANGLE,\
RANSAC_RATIO_INCREASE_ETA,\
ITERATIVE_OBJECT_WEIGHTS_ITERATIONS,\
MAXIMUM_INLIERS_ANGLE,\
EXPONENTIAL_DECAY_NBR_WEIGHTS,\
EXPONENTIAL_DECAY_INITIAL,\
EXPONENTIAL_DECAY_TAU
def l1_norm_optimization(a_i, b_i, c_i, w_i=None):
"""Solve l1-norm optimization problem."""
cvx.solvers.options['show_progress'] = not CVX_SUPRESS_PRINT
# Non-Weighted optimization:
if w_i is None:
# Problem must be formulated as sum |P*x - q|
P = cvx.matrix([[cvx.matrix(a_i)], [cvx.matrix(b_i)]])
q = cvx.matrix(c_i * -1)
# Solve the l1-norm problem
u = cvx.l1.l1(P, q)
# Get results
x0, y0 = u[0], u[1]
# Weighted optimization:
else:
# Problem must be formulated as sum |P*x - q|
P = cvx.matrix([[cvx.matrix(np.multiply(a_i, w_i))],
[cvx.matrix(np.multiply(b_i, w_i))]])
q = cvx.matrix(np.multiply(w_i, c_i * -1))
# Solve the l1-norm problem
u = cvx.l1.l1(P, q)
# Get results
x0, y0 = u[0], u[1]
# return resulting point
return (x0, y0)
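# Minimal usage sketch (hypothetical numbers): intersecting the two lines x - 1 = 0 and
# y - 2 = 0, written as a_i*x + b_i*y + c_i = 0, should recover roughly (1.0, 2.0):
#     x0, y0 = l1_norm_optimization(np.array([1., 0.]),
#                                   np.array([0., 1.]),
#                                   np.array([-1., -2.]))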
def l2_norm_optimization(a_i, b_i, c_i, w_i=None):
"""Solve l2-norm optimization problem."""
# Non-Weighted optimization:
if w_i is None:
aux1 = -2 * ((np.sum(np.multiply(b_i, b_i))) * (
np.sum(np.multiply(a_i, a_i))) / float(
np.sum(np.multiply(a_i, b_i))) - (np.sum(np.multiply(a_i, b_i))))
aux2 = 2 * ((np.sum(np.multiply(b_i, b_i))) * (
np.sum(np.multiply(a_i, c_i))) / float(
np.sum(np.multiply(a_i, b_i))) - (np.sum(np.multiply(b_i, c_i))))
x0 = aux2 / float(aux1)
y0 = (-(np.sum(np.multiply(a_i, c_i))) - (
np.sum(np.multiply(a_i, a_i))) * x0) / float(
np.sum(np.multiply(a_i, b_i)))
# Weighted optimization:
else:
aux1 = -2 * ((np.sum(np.multiply(np.multiply(b_i, b_i), w_i))) * (
np.sum(np.multiply(np.multiply(a_i, a_i), w_i))) / float(
np.sum(np.multiply(np.multiply(a_i, b_i), w_i))) - (
np.sum(np.multiply(np.multiply(a_i, b_i), w_i))))
aux2 = 2 * ((np.sum(np.multiply(np.multiply(b_i, b_i), w_i))) * (
np.sum(np.multiply(np.multiply(a_i, c_i), w_i))) / float(
np.sum(np.multiply(np.multiply(a_i, b_i), w_i))) - (
np.sum(np.multiply(np.multiply(b_i, c_i), w_i))))
x0 = aux2 / float(aux1)
y0 = (-(np.sum(np.multiply(np.multiply(a_i, c_i), w_i))) - (
np.sum(np.multiply(np.multiply(a_i, a_i), w_i))) * x0) / float(
np.sum(np.multiply(np.multiply(a_i, b_i), w_i)))
# return resulting point
return (x0, y0)
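# l2_norm_optimization returns the closed-form minimizer of
#     sum_i w_i * (a_i*x0 + b_i*y0 + c_i)^2
# obtained by setting the derivatives with respect to x0 and y0 to zero and solving the
# resulting 2x2 normal equations (the unweighted branch is the special case w_i = 1).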
def huber_loss_optimization(a_i, b_i, c_i, w_i=None):
"""Solve Huber loss optimization problem."""
for k in range(5):
try:
# Non-Weighted optimization:
if w_i is None:
huber = HuberRegressor(fit_intercept=True, alpha=0.0,
max_iter=100, epsilon=HUBER_LOSS_EPSILON)
X = -1 * np.concatenate(
(a_i.reshape(a_i.shape[0], 1),
b_i.reshape(b_i.shape[0], 1)), axis=1)
y = c_i
huber.fit(X, y)
# Get results
x0, y0 = huber.coef_ + np.array([0., 1.]) * huber.intercept_
# Weighted optimization:
else:
huber = HuberRegressor(fit_intercept=True, alpha=0.0,
max_iter=100, epsilon=HUBER_LOSS_EPSILON)
X = -1 * np.concatenate(
(a_i.reshape(a_i.shape[0], 1),
b_i.reshape(b_i.shape[0], 1)), axis=1)
y = c_i
sampleWeight = w_i
huber.fit(X, y, sample_weight=sampleWeight)
# Get results
x0, y0 = huber.coef_ + np.array([0., 1.]) * huber.intercept_
except ValueError:
pass
else:
# return resulting point
return x0, y0
else:
return None, None
def select_subset(OFVectors):
"""Select a subset of a given set."""
subset = np.array([]).reshape(0, 4)
for i in range(RANSAC_MINIMUM_DATAPOINTS):
idx = randint(0, (OFVectors.shape)[0] - 1)
subset = np.vstack((subset, np.array([OFVectors[idx]])))
return subset
def fit_model(subset):
"""Return a solution for a given subset of points."""
# Initialize some empty variables
a_i = np.array([])
b_i = np.array([])
c_i = np.array([])
# Save the lines coeficients of the form a*x + b*y + c = 0 to the variables
for i in range(subset.shape[0]):
a1, b1, c1, d1 = subset[i]
pt1 = (a1, b1)
# So we don't divide by zero
if (a1 - c1) == 0:
continue
a = float(b1 - d1) / float(a1 - c1)
b = -1
c = (b1) - a * a1
denominator = float(a ** 2 + 1)
a_i = np.append(a_i, a / denominator)
b_i = np.append(b_i, b / denominator)
c_i = np.append(c_i, c / denominator)
# Solve a optimization problem with Minimum Square distance as a metric
(x0, y0) = l2_norm_optimization(a_i, b_i, c_i)
# Return FOE
return (x0, y0)
def get_intersect_point(a1, b1, c1, d1, x0, y0):
"""Get the point on the lines that passes through (a1,b1) and (c1,d1) and s closest to the point (x0,y0)."""
a = 0
if (a1 - c1) != 0:
a = float(b1 - d1) / float(a1 - c1)
c = b1 - a * a1
# Compute the line perpendicular to the line of the OF vector that passes throught (x0,y0)
a_aux = 0
if a != 0:
a_aux = -1 / a
c_aux = y0 - a_aux * x0
# Get intersection of the two lines
x1 = (c_aux - c) / (a - a_aux)
y1 = a_aux * x1 + c_aux
return (x1, y1)
def find_angle_between_lines(x0, y0, a1, b1, c1, d1):
"""Finds the angle between two lines."""
# Line 1 : line that passes through (x0,y0) and (a1,b1)
# Line 2 : line that passes through (c1,d1) and (a1,b1)
angle1 = 0
angle2 = 0
if (a1 - x0) != 0:
angle1 = float(b1 - y0) / float(a1 - x0)
if (a1 - c1) != 0:
angle2 = float(b1 - d1) / float(a1 - c1)
# Get angle in degrees
angle1 = math.degrees(math.atan(angle1))
angle2 = math.degrees(math.atan(angle2))
ang_diff = angle1 - angle2
# Find angle in the interval [0,180]
if math.fabs(ang_diff) > 180:
ang_diff = ang_diff - 180
# Return angle between the two lines
return ang_diff
def find_inliers_outliers(x0, y0, OFVectors):
"""Find set of inliers and outliers of a given set of optical flow vectors and the estimated FOE."""
    # Initialize some variables
inliers = np.array([])
nbr_inlier = 0
# Find inliers with the angle method
# For each vector
for i in range((OFVectors.shape)[0]):
a1, b1, c1, d1 = OFVectors[i]
# Find the angle between the line that passes through (x0,y0) and (a1,b1) and the line that passes through (c1,d1) and (a1,b1)
        ang_diff = find_angle_between_lines(x0, y0, a1, b1, c1, d1)
# If the angle is below a certain treshold consider it a inlier
if -RANSAC_MINIMUM_ERROR_ANGLE < ang_diff < RANSAC_MINIMUM_ERROR_ANGLE:
# Increment number of inliers and add save it
nbr_inlier += 1
inliers = np.append(inliers, i)
# Compute the ratio of inliers to overall number of optical flow vectors
ratioInliersOutliers = float(nbr_inlier) / (OFVectors.shape)[0]
# Return set of inliers and ratio of inliers to overall set
return inliers, ratioInliersOutliers
def RANSAC(OFVectors):
"""Estimate the FOE of a set of optical flow (OF) vectors using RANSAC."""
# Initialize some variables
savedRatio = 0
FOE = (0, 0)
inliersModel = np.array([])
# Repeat iterations for a number of times
for i in range(RANSAC_NUMBER_ITERATIONS):
        # Randomly select initial OF vectors
subset = select_subset(OFVectors)
# Estimate a FOE for the set of OF vectors
(x0, y0) = fit_model(subset)
# Find the inliers of the set for the estimated FOE
        inliers, ratioInliersOutliers = find_inliers_outliers(x0, y0, OFVectors)
# If ratio of inliers is bigger than the previous iterations, save current solution
if savedRatio < ratioInliersOutliers:
savedRatio = ratioInliersOutliers
inliersModel = inliers
FOE = (x0, y0)
# If ratio is acceptable, stop iterating and return the found solution
if savedRatio > RANSAC_MINIMUM_RATIO_INLIERS and RANSAC_MINIMUM_RATIO_INLIERS != 0:
break
# Return the estimated FOE, the found inliers ratio and the set of inliers
return FOE, savedRatio, inliersModel
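# Minimal usage sketch (hypothetical values): OFVectors is assumed to be an (N, 4) array
# with one optical-flow segment (x1, y1, x2, y2) per row, which is how fit_model and
# find_inliers_outliers unpack it:
#     OF = np.array([[100., 120., 104., 121.],
#                    [310.,  80., 315.,  78.]])
#     FOE, inlier_ratio, inlier_idx = RANSAC(OF)
# FOE is the estimated focus of expansion (x0, y0), inlier_ratio the fraction of vectors
# whose direction agrees with that FOE to within RANSAC_MINIMUM_ERROR_ANGLE degrees, and
# inlier_idx the indices of those vectors within OF.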
def RANSAC_ImprovedModel(OFVectors):
"""Estimate the FOE of a set of optical flow (OF) vectors using a form of RANSAC method."""
# Initialize some variables
FOE = (0, 0)
savedRatio = 0
inliersModel = np.array([])
# Repeat iterations for a number of times
for i in range(RANSAC_NUMBER_ITERATIONS):
# Randomly select initial OF vectors
subset = select_subset(OFVectors)
# Estimate a FOE for the set of OF vectors
(x0, y0) = fit_model(subset)
# Find the inliers of the set for the estimated FOE
        inliers, ratioInliersOutliers = find_inliers_outliers(x0, y0, OFVectors)
        # Initialize some variables
iter = 0
ratioInliersOutliers_old = 0
# While the ratio of inliers keeps on increasing
        while ((inliers.shape)[0] != 0 and
               ratioInliersOutliers - ratioInliersOutliers_old > RANSAC_RATIO_INCREASE_ETA):
# Repeat iterations for a number of times
if iter > RANSAC_NUMBER_ITERATIONS:
break
iter += 1
            # Select a new set of OF vectors that are inliers to the estimated FOE
for i in range((inliers.shape)[0]):
subset = np.vstack(
(subset, np.array([OFVectors[int(inliers[i])]])))
# Estimate a FOE for the new set of OF vectors
(x0, y0) = fit_model(subset)
# Save the previous iteration ratio if inliers
ratioInliersOutliers_old = ratioInliersOutliers
# Find the inliers of the set for the estimated FOE
            inliers, ratioInliersOutliers = find_inliers_outliers(x0, y0, OFVectors)
# If ratio of inliers is bigger than the previous iterations, save current solution
if savedRatio < ratioInliersOutliers:
savedRatio = ratioInliersOutliers
inliersModel = inliers
FOE = (x0, y0)
# If ratio is acceptable, stop iterating and return the found solution
if savedRatio > RANSAC_MINIMUM_RATIO_INLIERS and RANSAC_MINIMUM_RATIO_INLIERS != 0:
break
# If ratio is acceptable, stop iterating and return the found solution
if savedRatio > RANSAC_MINIMUM_RATIO_INLIERS and RANSAC_MINIMUM_RATIO_INLIERS != 0:
break
# Return the estimated FOE, the found inliers ratio and the set of inliers
return FOE, savedRatio, inliersModel
def vectorOFRightDirection(OFvect, FOE):
"""Returns True if OF vector is pointing away from the FOE, False otherwise."""
# Get points of optical flow vector
a1, b1, c1, d1 = OFvect
# If left side of FOE
if a1 <= FOE[0]:
if c1 <= a1:
return False
# If right side of FOE
else:
if c1 >= a1:
return False
return True
def improveOFObjectsWeights(OF, objects, framenbr, FOE, currResult,
dist_intervals=None, dist_avg_int=None,
dist_max_int=None):
"""Iteratively check weights coming from objects."""
staticObjects = objects[objects[:, 0] == str(framenbr)].copy()
staticObjects[:, 6] = 0
a_i = np.array([])
b_i = np.array([])
c_i = np.array([])
w_i = np.array([])
(x0, y0) = currResult
for i in range((OF.shape)[0]):
a1, b1, c1, d1 = OF[i]
# So we don't divide by zero
if (a1 - c1) == 0:
continue
a = float(b1 - d1) / float(a1 - c1)
b = -1
c = (b1) - a*a1
lengthLine = math.sqrt((a1-c1)**2 + (b1-d1)**2)
distToFOE = calcDistance((a1, b1), FOE)
for j in range(dist_intervals.shape[0] - 1):
if dist_intervals[j] < distToFOE < dist_intervals[j + 1]:
break
distance_weight = (
getWeightFromOFDistance((lengthLine), (dist_avg_int[j]),
(dist_max_int[j])))
if getOFWeightFromObjects(objects, (a1, b1), framenbr) != 0:
for object in staticObjects:
if (float(object[1]) <= float(a1) <= float(object[3])) and (
float(object[2]) <= float(b1) <= float(object[4])):
if (-MAXIMUM_INLIERS_ANGLE <
                            find_angle_between_lines(x0, y0, a1, b1, c1, d1) <
MAXIMUM_INLIERS_ANGLE) and \
vectorOFRightDirection((a1, b1, c1, d1), FOE):
object_weight = 1
object[6] = str(float(object[6]) + 1)
else:
object_weight = 0
object[6] = str(float(object[6]) - 1)
else:
object_weight = 1
weight = distance_weight * object_weight
denominator = float(a ** 2 + 1)
a_i = np.append(a_i, a / denominator)
b_i = np.append(b_i, b / denominator)
c_i = np.append(c_i, c / denominator)
w_i = np.append(w_i, [weight])
return a_i, b_i, c_i, w_i, staticObjects
def iterative_improve_on_object_weights(optimization_method, OF, objects,
framenbr, FOE, curr_result,
dist_intervals, dist_avg_int,
dist_max_int):
for i in range(ITERATIVE_OBJECT_WEIGHTS_ITERATIONS):
a_i, b_i, c_i, w_i, staticObjects = \
improveOFObjectsWeights(OF,
objects,
framenbr,
FOE,
curr_result,
dist_intervals=dist_intervals,
dist_avg_int=dist_avg_int,
dist_max_int=dist_max_int)
(x0, y0) = optimization_method(a_i, b_i, c_i, w_i)
if x0 is None and y0 is None:
return curr_result
return (x0, y0)
def negative_exponential_decay(x, initial = None, tau = None):
"""Returns the value of a negative exponential [f(x) = No * e^-(t*x) ]."""
if initial is None:
initial = EXPONENTIAL_DECAY_INITIAL
if tau is None:
tau = EXPONENTIAL_DECAY_TAU
return initial * math.exp(-1 * tau * x)
def generate_weights(nbr_weights):
"""Generate negative exponential weights."""
weights = np.array([])
for i in range(nbr_weights):
weights = np.append(weights, negative_exponential_decay(i))
return weights
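# Sketch of the intended use: generate_weights(3) returns
#     [EXPONENTIAL_DECAY_INITIAL * exp(-EXPONENTIAL_DECAY_TAU * i) for i in (0, 1, 2)],
# so the newest sample (index 0) receives the largest weight.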
def points_history(points, newPoint):
"""Refresh the points history. Delete the oldest point and add the new point."""
points = np.delete(points, (points.shape)[1] - 1, axis=1)
points = | np.insert(points, 0, newPoint, axis=1) | numpy.insert |
from collections import defaultdict
import json
import math
import enum
from time import sleep
import networkx as nx
import numpy as np
from typing import Dict, List, Set, Tuple
from networkx.algorithms.dag import descendants
from numpy import ndarray, vectorize
def sigmoidal(x: float):
# print(x)
if (x < -4):
x = -4
elif x > 4:
x = 4
return 1 / (1 + math.exp(-4.9*x))
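# The 4.9 slope is the steepened sigmoid commonly used in NEAT-style networks; inputs
# are clamped to [-4, 4] before exponentiation, so for example
#     sigmoidal(0.0) == 0.5    and    sigmoidal(10.0) == sigmoidal(4.0)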
class NodeLocation:
x: int
y: int
z: int
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
def __str__(self):
return str((self.x, self.y, self.z))
def __repr__(self) -> str:
return str((self.x, self.y, self.z))
def __hash__(self) -> int:
return ((self.x, self.y, self.z)).__hash__()
def __eq__(self, other):
return self.__dict__ == other.__dict__
class ConnectionLocation:
x1: int
y1: int
z1: int
x2: int
y2: int
z2: int
weight: float
def __init__(self, x1: int, y1: int, z1: int, x2: int, y2: int, z2: int, weight: float):
self.x1 = x1
self.y1 = y1
self.z1 = z1
self.x2 = x2
self.y2 = y2
self.z2 = z2
self.weight = weight
def getConnectionIndex(source: NodeLocation, target: NodeLocation):
if source.z == 0:
return 0
if source.z == 1 and target.z == 2:
return 1
if source.z == 1 and target.z == 6:
return 2
if source.z == 2 and target.z == 6:
return 3
if source.z == 2 and target.z == 3:
return 4
if source.z == 3 and target.z == 4:
return 5
if source.z == 3 and target.z == 6:
return 6
if source.z == 4 and target.z == 5:
return 7
if source.z == 4 and target.z == 6:
return 8
if source.z == 5 and target.z == 6:
return 9
if source.z == 2 and target.z == 2:
return 10
if source.z == 3 and target.z == 3:
return 11
if source.z == 4 and target.z == 4:
return 12
if source.z == 5 and target.z == 5:
return 13
else:
print("test???")
# Identify input, hidden and output nodes
def constructNetwork(nodes: List[NodeLocation], connections: List[ConnectionLocation], layerShapes: List[List[int]], bias: NodeLocation = None):
# computationOrder: List[NodeLocation] = list()
# ndarray()
inputNodes = list(filter(lambda n: n.z == 0, nodes))
# if bias is not None:
# inputNodes.append(bias)
outputNodes = list(filter(lambda n: n.z == 3, nodes))
print("Node values initializing...")
print("input nodes:")
print(len(inputNodes))
print("outputnodes")
print(len(outputNodes))
# for node in nodes:
# nodeValuePre[node] = 0
# nodeValuePost[node] = 0
print("constructing graph representation")
# data = list(map(lambda c: (NodeLocation(c.x1, c.y1, c.z1),
# NodeLocation(c.x2, c.y2, c.z2), c.weight), connections))
print("construct graph")
connection = [np.zeros(layerShapes[1] + layerShapes[0]),
np.zeros(layerShapes[2] + layerShapes[1]),
| np.zeros(layerShapes[6] + layerShapes[1]) | numpy.zeros |