import numpy, array #,rpy2
from matplotlib import pyplot as plt
import numpy as np
from scipy.optimize import least_squares
from rpy2.robjects.packages import importr
import rpy2.robjects as robjects
import rpy2.robjects.numpy2ri
#import notch
from numpy.fft import fft, fftfreq
# We know/can calculate frequency peak, use this to guess where picks will be.
# maybe have a sliding window that reports peak values.
def peakPicker(data, omega, dt):
# compute window based on omega and dt
# make sure you are not aliased, grab every other peak
window = int((2*numpy.pi) / (omega*dt)) # integer number of samples per cycle, so slice indices below stay ints
data = numpy.array(data)
peaks = []
troughs = []
times = []
times2 = []
indices = []
ws = 0
we = window
ii = 0
for i in range((int)(len(data)/window)):
# initially was just returning this I think avg is better
#times.append( (ws + numpy.abs(data[ws:we]).argmax()) * dt )
peaks.append(numpy.max(data[ws:we]))
times.append( (ws + data[ws:we].argmax()) * dt )
indices.append( ii + data[ws:we].argmax() )
troughs.append(numpy.min(data[ws:we]))
times2.append( (ws + (data[ws:we]).argmin()) * dt )
indices.append( ii + data[ws:we].argmin() )
ws += window
we += window
ii += (int)(we-ws)
#return numpy.array(peaks), numpy.array(times)
# Averaging peaks does a good job of removing bias in noise
return (numpy.array(peaks)-numpy.array(troughs))/2., \
(numpy.array(times)+numpy.array(times2))/2., \
indices
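# ---------------------------------------------------------------------------
# Minimal usage sketch for peakPicker (illustration only, not part of the
# original module): build a noisy decaying sinusoid, much like the __main__
# block at the bottom of this file, and recover its per-cycle envelope.
def _examplePeakPicker():
    dt = 1e-4                          # sample interval (s)
    T2 = 0.1                           # decay constant (s)
    omega = 2000. * 2 * numpy.pi       # angular frequency (rad/s)
    t = numpy.arange(0, 8.*T2, dt)
    data = numpy.exp(-t/T2)*numpy.sin(omega*t) + numpy.random.normal(0, .05, len(t))
    peaks, times, indices = peakPicker(data, omega, dt)
    return peaks, times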
#################################################
# Regress for T2 using rpy2 interface
def regressCurve(peaks,times,sigma2=1,intercept=True):
# TODO, if regression fails, it might be because there is no exponential
# term, maybe do a second regression then on a linear model.
b1 = 0 # Bias
b2 = 0 # Linear
rT2 = 0.3 # T2 regressed
r = robjects.r
# Variable shared between R and Python
robjects.globalenv['b1'] = b1
robjects.globalenv['b2'] = b2
robjects.globalenv['rT2'] = rT2
robjects.globalenv['sigma2'] = sigma2
value = robjects.FloatVector(peaks)
times = robjects.FloatVector(numpy.array(times))
# my_weights = robjects.RVector(value/sigma2)
# robjects.globalenv['my_weigts'] = my_weights
# if sigma2 != 0:
# print "weighting"
# tw = numpy.array(peaks)/sigma2
# my_weights = robjects.RVector( tw/numpy.max(tw) )
# else:
# my_weights = robjects.RVector(numpy.ones(len(peaks)))
# robjects.globalenv['my_weights'] = my_weights
if (intercept):
my_list = robjects.r('list(b1=50, b2=1e2, rT2=0.03)')
my_lower = robjects.r('list(b1=0, b2=0, rT2=.005)')
my_upper = robjects.r('list(b1=20000, b2=2000, rT2=.700)')
else:
my_list = robjects.r('list(b2=1e2, rT2=0.3)')
my_lower = robjects.r('list(b2=0, rT2=.005)')
my_upper = robjects.r('list(b2=2000, rT2=.700)')
my_cont = robjects.r('nls.control(maxiter=1000, warnOnly=TRUE, printEval=FALSE)')
if (intercept):
#fmla = robjects.RFormula('value ~ b1 + exp(-times/rT2)')
fmla = robjects.Formula('value ~ b1 + b2*exp(-times/rT2)')
#fmla = robjects.RFormula('value ~ b1 + b2*times + exp(-times/rT2)')
else:
fmla = robjects.Formula('value ~ b2*exp(-times/rT2)')
env = fmla.getenvironment()
env['value'] = value
env['times'] = times
# ugly, but I get errors with everything else I've tried
my_weights = robjects.r('rep(1,length(value))')
for ii in range(len(my_weights)):
my_weights[ii] *= peaks[ii]/sigma2
Error = False
#fit = robjects.r.nls(fmla,start=my_list,control=my_cont,weights=my_weights)
if (sigma2 != 1):
print("SIGMA 2")
#fit = robjects.r.tryCatch(robjects.r.suppressWarnings(robjects.r.nls(fmla,start=my_list,control=my_cont,algorithm="port", \
# weights=my_weights)), 'silent=TRUE')
fit = robjects.r.tryCatch(robjects.r.nls(fmla,start=my_list,control=my_cont))#, \
# weights=my_weights))
else:
try:
fit = robjects.r.tryCatch(robjects.r.nls(fmla,start=my_list,control=my_cont,algorithm="port"))#,lower=my_lower,upper=my_upper))
except Exception:
print("regression issue pass")
Error = True
# If failure fall back on zero regression values
if not Error:
#Error = fit[3][0]
report = r.summary(fit)
b1 = 0
b2 = 0
rT2 = 1
if (intercept):
if not Error:
b1 = r['$'](report,'par')[0]
b2 = r['$'](report,'par')[1]
rT2 = r['$'](report,'par')[2]
#print report
#print r['$'](report,'convergence')
#print r['convergence'] #(report,'convergence')
#print r['$'](report,'par')[13]
#print r['$'](report,'par')[14]
else:
print("ERROR DETECTED, regressed values set to default")
b1 = 1e1
b2 = 1e-2
rT2 = 1e-2
#print r['$'](report,'par')[0]
#print r['$'](report,'par')[1]
#print r['$'](report,'par')[2]
return [b1,b2,rT2]
else:
if not Error:
rT2 = r['$'](report,'par')[1]
b2 = r['$'](report,'par')[0]
else:
print("ERROR DETECTED, regressed values set to default")
return [b2, rT2]
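# ---------------------------------------------------------------------------
# Minimal usage sketch for regressCurve (illustration only; assumes a working
# R installation visible to rpy2, since the fit is performed by R's nls):
# regress a synthetic mono-exponential envelope for its T2 decay constant.
def _exampleRegressCurve():
    t = numpy.arange(0, .8, .01)
    peaks = 100.*numpy.exp(-t/.1) + numpy.random.normal(0., 1., len(t))
    b1, b2, rT2 = regressCurve(peaks, t, intercept=True)
    return rT2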
#################################################
# Regress for T2 using rpy2 interface
def regressCurve2(peaks,times,sigma2=[None],intercept=True):
if sigma2[0] is not None:
my_weights = robjects.FloatVector( sigma2 )
# TODO, if regression fails, it might be because there is no exponential
# term, maybe do a second regression then on a linear model.
b1 = 0 # Bias
b2 = 0 # Linear
bb2 = 0 # Linear
rT2 = 0.3 # T2 regressed
rrT2 = 1.3 # T2 regressed
r = robjects.r
# Variable shared between R and Python
robjects.globalenv['b1'] = b1
robjects.globalenv['b2'] = b2
robjects.globalenv['rT2'] = rT2
robjects.globalenv['bb2'] = b2
robjects.globalenv['rrT2'] = rT2
#robjects.globalenv['sigma2'] = sigma2
value = robjects.FloatVector(peaks)
times = robjects.FloatVector(numpy.array(times))
if (intercept):
my_list = robjects.r('list(b1=.50, b2=1e2, rT2=0.03, bb2=1e1, rrT2=1.3)')
my_lower = robjects.r('list(b1=0, b2=0, rT2=.005, bb2=0, rrT2=.005 )')
my_upper = robjects.r('list(b1=2000, b2=2000, rT2=.700, bb2=2000, rrT2=1.3 )')
else:
my_list = robjects.r('list(b2=.5, rT2=0.3, bb2=.5, rrT2=1.3)')
my_lower = robjects.r('list(b2=0, rT2=.005, bb2=0, rrT2=.005)')
my_upper = robjects.r('list(b2=1, rT2=2.6, bb2=1, rrT2=2.6)')
my_cont = robjects.r('nls.control(maxiter=1000, warnOnly=TRUE, printEval=FALSE)')
if (intercept):
#fmla = robjects.RFormula('value ~ b1 + exp(-times/rT2)')
fmla = robjects.Formula('value ~ b1 + b2*exp(-times/rT2) + bb2*exp(-times/rrT2)')
#fmla = robjects.RFormula('value ~ b1 + b2*times + exp(-times/rT2)')
else:
fmla = robjects.Formula('value ~ b2*exp(-times/rT2) + bb2*exp(-times/rrT2)')
env = fmla.getenvironment()
env['value'] = value
env['times'] = times
# ugly, but I get errors with everything else I've tried
Error = False
#fit = robjects.r.nls(fmla,start=my_list,control=my_cont,weights=my_weights)
if sigma2[0] is not None:
#print("SIGMA 2")
#fit = robjects.r.tryCatch(robjects.r.suppressWarnings(robjects.r.nls(fmla,start=my_list,control=my_cont,algorithm="port", \
# weights=my_weights)), 'silent=TRUE')
fit = robjects.r.tryCatch(robjects.r.nls(fmla,start=my_list,control=my_cont,algorithm='port',weights=my_weights,lower=my_lower,upper=my_upper))#, \
# weights=my_weights))
else:
try:
fit = robjects.r.tryCatch(robjects.r.nls(fmla,start=my_list,control=my_cont,algorithm="port"))#,lower=my_lower,upper=my_upper))
except Exception:
print("regression issue pass")
Error = True
# If failure fall back on zero regression values
if not Error:
#Error = fit[3][0]
report = r.summary(fit)
b1 = 0
b2 = 0
rT2 = 1
if (intercept):
if not Error:
b1 = r['$'](report,'par')[0]
b2 = r['$'](report,'par')[1]
rT2 = r['$'](report,'par')[2]
bb2 = r['$'](report,'par')[3]
rrT2 = r['$'](report,'par')[4]
#print report
#print r['$'](report,'convergence')
#print r['convergence'] #(report,'convergence')
#print r['$'](report,'par')[13]
#print r['$'](report,'par')[14]
else:
print("ERROR DETECTED, regressed values set to default")
b1 = 1e1
b2 = 1e-2
rT2 = 1e-2
#print r['$'](report,'par')[0]
#print r['$'](report,'par')[1]
#print r['$'](report,'par')[2]
return [b1,b2,rT2, bb2, rrT2]
else:
if not Error:
rT2 = r['$'](report,'par')[1]
b2 = r['$'](report,'par')[0]
rrT2 = r['$'](report,'par')[3]
bb2 = r['$'](report,'par')[2]
else:
print("ERROR DETECTED, regressed values set to default")
return [b2, rT2, bb2, rrT2]
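# ---------------------------------------------------------------------------
# Minimal usage sketch for regressCurve2 (illustration only; requires rpy2/R).
# The model is bi-exponential, value ~ b2*exp(-t/rT2) + bb2*exp(-t/rrT2), so a
# two-component synthetic decay is an appropriate test input.
def _exampleRegressCurve2():
    t = numpy.arange(0, .8, .01)
    peaks = .6*numpy.exp(-t/.05) + .3*numpy.exp(-t/.25) + numpy.random.normal(0., .01, len(t))
    b2, rT2, bb2, rrT2 = regressCurve2(peaks, t, intercept=False)
    return rT2, rrT2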
def fun(x, t, y):
""" Cost function for regression, single exponential, no DC term
x[0] = A0
x[1] = zeta
x[2] = df
x[3] = T2
"""
# concatenated real and imaginary parts
pre = np.concatenate((-x[0]*np.sin(2.*np.pi*x[2]*t + x[1])*np.exp(-t/x[3]), \
+x[0]*np.cos(2.*np.pi*x[2]*t + x[1])*np.exp(-t/x[3])))
return y-pre
def fun2(x, t, y):
""" Cost function for regression, single exponential, no DC term
x[0] = A0
x[1] = zeta
x[2] = T2
"""
# concatenated real and imaginary parts
pre = np.concatenate((x[0]*np.cos(x[1])*np.exp(-t/x[2]), \
-1.*x[0]*np.sin(x[1])*np.exp(-t/x[2])))
return y-pre
def quadratureDetect2(X, Y, tt, x0="None"):
""" Pure python quadrature detection using Scipy.
X = real part of NMR signal
Y = imaginary component of NMR signal
tt = time
"""
print("Pure Python Quad Det", "TODO look at loss functions and method")
# Loss functions, linear, soft_l1, huber, cauchy, arctan
# df
loss = 'cauchy' # 'soft_l1'
method = 'trf' # trf, dogbox, lm
if x0=="None":
x0 = np.array( [1., 0., 0., .2] ) # A0, zeta, df, T2
res_lsq = least_squares(fun, x0, args=(tt, np.concatenate((X, Y))), loss=loss, f_scale=1.0,\
bounds=( [1., -np.pi, -5, .005] , [1000., np.pi, 5, .800] ),
method=method
)
x = res_lsq.x
print ("df", x[0], x[1], x[2], x[3])
else:
res_lsq = least_squares(fun, x0, args=(tt, np.concatenate((X, Y))), loss=loss, f_scale=1.0,\
bounds=( [1., -np.pi, -5, .005] , [1000., np.pi, 5, .800] ),
method=method
)
#bounds=( [0., 0, -20, .0] , [1., np.pi, 20, .6] ))
x = res_lsq.x
return res_lsq.success, x[0], x[2], x[1], x[3]
# no df
#x = np.array( [1., 0., 0.2] )
#res_lsq = least_squares(fun2, x, args=(tt, np.concatenate((X, Y))), loss='soft_l1', f_scale=0.1)
#x = res_lsq.x
#return conv, E0,df,phi,T2
#return res_lsq.success, x[0], 0, x[1], x[2]
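# ---------------------------------------------------------------------------
# Minimal usage sketch for quadratureDetect2 (illustration only): synthesize
# the real and imaginary channels that the cost function `fun` models -- a
# decaying sinusoid with amplitude A0, phase zeta, frequency offset df and
# decay T2 -- and recover the parameters with the bounded least-squares fit.
def _exampleQuadratureDetect2():
    dt = 1e-4
    tt = np.arange(0, .5, dt)
    A0, zeta, df, T2 = 100., .3, 2., .15
    X = -A0*np.sin(2.*np.pi*df*tt + zeta)*np.exp(-tt/T2) + np.random.normal(0, 1., len(tt))
    Y = +A0*np.cos(2.*np.pi*df*tt + zeta)*np.exp(-tt/T2) + np.random.normal(0, 1., len(tt))
    conv, E0, dfr, phi, T2r = quadratureDetect2(X, Y, tt)
    return conv, E0, dfr, phi, T2r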
def quadratureDetect(X, Y, tt, CorrectFreq=False, BiExp=False, CorrectDC=False):
r = robjects.r
if CorrectDC:
robjects.r('''
Xc1 <- function(E01, df, tt, phi, T2_1, DC) {
DC + E01*cos(2*pi*df*tt + phi) * exp(-tt/T2_1)
}
Yc1 <- function(E01, df, tt, phi, T2_1, DC) {
DC - E01*sin(2*pi*df*tt + phi) * exp(-tt/T2_1)
}
''')
else:
robjects.r('''
Xc1 <- function(E01, df, tt, phi, T2_1) {
E01*cos(2*pi*df*tt + phi) * exp(-tt/T2_1)
}
Yc1 <- function(E01, df, tt, phi, T2_1) {
-E01*sin(2*pi*df*tt + phi) * exp(-tt/T2_1)
}
''')
# bi-exponential
if CorrectDC:
robjects.r('''
Xc2 <- function(E01, E02, df, tt, phi, T2_1, T2_2, DC) {
DC + E01*cos(2*pi*df*tt + phi) * exp(-tt/T2_1) +
DC + E02*cos(2*pi*df*tt + phi) * exp(-tt/T2_2)
}
Yc2 <- function(E01, E02, df, tt, phi, T2_1, T2_2, DC) {
DC - E01*sin(2*pi*df*tt + phi) * exp(-tt/T2_1) +
DC - E02*sin(2*pi*df*tt + phi) * exp(-tt/T2_2)
}
''')
else:
robjects.r('''
Xc2 <- function(E01, E02, df, tt, phi, T2_1, T2_2) {
E01*cos(2*pi*df*tt + phi) * exp(-tt/T2_1) +
E02*cos(2*pi*df*tt + phi) * exp(-tt/T2_2)
}
Yc2 <- function(E01, E02, df, tt, phi, T2_1, T2_2) {
-E01*sin(2*pi*df*tt + phi) * exp(-tt/T2_1) +
-E02*sin(2*pi*df*tt + phi) * exp(-tt/T2_2)
}
''')
# Make 0 vector
Zero = robjects.FloatVector(numpy.zeros(len(X)))
# Fitted Parameters
E01 = 0.
E02 = 0.
df = 0.
phi = 0.
T2_1 = 0.
T2_2 = 0.
DC = 0.
robjects.globalenv['DC'] = DC
robjects.globalenv['E01'] = E01
robjects.globalenv['E02'] = E02
robjects.globalenv['df'] = df
robjects.globalenv['phi'] = phi
robjects.globalenv['T2_1'] = T2_1
robjects.globalenv['T2_2'] = T2_2
XY = robjects.FloatVector(numpy.concatenate((X,Y)))
# Arrays
tt = robjects.FloatVector(numpy.array(tt))
X = robjects.FloatVector(numpy.array(X))
Y = robjects.FloatVector(numpy.array(Y))
Zero = robjects.FloatVector(numpy.array(Zero))
if BiExp:
if CorrectDC:
fmla = robjects.Formula('XY ~ c(Xc2( E01, E02, df, tt, phi, T2_1, T2_2, DC ), Yc2( E01, E02, df, tt, phi, T2_1, T2_2, DC ))')
if CorrectFreq:
start = robjects.r('list(E01=.100, E02=.01, df=0, phi=0. , T2_1=.100, T2_2=.01, DC=0.0)')
lower = robjects.r('list(E01=1e-6, E02=1e-6, df=-50, phi=-3.14 , T2_1=.001, T2_2=.001, DC=0.0)')
upper = robjects.r('list(E01=1.00, E02=1.0, df=50, phi=3.14 , T2_1=.800, T2_2=.8, DC=0.5)')
else:
start = robjects.r('list(E01=.100, E02=.01, phi=0.9 , T2_1=.100, T2_2=.01, DC=0.0)')
lower = robjects.r('list(E01=1e-6, E02=1e-6, phi=-3.14 , T2_1=.001, T2_2=.001, DC=0.0)')
upper = robjects.r('list(E01=1.00, E02=1.0, phi=3.14 , T2_1=.800, T2_2=.8, DC=0.5)')
else:
fmla = robjects.Formula('XY ~ c(Xc2( E01, E02, df, tt, phi, T2_1, T2_2 ), Yc2( E01, E02, df, tt, phi, T2_1, T2_2))')
if CorrectFreq:
start = robjects.r('list(E01=.100, E02=.01, df=0, phi=0. , T2_1=.100, T2_2=.01)')
lower = robjects.r('list(E01=1e-6, E02=1e-6, df=-50, phi=-3.14 , T2_1=.001, T2_2=.001)')
upper = robjects.r('list(E01=1.00, E02=1.0, df=50, phi=3.14 , T2_1=.800, T2_2=.8)')
else:
start = robjects.r('list(E01=.100, E02=.01, phi=0.9 , T2_1=.100, T2_2=.01)')
lower = robjects.r('list(E01=1e-6, E02=1e-6, phi=-3.14 , T2_1=.001, T2_2=.001)')
upper = robjects.r('list(E01=1.00, E02=1.0, phi=3.14 , T2_1=.800, T2_2=.8)')
else:
if CorrectDC:
fmla = robjects.Formula('XY ~ c(Xc1( E01, df, tt, phi, T2_1, DC), Yc1( E01, df, tt, phi, T2_1,DC))')
if CorrectFreq:
start = robjects.r('list(E01=.100, df=0 , phi=0. , T2_1=.100, DC=0.0)')
lower = robjects.r('list(E01=1e-6, df=-50., phi=-3.14, T2_1=.001, DC=0.0)')
upper = robjects.r('list(E01=1.00, df=50. , phi=3.14 , T2_1=.800, DC=0.5)')
else:
start = robjects.r('list(E01=.100, phi= 0. , T2_1=.100, DC=0.0)')
lower = robjects.r('list(E01=1e-6, phi=-3.13, T2_1=.001, DC=0.0)')
upper = robjects.r('list(E01=1.00, phi= 3.13, T2_1=.800, DC=0.5)')
else:
fmla = robjects.Formula('XY ~ c(Xc1( E01, df, tt, phi, T2_1), Yc1( E01, df, tt, phi, T2_1))')
if CorrectFreq:
start = robjects.r('list(E01=.100, df=0 , phi=0. , T2_1=.100)')
lower = robjects.r('list(E01=1e-6, df=-50. , phi=-3.14 , T2_1=.001)')
upper = robjects.r('list(E01=1.00, df=50. , phi=3.14 , T2_1=.800)')
else:
start = robjects.r('list(E01=.100, phi= 0. , T2_1=.100)')
lower = robjects.r('list(E01=1e-6, phi=-3.13, T2_1=.001)')
upper = robjects.r('list(E01=1.00, phi= 3.13, T2_1=.800)')
env = fmla.getenvironment()
env['Zero'] = Zero
env['X'] = X
env['Y'] = Y
env['XY'] = XY
env['tt'] = tt
cont = robjects.r('nls.control(maxiter=10000, warnOnly=TRUE, printEval=FALSE)')
fit = robjects.r.tryCatch(robjects.r.nls(fmla, start=start, control=cont, lower=lower, upper=upper, algorithm='port')) #, \
#fit = robjects.r.tryCatch(robjects.r.nls(fmla, start=start, control=cont)) #, \
report = r.summary(fit)
conv = r['$'](fit,'convergence')[0]
#if conv:
# print (report)
# print ("conv", conv)
print ("Conv", r['$'](fit,'convergence')) # T2
print (report)
if BiExp:
if CorrectFreq:
E0 = r['$'](report,'par')[0] # E01
E0 += r['$'](report,'par')[1] # E02
df = r['$'](report,'par')[2] # offset
phi = r['$'](report,'par')[3] # phase
T2 = r['$'](report,'par')[4] # T2
else:
E0 = r['$'](report,'par')[0] # E01
E0 += r['$'](report,'par')[1] # E02
phi = r['$'](report,'par')[2] # phase
T2 = r['$'](report,'par')[3] # T2
else:
if CorrectFreq:
E0 = r['$'](report,'par')[0] # E01
df = r['$'](report,'par')[1] # offset
phi = r['$'](report,'par')[2] # phase
T2 = r['$'](report,'par')[3] # T2
else:
E0 = r['$'](report,'par')[0] # E01
phi = r['$'](report,'par')[1] # phase
T2 = r['$'](report,'par')[2] # T2
#phi = 0.907655876627
#phi = 0
#print ("df", df)# = 0
return conv, E0,df,phi,T2
#################################################
# Regress for T2 using rpy2 interface
def regressSpec(w, wL, X): #,sigma2=1,intercept=True):
# compute s
s = -1j*w
# TODO, if regression fails, it might be because there is no exponential
# term, maybe do a second regression then on a linear model.
a = 0 # Linear
rT2 = 0.1 # T2 regressed
r = robjects.r
# Variable shared between R and Python
robjects.globalenv['a'] = a
robjects.globalenv['rT2'] = rT2
robjects.globalenv['wL'] = wL
robjects.globalenv['nb'] = 0
s = robjects.ComplexVector(numpy.array(s))
XX = robjects.ComplexVector(X)
Xr = robjects.FloatVector(numpy.real(X))
Xi = robjects.FloatVector(numpy.imag(X))
Xa = robjects.FloatVector(numpy.abs(X))
Xri = robjects.FloatVector(numpy.concatenate((Xr,Xi)))
#my_lower = robjects.r('list(a=.001, rT2=.001, nb=.0001)')
my_lower = robjects.r('list(a=.001, rT2=.001)')
#my_upper = robjects.r('list(a=1.5, rT2=.300, nb =100.)')
my_upper = robjects.r('list(a=1.5, rT2=.300)')
#my_list = robjects.r('list(a=.2, rT2=0.03, nb=.1)')
my_list = robjects.r('list(a=.2, rT2=0.03)')
my_cont = robjects.r('nls.control(maxiter=5000, warnOnly=TRUE, printEval=FALSE)')
#fmla = robjects.Formula('Xri ~ c(a*Re((wL) / (wL^2+(s+1/rT2)^2 )), a*Im((wL)/(wL^2 + (s+1/rT2)^2 )))') # envelope
##fmla = robjects.Formula('Xri ~ c(a*Re((wL) / (wL^2+(s+1/rT2)^2 )), a*Im((wL)/(wL^2 + (s+1/rT2)^2 )))') # envelope
#fmla = robjects.Formula('XX ~ a*(wL) / (wL^2 + (s+1/rT2)^2 )') # complex
#fmla = robjects.Formula('Xa ~ abs(a*(wL) / (wL^2 + (s+1/rT2)^2 )) + nb') # complex
fmla = robjects.Formula('Xa ~ abs(a*(wL) / (wL^2 + (s+1/rT2)^2 ))') # complex
env = fmla.getenvironment()
env['s'] = s
env['Xr'] = Xr
env['Xa'] = Xa
env['Xi'] = Xi
env['Xri'] = Xri
env['XX'] = XX
#fit = robjects.r.tryCatch(robjects.r.nls(fmla,start=my_list, control=my_cont)) #, lower=my_lower, algorithm='port')) #, \
fit = robjects.r.tryCatch(robjects.r.nls(fmla, start=my_list, control=my_cont, lower=my_lower, upper=my_upper, algorithm='port')) #, \
report = r.summary(fit)
#print report
#print r.warnings()
a = r['$'](report,'par')[0]
rT2 = r['$'](report,'par')[1]
nb = r['$'](report,'par')[2]
return a, rT2, nb
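# ---------------------------------------------------------------------------
# For reference, the amplitude model fitted above, written out in numpy
# (illustration only): with s = -1j*w, the magnitude spectrum of a decaying
# sinusoid at Larmor frequency wL with relaxation time rT2 and amplitude a is
# |a*wL / (wL**2 + (s + 1/rT2)**2)|.
def _regressSpecForwardModel(w, wL, a, rT2):
    s = -1j*w
    return np.abs(a*wL / (wL**2 + (s + 1./rT2)**2))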
#################################################
# Regress for T2 using rpy2 interface
def regressSpecComplex(w, wL, X): #,sigma2=1,intercept=True):
# compute s
s = -1j*w
# TODO, if regression fails, it might be because there is no exponential
# term, maybe do a second regression then on a linear model.
a = 1 # Linear
rT2 = 0.1 # T2 regressed
r = robjects.r
phi2 = 0 # phase
wL2 = wL
# Variable shared between R and Python
robjects.globalenv['a'] = a
robjects.globalenv['rT2'] = rT2
robjects.globalenv['wL'] = wL
robjects.globalenv['wL2'] = 0
robjects.globalenv['nb'] = 0
robjects.globalenv['phi2'] = phi2
s = robjects.ComplexVector(numpy.array(s))
XX = robjects.ComplexVector(X)
Xr = robjects.FloatVector(numpy.real(X))
Xi = robjects.FloatVector(numpy.imag(X))
Xa = robjects.FloatVector(numpy.abs(X))
Xri = robjects.FloatVector(numpy.concatenate((X.real,X.imag)))
robjects.r('''
source('kernel.r')
''')
#Kw = robjects.globalenv['Kwri']
#print (numpy.shape(X))
#my_lower = robjects.r('list(a=.001, rT2=.001, nb=.0001)')
#my_lower = robjects.r('list(a=.001, rT2=.001)') # Working
my_lower = robjects.r('list(a=.001, rT2=.001, phi2=-3.14, wL2=wL-5)')
#my_upper = robjects.r('list(a=1.5, rT2=.300, nb =100.)')
my_upper = robjects.r('list(a=3.5, rT2=.300, phi2=3.14, wL2=wL+5)')
#my_list = robjects.r('list(a=.2, rT2=0.03, nb=.1)')
my_list = robjects.r('list(a=.2, rT2=0.03, phi2=0, wL2=wL)')
my_cont = robjects.r('nls.control(maxiter=5000, warnOnly=TRUE, printEval=FALSE)')
#fmla = robjects.Formula('Xri ~ c(a*Re((wL) / (wL^2+(s+1/rT2)^2 )), a*Im((wL)/(wL^2 + (s+1/rT2)^2 )))') # envelope
#fmla = robjects.Formula('Xi ~ Im(a*(sin(phi2)*s + ((1/rT2)*sin(phi2)) + wL*cos(phi2)) / (wL^2+(s+1/rT2)^2 ))') # envelope
#fmla = robjects.Formula('Xri ~ c(Re(a*(sin(phi2)*s + ((1/rT2)*sin(phi2)) + wL*cos(phi2)) / (wL^2+(s+1/rT2)^2 )), Im(a*(sin(phi2)*s + ((1/rT2)*sin(phi2)) + wL*cos(phi2)) / (wL^2+(s+1/rT2)^2 )))') # envelope
#fmlar = robjects.Formula('Xr ~ (Kwr(a, phi2, s, rT2, wL)) ') # envelope
#fmlai = robjects.Formula('Xi ~ (Kwi(a, phi2, s, rT2, wL)) ') # envelope
fmla = robjects.Formula('Xri ~ c(Kwr(a, phi2, s, rT2, wL2), Kwi(a, phi2, s, rT2, wL2) ) ') # envelope
#fmla = robjects.Formula('Xri ~ (Kwri(a, phi2, s, rT2, wL)) ') # envelope
#fmla = robjects.Formula('Xa ~ (abs(a*(sin(phi2)*s + ((1/rT2)*sin(phi2)) + wL*cos(phi2)) / (wL^2+(s+1/rT2)^2 )))') # envelope
#fmla = robjects.Formula('XX ~ a*(wL) / (wL^2 + (s+1/rT2)^2 )') # complex
#fmla = robjects.Formula('Xa ~ abs(a*(wL) / (wL^2 + (s+1/rT2)^2 )) + nb') # complex
#fmla = robjects.Formula('Xri ~ c(a*Re((wL) / (wL^2+(s+1/rT2)^2 )), a*Im((wL)/(wL^2 + (s+1/rT2)^2 )))') # envelope
# self.Gw[iw, iT2] = ((np.sin(phi2) * (alpha + 1j*self.w[iw]) + self.wL*np.cos(phi2)) / \
# (self.wL**2 + (alpha+1.j*self.w[iw])**2 ))
# self.Gw[iw, iT2] = ds * self.sc*((np.sin(phi2)*( alpha + 1j*self.w[iw]) + self.wL*np.cos(phi2)) / \
# (self.wL**2 + (alpha+1.j*self.w[iw])**2 ))
# Works Amplitude Only!
#fmla = robjects.Formula('Xa ~ abs(a*(wL) / (wL^2 + (s+1/rT2)^2 ))') # complex
env = fmla.getenvironment()
env['s'] = s
env['Xr'] = Xr
env['Xa'] = Xa
env['Xi'] = Xi
env['Xri'] = Xri
env['XX'] = XX
fit = robjects.r.tryCatch(robjects.r.nls(fmla,start=my_list, control=my_cont)) #, lower=my_lower, algorithm='port')) #, \
#fitr = robjects.r.tryCatch(robjects.r.nls(fmlar, start=my_list, control=my_cont, lower=my_lower, upper=my_upper, algorithm='port')) #, \
#env = fmlai.getenvironment()
#fiti = robjects.r.tryCatch(robjects.r.nls(fmlai, start=my_list, control=my_cont, lower=my_lower, upper=my_upper, algorithm='port')) #, \
#reportr = r.summary(fitr)
#reporti = r.summary(fiti)
report = r.summary(fit)
#print( report )
#exit()
#print( reportr )
#print( reporti )
#exit()
#print r.warnings()
#a = (r['$'](reportr,'par')[0] + r['$'](reporti,'par')[0]) / 2.
#rT2 = (r['$'](reportr,'par')[1] + r['$'](reporti,'par')[1]) / 2.
#nb = (r['$'](reportr,'par')[2] + r['$'](reporti,'par')[2]) / 2.
a = r['$'](report,'par')[0]
rT2 = r['$'](report,'par')[1]
nb = r['$'](report,'par')[2] #phi2
print ("Python wL2", r['$'](report,'par')[3] )
print ("Python zeta", r['$'](report,'par')[2] )
return a, rT2, nb
###################################################################
###################################################################
###################################################################
if __name__ == "__main__":
dt = .0001
T2 = .1
omega = 2000.*2*numpy.pi
phi = .0
T = 8.*T2
t = numpy.arange(0, T, dt)
# Synthetic data, simple single decaying sinusoid
# with a single decay parameter and gaussian noise added
data = numpy.exp(-t/T2) * numpy.sin(omega * t + phi) + numpy.random.normal(0,.05,len(t)) \
+ numpy.random.randint(-1,2,len(t))*numpy.random.exponential(.2,len(t))
cdata = numpy.exp(-t/T2) * numpy.sin(omega * t + phi) #+ numpy.random.normal(0,.25,len(t))
#data = numpy.random.normal(0,.25,len(t))
sigma2 = numpy.std(data[::-len(data)//4]) # integer step: every (len/4)-th sample, reversed
#sigma2 = numpy.var(data[::-len(data)/4])
print("sigma2", sigma2)
[peaks,times,indices] = peakPicker(data, omega, dt)
[b1,b2,rT2] = regressCurve(peaks,times)
print("rT2 nonweighted", rT2)
[b1,b2,rT2] = regressCurve(peaks,times,sigma2)
print("rT2 weighted", rT2)
envelope = numpy.exp(-t/T2)
renvelope = numpy.exp(-t/rT2)
#outf = file('regress.txt','w')
#for i in range(len(times)):
# outf.write(str(times[i]) + " " + str(peaks[i]) + "\n")
#outf.close()
plt.plot(t,data, 'b')
plt.plot(t,cdata, 'g', linewidth=1)
plt.plot(t,envelope, color='violet', linewidth=4)
plt.plot(t,renvelope, 'r', linewidth=4)
plt.plot(times, numpy.array(peaks), 'bo', markersize=8, alpha=.25)
plt.legend(['noisy data','clean data','real envelope','regressed env','picks'])
plt.savefig("regression.pdf")
# FFT check
fourier = fft(data)
plt.figure()
freq = fftfreq(len(data), d=dt)
plt.plot(freq, (fourier.real))
plt.show()
# TODO do a bunch in batch mode to see if T2 estimate is better with or without
# weighting and which model is best.
# TODO try with real data
# TODO test filters (median, FFT, notch)
# It looks like weighting is good for relatively low sigma, but for noisy data
# it hurts us. Check

#################################################
# end of file: decay-old.py (Akvo-1.7.2, akvo/tressel/decay-old.py)
#################################################
from PyQt5.QtCore import *
import numpy as np
import scipy.signal as signal
import pylab
import sys
import scipy
from scipy import stats
import copy
import struct, glob
from scipy.io.matlab import mio
import pandas as pd
from numpy import pi
from math import floor
import matplotlib as mpl
from matplotlib.ticker import FuncFormatter
import matplotlib.font_manager as fm
import matplotlib.pyplot as plt
import matplotlib.ticker
from matplotlib.ticker import MaxNLocator
import seaborn as sns
from akvo.tressel.SlidesPlot import deSpine
import multiprocessing
import itertools
import padasip as pa
import akvo.tressel.adapt as adapt
#import akvo.tressel.cadapt as adapt # cython for more faster
import akvo.tressel.decay as decay
import akvo.tressel.pca as pca
import akvo.tressel.rotate as rotate
import akvo.tressel.cmaps as cmaps
import akvo.tressel.harmonic as harmonic
import cmocean # colormaps for geophysical data
# not needed with latest matplotlib
#plt.register_cmap(name='viridis', cmap=cmaps.viridis)
#plt.register_cmap(name='inferno', cmap=cmaps.inferno)
#plt.register_cmap(name='inferno_r', cmap=cmaps.inferno_r)
#plt.register_cmap(name='magma', cmap=cmaps.magma)
#plt.register_cmap(name='magma_r', cmap=cmaps.magma_r)
def xxloadGMRBinaryFID( rawfname, info ):
""" Reads a single binary GMR file and fills into DATADICT
"""
#################################################################################
# figure out key data indices
# Pulse
nps = (int)((info["prePulseDelay"])*info["samp"])
npul = (int)(self.pulseLength[0]*self.samp) #+ 100
# Data
nds = nps+npul+(int)((self.deadTime)*self.samp); # indice pulse 1 data starts
nd1 = (int)(1.*self.samp) # samples in first pulse
invGain = 1./self.RxGain
invCGain = self.CurrentGain
pulse = "Pulse 1"
chan = self.DATADICT[pulse]["chan"]
rchan = self.DATADICT[pulse]["rchan"]
rawFile = open( rawfname, 'rb')
# NOTE: N_samp is only read from the file header inside the loop below, so T and
# TIMES as written here would fail at runtime; this "xx"-prefixed loader appears vestigial.
T = N_samp * self.dt
TIMES = np.arange(0, T, self.dt) - .0002 # small offset in GMR DAQ?
for ipm in range(self.nPulseMoments):
buf1 = rawFile.read(4)
buf2 = rawFile.read(4)
N_chan = struct.unpack('>i', buf1 )[0]
N_samp = struct.unpack('>i', buf2 )[0]
DATA = np.zeros([N_samp, N_chan+1])
for ichan in range(N_chan):
DATADUMP = rawFile.read(4*N_samp)
for irec in range(N_samp):
DATA[irec,ichan] = struct.unpack('>f', DATADUMP[irec*4:irec*4+4])[0]
return DATA, TIMES
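# ---------------------------------------------------------------------------
# Record layout assumed by the loop above (as recovered from the reads): each
# pulse moment is stored as two big-endian int32 header words, N_chan and
# N_samp, followed by N_chan blocks of N_samp big-endian float32 samples, e.g.
#
#   N_chan = struct.unpack('>i', rawFile.read(4))[0]
#   N_samp = struct.unpack('>i', rawFile.read(4))[0]
#   trace  = struct.unpack('>%df' % N_samp, rawFile.read(4*N_samp))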
class SNMRDataProcessor(QObject):
""" Revised class for preprocessing sNMR Data.
Derived types can read GMR files
"""
def __init__(self):
QObject.__init__(self)
self.numberOfMoments = 0
self.numberOfPulsesPerMoment = 0
self.pulseType = "NONE"
self.transFreq = 0
self.pulseLength = np.zeros(1)
self.nPulseMoments = 0
self.dt = 0
def mfreqz(self, b,a=1):
""" Plots the frequency response of a filter specified with a and b weights
"""
import scipy.signal as signal
pylab.figure(124)
w,h = signal.freqz(b,a)
w /= max(w)
w *= .5/self.dt
h_dB = 20 * pylab.log10 (abs(h))
pylab.subplot(211)
#pylab.plot(w/max(w),h_dB)
pylab.plot(w,h_dB)
pylab.ylim(-150, 5)
pylab.ylabel('Magnitude (dB)')
#pylab.xlabel(r'Normalized Frequency (x$\pi$rad/sample)')
pylab.xlabel(r'Hz')
pylab.title(r'Frequency response')
pylab.subplot(212)
h_Phase = pylab.unwrap(pylab.arctan2(pylab.imag(h), pylab.real(h)))
#pylab.plot(w/max(w),h_Phase)
pylab.plot(w,h_Phase)
pylab.ylabel('Phase (radians)')
pylab.xlabel(r'Hz')
#pylab.xlabel(r'Normalized Frequency (x$\pi$rad/sample)')
pylab.title(r'Phase response')
pylab.subplots_adjust(hspace=0.5)
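# -----------------------------------------------------------------------
# Usage sketch for mfreqz (illustration only): design a low-pass filter
# with scipy.signal and inspect its response, e.g.
#
#   proc = GMRDataProcessor()
#   b, a = signal.butter(4, 0.1)   # 4th-order Butterworth, normalized cutoff
#   proc.mfreqz(b, a)
#   pylab.show()
#
# mfreqz rescales the normalized frequency axis by .5/self.dt, so self.dt
# must be set first (GMRDataProcessor sets it in __init__).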
def mfreqz2(self, b, a, canvas):
"for analysing filt-filt"
import scipy.signal as signal
canvas.reAx2(False,False)
canvas.ax1.tick_params(axis='both', which='major', labelsize=8)
canvas.ax2.tick_params(axis='both', which='major', labelsize=8)
#canvas.ax2.tick_params(axis='both', which='minor', labelsize=6)
#pylab.figure(124)
w,h = signal.freqz(b,a)
w /= max(w)
w *= .5/self.dt
h_dB = 20 * pylab.log10(abs(h*h) + 1e-16)
#ab.subplot(211)
#pylab.plot(w/max(w),h_dB)
canvas.ax1.plot(w,h_dB)
canvas.ax1.set_ylim(-150, 5)
canvas.ax1.set_ylabel('Magnitude [db]', fontsize=8)
#pylab.xlabel(r'Normalized Frequency (x$\pi$rad/sample)')
canvas.ax1.set_xlabel(r'[Hz]', fontsize=8)
canvas.ax1.set_title(r'Frequency response', fontsize=8)
canvas.ax1.grid(True)
tt = np.arange(0, .02, self.dt)
impulse = signal.dimpulse((self.filt_z, self.filt_p, self.filt_k, self.dt), t=tt)
#impulse = signal.dstep((self.filt_z, self.filt_p, self.filt_k, self.dt), t=tt)
#print impulse
#for ii in range(len(impulse[1])):
impulse_dB = 20.*np.log10(np.abs(np.array(impulse[1][0])))
#canvas.ax2.plot(np.array(impulse[0]), impulse_dB)
canvas.ax2.plot(np.array(impulse[0]), impulse[1][0])
#h_Phase = pylab.unwrap(pylab.arctan2(pylab.imag(h), pylab.real(h)))
#canvas.ax2.plot(w,h_Phase)
canvas.ax2.set_ylabel('response [%]', fontsize=8)
canvas.ax2.set_xlabel(r'time [s]', fontsize=8)
canvas.ax2.set_title(r'impulse response', fontsize=8)
#canvas.ax2.grid(True)
canvas.draw()
# search for last
return impulse #[np.where(impulse[1][0] > .01)[-1]]
class GMRDataProcessor(SNMRDataProcessor):
# slots
progressTrigger = pyqtSignal("int")
doneTrigger = pyqtSignal()
enableDSPTrigger = pyqtSignal()
updateProcTrigger = pyqtSignal()
def __init__(self):
SNMRDataProcessor.__init__(self)
self.maxBusV = 0.
self.samp = 50000. # sampling frequency
self.dt = 2e-5 # sampling interval (s)
self.deadTime = .0055 # instrument dead time before measurement
self.prePulseDelay = 0.05 # delay before pulse
self.windead = 0. # FD window filter dead time
self.pulseType = -1
self.transFreq = -1
self.maxBusV = -1
self.pulseLength = -1
self.interpulseDelay = -1 # for T2, Spin Echo
self.repetitionDelay = -1 # delay between first pulse
self.nPulseMoments = -1 # Number of pulse moments per stack
self.TuneCapacitance = -1 # tuning capac in uF
self.nTransVersion = -1 # Transmitter version
self.nDAQVersion = -1 # DAQ software version
self.nInterleaves = -1 # num interleaves
# self.nReceiveChannels = 4 # Num receive channels
self.RotatedAmplitude = False
# self.DATA = np.zeros(1) # Numpy array to hold all data, dimensions resized based on experiment
# self.PULSES = np.zeros(1) # Numpy array to hold all data, dimensions resized based on experiment
def Print(self):
print ("pulse type", self.pulseType)
print ("maxBusV", self.maxBusV)
print ("inner pulse delay", self.interpulseDelay)
print ("tuning capacitance", self.TuneCapacitance)
print ("sampling rate", self.samp)
print ("dt", self.dt)
print ("dead time", self.deadTime)
print ("pre pulse delay", self.prePulseDelay)
print ("number of pulse moments", self.nPulseMoments)
print ("pulse Length", self.pulseLength)
print ("trans freq", self.transFreq)
def readHeaderFile(self, FileName):
HEADER = np.loadtxt(FileName)
pulseTypeDict = {
1 : lambda: "FID",
2 : lambda: "T1",
3 : lambda: "SPINECHO",
4 : lambda: "4PhaseT1"
}
pulseLengthDict = {
1 : lambda x: np.ones(1) * x,
2 : lambda x: np.ones(2) * x,
3 : lambda x: np.array([x, 2.*x]),
4 : lambda x: np.ones(2) * x
}
self.pulseType = pulseTypeDict.get((int)(HEADER[0]))()
self.transFreq = HEADER[1]
self.maxBusV = HEADER[2]
self.pulseLength = pulseLengthDict.get((int)(HEADER[0]))(1e-3*HEADER[3])
self.interpulseDelay = 1e-3*HEADER[4] # for T2, Spin Echo
self.repetitionDelay = HEADER[5] # delay between first pulse
self.nPulseMoments = (int)(HEADER[6]) # Number of pulse moments per stack
self.TuneCapacitance = HEADER[7] # tuning capacitance in uF
self.nTransVersion = HEADER[8] # Transmitter version
self.nDAQVersion = HEADER[9] # DAQ software version
self.nInterleaves = HEADER[10] # num interleaves
self.Instrument = "GMR"
self.gain()
# default
self.samp = 50000. # sampling frequency
self.dt = 2e-5 # sampling interval (s)
# newer header files contain 64 entries
if self.nDAQVersion >= 2:
#self.deadtime = HEADER[11]
#self.unknown = HEADER[12]
#self.PreAmpGain = HEADER[13]
self.samp = HEADER[14] # sampling frequency
self.dt = 1./self.samp # sampling interval (s)
self.deadTime = .0055 # instrument dead time before measurement
self.prePulseDelay = 0.05 # delay before pulse
#exit()
def gain(self):
#######################################################
# Circuit gain
# From MRSMatlab
w = 2*np.pi*self.transFreq
# 1e6 due to uF of reported capacitance
L_coil = 1e6/(self.TuneCapacitance*(w**2))
R_coil = 1.
Z1_in = .5 + 1j*.5*w
Z2_in = 1./(1j*w*.000001616)
Z_eq_inv = (1./Z1_in) + (1./Z2_in)
Zeq = 1./Z_eq_inv
Zsource = R_coil + 1j*w*L_coil
voltage_in = Zeq / (Zsource + Zeq)
self.circuitGain = np.abs(voltage_in)
self.circuitPhase_deg = (180/np.pi)*np.angle(voltage_in) # circuit phase converted to degrees
circuitImpedance_ohms = np.abs(Zsource + Zeq)
######################################################
# PreAmp gain
if self.nTransVersion == 4:
self.PreAmpGain = 1000.
elif self.nTransVersion == 1 or self.nTransVersion == 2 or self.nTransVersion == 3 or self.nTransVersion == 6:
self.PreAmpGain = 500.
else:
print ("unsupported transmitter version")
exit(1)
# Total Receiver Gain
self.RxGain = self.circuitGain * self.PreAmpGain
#####################################################
# Current gain
if floor(self.nDAQVersion) == 1:
self.CurrentGain = 150.
elif floor(self.nDAQVersion) == 2:
self.CurrentGain = 180.
def updateProgress(self):
pass
def TDSmartStack(self, outlierTest, MADcutoff, canvas):
fs = 10 # fontsize
#print("Line 300 in mrsurvey")
Stack = {}
# align for stacking and modulate
for pulse in self.DATADICT["PULSES"]:
stack = np.zeros(( len(self.DATADICT[pulse]["chan"]), self.DATADICT["nPulseMoments"],\
len(self.DATADICT["stacks"]), len(self.DATADICT[pulse]["TIMES"]) ))
for ipm in range(self.DATADICT["nPulseMoments"]):
istack = 0
for sstack in self.DATADICT["stacks"]:
if self.pulseType == "FID" or pulse == "Pulse 2":
if self.Instrument == "MIDI 2":
mod = 1
elif self.Instrument == "GMR" and floor(self.nDAQVersion) < 2:
mod = 1
else:
mod = (-1.)**(ipm%2) * (-1.)**(sstack%2)
elif self.pulseType == "T1":
#mod = (-1.)**(sstack%2)
#mod = (-1)**(ipm%2) * (-1)**(sstack%2)
#mod = (-1)**(ipm%2) * (-1.**(((sstack-1)/2)%2))
#print("mod", mod, ipm, sstack, (-1.)**(ipm%2), -1.0**(((sstack-1)/2)%2 ))
#mod = (-1.)**((ipm+1)%2) * (-1.**(((sstack)/2)%2))
#mod = (-1.)**((ipm-1)%2) * (-1.)**((sstack-1)%2)
#mod = 1 # (-1.**(((sstack-1)/2)%2))
# These two give great noise estimate
#qcycler = np.array([1,-1,-1,1])
#scycler = np.array([1,-1,1,-1])
qcycler = np.array([ 1, 1])
scycler = np.array([ 1, 1])
mod = qcycler.take([ipm], mode='wrap')*scycler.take([sstack], mode='wrap')
#mod = (-1.)**(ipm%2) * (-1.)**(sstack%2)
elif self.pulseType == "4PhaseT1":
mod = (-1.)**(ipm%2) * (-1.)**(((sstack-1)//2)%2) # integer division and explicit base, matching the FD stack modulation
ichan = 0
for chan in self.DATADICT[pulse]["chan"]:
stack[ichan,ipm,istack,:] += mod*self.DATADICT[pulse][chan][ipm][sstack]
ichan += 1
istack += 1
Stack[pulse] = stack
#########################################
# simple stack and plot of simple stack #
#########################################
canvas.reAxH2(np.shape(stack)[0], False, False)
axes = canvas.fig.axes
SimpleStack = {}
VarStack = {}
for pulse in self.DATADICT["PULSES"]:
SimpleStack[pulse] = {}
VarStack[pulse] = {}
ichan = 0
for chan in self.DATADICT[pulse]["chan"]:
SimpleStack[pulse][chan] = 1e9*np.average( Stack[pulse][ichan], 1 )
VarStack[pulse][chan] = 1e9*np.std( Stack[pulse][ichan], 1 )
ax1 = axes[ 2*ichan ]
#ax1.get_yaxis().get_major_formatter().set_useOffset(False)
y_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
ax1.yaxis.set_major_formatter(y_formatter)
ax1.plot( 1e3*self.DATADICT[pulse]["TIMES"], np.average( SimpleStack[pulse][chan], 0 )) #, color='darkblue' )
ax1.set_title("Ch." + str(chan) + ": avg FID", fontsize=fs)
ax1.set_xlabel(r"time (ms)", fontsize=fs)
if ichan == 0:
ax1.set_ylabel(r"signal (nV)", fontsize=fs)
else:
plt.setp(ax1.get_yticklabels(), visible=False)
plt.setp(ax1.get_yaxis().get_offset_text(), visible=False)
# if ichan == 1:
# canvas.ax2.plot( 1e3*self.DATADICT[pulse]["TIMES"], np.average( SimpleStack[pulse][chan], 0 ), color='darkblue' )
# canvas.ax2.set_title("Ch." + str(chan) + ": total average FID", fontsize=8)
# canvas.ax2.set_xlabel(r"time [ms]", fontsize=8)
# if ichan == 2:
# canvas.ax3.plot( 1e3*self.DATADICT[pulse]["TIMES"], np.average( SimpleStack[pulse][chan], 0 ), color='darkblue' )
# canvas.ax3.set_title("Ch." + str(chan) + ": total average FID", fontsize=8)
# canvas.ax3.set_xlabel(r"time [ms]", fontsize=8)
# if ichan == 3:
# canvas.ax4.plot( 1e3*self.DATADICT[pulse]["TIMES"], np.average( SimpleStack[pulse][chan], 0 ), color='darkblue' )
# canvas.ax4.set_title("Ch." + str(chan) + ": total average FID", fontsize=8)
# canvas.ax4.set_xlabel(r"time [ms]", fontsize=8)
ichan += 1
#########################
# Outlier rejecting stack #
#########################
if outlierTest == "MAD":
MADStack = {}
VarStack = {}
# 1.4826 scales the MAD to a standard deviation under a Gaussian noise assumption
madstack = np.zeros(( len(self.DATADICT[pulse]["chan"]),\
self.DATADICT["nPulseMoments"], len(self.DATADICT[pulse]["TIMES"]) ))
varstack = np.zeros(( len(self.DATADICT[pulse]["chan"]),\
self.DATADICT["nPulseMoments"], len(self.DATADICT[pulse]["TIMES"]) ))
for pulse in self.DATADICT["PULSES"]:
MADStack[pulse] = {}
VarStack[pulse] = {}
ichan = 0
for chan in self.DATADICT[pulse]["chan"]:
ax1 = axes[ 2*ichan ]
for ipm in range(self.DATADICT["nPulseMoments"]):
# # brutal loop over time, can this be vectorized?
# for it in range(len(self.DATADICT[pulse]["TIMES"])):
# x = 1e9 *Stack[pulse][ichan,ipm,:,it]
# MAD = 1.4826 * np.median( np.abs(x-np.median(x)) )
# good = 0
# for istack in self.DATADICT["stacks"]:
# if (np.abs(x[istack-1]-np.median(x))) / MAD < 2:
# good += 1
# madstack[ ichan, ipm, it ] += x[istack-1]
# else:
# pass
# madstack[ichan, ipm, it] /= good
# percent = int(1e2* (float)(ipm) / (float)(self.DATADICT["nPulseMoments"]) )
# self.progressTrigger.emit(percent)
# Vectorized version of above...much, much faster
x = 1e9*copy.deepcopy(Stack[pulse][ichan][ipm,:,:]) # stack and time indices
tile_med = np.tile( np.median(x, axis=0), (np.shape(x)[0],1))
MAD = MADcutoff * np.median(np.abs(x - tile_med), axis=0)
tile_MAD = np.tile( MAD, (np.shape(x)[0],1))
good = np.abs(x-tile_med)/tile_MAD < 2. # 1.4826 # 2
madstack[ichan][ipm] = copy.deepcopy( np.ma.masked_array(x, good != True).mean(axis=0) )
varstack[ichan][ipm] = copy.deepcopy( np.ma.masked_array(x, good != True).std(axis=0) )
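# Worked illustration of the MAD mask just computed (comment only): for a
# single time sample across 5 stacks, say x = [10, 11, 9, 50, 10] nV, the
# median is 10 and MADcutoff*median(|x - 10|) with MADcutoff = 1.4826 gives
# ~1.48, so only the 50 nV outlier fails |x - med|/MAD < 2 and is masked out
# of the mean and std taken above.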
# reporting
percent = int(1e2* (float)((ipm)+ichan*self.DATADICT["nPulseMoments"]) /
(float)(self.DATADICT["nPulseMoments"] * len(self.DATADICT[pulse]["chan"])))
self.progressTrigger.emit(percent)
ax1.plot( 1e3*self.DATADICT[pulse]["TIMES"], np.average( madstack[ichan], 0 ))# , color='darkred')
MADStack[pulse][chan] = madstack[ichan]
VarStack[pulse][chan] = varstack[ichan]
ichan += 1
self.DATADICT["stack"] = MADStack
else:
self.DATADICT["stack"] = SimpleStack
#########################################
# Plot Fourier Transform representation #
#########################################
# canvas.fig.subplots_adjust(right=0.8)
# cbar_ax = canvas.fig.add_axes([0.85, 0.1, 0.015, 0.355])
# cbar_ax.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
im2 = []
im1 = []
for pulse in self.DATADICT["PULSES"]:
ichan = 0
axes = canvas.fig.axes
vvmin = 1e10
vvmax = 0
for chan in self.DATADICT[pulse]["chan"]:
ax1 = axes[2*ichan ]
ax2 = axes[2*ichan+1] # TODO fix hard coded number
if outlierTest == "MAD":
X = np.fft.rfft( MADStack[pulse][chan][0,:] )
nu = np.fft.fftfreq(len( MADStack[pulse][chan][0,:]), d=self.dt)
else:
X = np.fft.rfft( SimpleStack[pulse][chan][0,:] )
nu = np.fft.fftfreq(len( SimpleStack[pulse][chan][0,:]), d=self.dt)
nu = nu[0:len(X)]
nu[-1] = np.abs(nu[-1])
df = nu[1] - nu[0]
of = 0
istart = int((self.transFreq-50.)/df)
iend = int((self.transFreq+50.)/df)
of = nu[istart]
def freqlabel(xxx, pos):
return '%1.0f' %(of + xxx*df)
formatter = FuncFormatter(freqlabel)
SFFT = np.zeros( (self.DATADICT["nPulseMoments"], len(X)), dtype=np.complex64 )
SFFT[0,:] = X
for ipm in range(1, self.DATADICT["nPulseMoments"]):
if outlierTest == "MAD":
SFFT[ipm,:] = np.fft.rfft( MADStack[pulse][chan][ipm,:] )
else:
SFFT[ipm,:] = np.fft.rfft( SimpleStack[pulse][chan][ipm,:] )
# convert to dB and add colorbars
#db = 20.*np.log10(np.abs(SFFT[:,istart:iend]))
db = (np.abs(SFFT[:,istart:iend]))
#db = (np.real(SFFT[:,istart:iend]))
#db = (np.imag(SFFT[:,istart:iend]))
#dbr = (np.real(SFFT[:,istart:iend]))
#db = (np.imag(SFFT[:,istart:iend]))
vvmin = min(vvmin, np.min(db) + 1e-16 )
vvmax = max(vvmax, np.max(db) + 1e-16 )
im2.append(ax2.matshow( db, aspect='auto', cmap=cmocean.cm.ice, vmin=vvmin, vmax=vvmax))
#im1.append(ax1.matshow( dbr, aspect='auto')) #, vmin=vvmin, vmax=vvmax))
#im2.append(ax2.matshow( db, aspect='auto', vmin=vvmin, vmax=vvmax))
#im2 = ax2.matshow( db, aspect='auto', cmap=cmocean.cm.ice, vmin=vvmin, vmax=vvmax)
if ichan == 0:
#ax2.set_ylabel(r"$q$ (A $\cdot$ s)", fontsize=8)
ax2.set_ylabel(r"pulse index", fontsize=10)
#ax1.set_ylabel(r"FID (nV)", fontsize=8)
else:
#ax2.yaxis.set_ticklabels([])
plt.setp(ax2.get_yticklabels(), visible=False)
ax2.xaxis.set_major_formatter(formatter)
ax2.xaxis.set_ticks_position('bottom')
ax2.xaxis.set_major_locator(MaxNLocator(3))
y_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
ax2.yaxis.set_major_formatter(y_formatter)
#if chan == self.DATADICT[pulse]["chan"][-1]:
#cb2 = canvas.fig.colorbar(im2, cax=cbar_ax, format='%1.0e')
#cb2 = canvas.fig.colorbar(im2[0], ax=ax2, format='%1.0e', orientation='horizontal')
#cb2 = canvas.fig.colorbar(im2, ax=ax2, format='%1.0e', orientation='horizontal')
#cb2.ax.tick_params(axis='both', which='major', labelsize=8)
#cb2.set_label("signal (dB)", fontsize=8)
ichan += 1
canvas.fig.subplots_adjust(hspace=.35, wspace=.15, left=.15, right=.8 )#left=None, bottom=None, right=None, top=None, wspace=None, hspace=None)
deSpine(ax1)
#cb1 = canvas.fig.colorbar(im, ax=axes[0::2], format='%1.0e', orientation='horizontal', shrink=.35, aspect=30)
#cb1.ax.tick_params(axis='both', which='major', labelsize=8)
#cb1.set_label("$\mathcal{V}_N$ (nV)", fontsize=8)
cb2 = canvas.fig.colorbar(im2[-1], ax=axes[1::2], format='%1.0e', orientation='horizontal', shrink=.35, aspect=30)
cb2.ax.tick_params(axis='both', which='major', labelsize=fs)
cb2.set_label(r"$\left| \mathcal{V}_N \right|$ (nV)", fontsize=fs)
#canvas.fig.tight_layout()
canvas.draw()
self.doneTrigger.emit()
def harmonicModel(self, nF, \
f0, f0K1, f0KN, f0Ks, f0ns, \
f1, f1K1, f1KN, f1Ks, \
Nsearch, Bounds, procRefs, \
plot, canvas):
""" nF = number of base frequencies, must be 1 or 2
f0 = first base frequency
f0K1 = first harmonic to model for first base frequency
f0KN = last harmonic to model for the first base frequency
f0Ks = subharmonic spacing, set to 1 for no subharmonics.
f0ns = number of segments the record is split into for the f0 fit
f1 = second base frequency
f1K1 = first harmonic to model for second base frequency
f1KN = last harmonic to model for the second base frequency
f1Ks = subharmonic spacing for the second base frequency, set to 1 for no subharmonics.
Nsearch = the number of harmonics to use when determining base frequency
Bounds = 1/2 the width of the frequency window in which the base frequency is searched
procRefs = should the reference loops be processed as well
plot = should Akvo plot the results
canvas = mpl plotting axis
"""
TDPlot = True
fs = 10
if plot:
canvas.reAx2(shy=False)
canvas.ax1.set_ylabel(r"signal (nV)", fontsize=fs)
canvas.ax2.set_ylabel(r"signal (nV)", fontsize=fs)
if TDPlot:
canvas.ax2.set_xlabel(r"time (s)", fontsize=fs)
else:
canvas.ax2.set_xlabel(r"frequency (Hz)", fontsize=fs)
canvas.ax1.set_yscale('log')
canvas.ax2.set_yscale('log')
# Data
iFID = 0
# stores previous f0 as starting point in non-linear search
f0p = {}
f1p = {}
for pulse in self.DATADICT["PULSES"]:
for rchan in self.DATADICT[pulse]["rchan"]:
f0p[rchan] = f0
f1p[rchan] = f1+1e-1
for chan in self.DATADICT[pulse]["chan"]:
f0p[chan] = f0
f1p[chan] = f1+1e-1
for pulse in self.DATADICT["PULSES"]:
Nseg = int( np.floor(len( self.DATADICT[pulse]["TIMES"] ) / f0ns) )
for istack in self.DATADICT["stacks"]:
for ipm in range(self.DATADICT["nPulseMoments"]):
if plot:
canvas.softClear()
mmaxr = 0
mmaxd = 0
if procRefs:
for ichan in self.DATADICT[pulse]["rchan"]:
if TDPlot:
canvas.ax1.plot( self.DATADICT[pulse]["TIMES"], 1e9*self.DATADICT[pulse][ichan][ipm][istack], alpha=.5)
mmaxr = max( mmaxr, np.max(1e9*self.DATADICT[pulse][ichan][ipm][istack]))
else:
ww = np.fft.fftfreq(len(self.DATADICT[pulse][ichan][ipm][istack]), d=self.dt)
X = np.fft.rfft(self.DATADICT[pulse][ichan][ipm][istack])
canvas.ax1.plot(np.abs(ww[0:len(X)]), np.abs(X), alpha=.5)
canvas.ax1.set_prop_cycle(None)
canvas.ax1.set_ylim(-mmaxr, mmaxr)
for ichan in self.DATADICT[pulse]["chan"]:
if TDPlot:
canvas.ax2.plot( self.DATADICT[pulse]["TIMES"], 1e9*self.DATADICT[pulse][ichan][ipm][istack], alpha=.5)
mmaxd = max( mmaxd, np.max(1e9*self.DATADICT[pulse][ichan][ipm][istack]))
else:
ww = np.fft.fftfreq(len(self.DATADICT[pulse][ichan][ipm][istack]), d=self.dt)
X = np.fft.rfft(self.DATADICT[pulse][ichan][ipm][istack])
canvas.ax2.plot(np.abs(ww[0:len(X)]), np.abs(X), alpha=.5)
canvas.ax2.set_prop_cycle(None)
canvas.ax2.set_ylim(-mmaxd, mmaxd)
if procRefs:
for ichan in self.DATADICT[pulse]["rchan"]:
if nF == 1:
for iseg in range(f0ns):
if iseg < f0ns-1:
self.DATADICT[pulse][ichan][ipm][istack][iseg*Nseg:(iseg+1)*Nseg], f0p[ichan] = \
harmonic.minHarmonic( self.DATADICT[pulse][ichan][ipm][istack][iseg*Nseg:(iseg+1)*Nseg], \
self.samp, self.DATADICT[pulse]["TIMES"][iseg*Nseg:(iseg+1)*Nseg], \
f0p[ichan], f0K1, f0KN, f0Ks, Bounds, Nsearch )
else:
self.DATADICT[pulse][ichan][ipm][istack][iseg*Nseg::], f0p[ichan] = \
harmonic.minHarmonic( self.DATADICT[pulse][ichan][ipm][istack][iseg*Nseg::], \
self.samp, self.DATADICT[pulse]["TIMES"][iseg*Nseg::], \
f0p[ichan], f0K1, f0KN, f0Ks, Bounds, Nsearch )
elif nF == 2:
for iseg in range(f0ns):
if iseg < f0ns-1:
self.DATADICT[pulse][ichan][ipm][istack][iseg*Nseg:(iseg+1)*Nseg], f0p[ichan], f1p[ichan] = \
harmonic.minHarmonic2( self.DATADICT[pulse][ichan][ipm][istack][iseg*Nseg:(iseg+1)*Nseg],\
self.samp, self.DATADICT[pulse]["TIMES"][iseg*Nseg:(iseg+1)*Nseg], \
f0p[ichan], f0K1, f0KN, f0Ks, \
f1p[ichan], f1K1, f1KN, f1Ks, Bounds, Nsearch )
else:
self.DATADICT[pulse][ichan][ipm][istack][iseg*Nseg::], f0p[ichan], f1p[ichan] = \
harmonic.minHarmonic2( self.DATADICT[pulse][ichan][ipm][istack][iseg*Nseg::],\
self.samp, self.DATADICT[pulse]["TIMES"][iseg*Nseg::], \
f0p[ichan], f0K1, f0KN, f0Ks, \
f1p[ichan], f1K1, f1KN, f1Ks, Bounds, Nsearch )
# plot
if plot:
if TDPlot:
canvas.ax1.plot( self.DATADICT[pulse]["TIMES"], 1e9*self.DATADICT[pulse][ichan][ipm][istack], \
label = pulse + " ipm=" + str(ipm) + " istack=" + str(istack) + " rchan=" + str(ichan))
else:
ww = np.fft.fftfreq(len(self.DATADICT[pulse][ichan][ipm][istack]), d=self.dt)
X = np.fft.rfft(self.DATADICT[pulse][ichan][ipm][istack])
canvas.ax1.plot(np.abs(ww[0:len(X)]), np.abs(X),\
label = pulse + " ipm=" + str(ipm) + " istack=" + str(istack) + " rchan=" + str(ichan))
for ichan in self.DATADICT[pulse]["chan"]:
if nF == 1:
for iseg in range(f0ns):
if iseg < f0ns-1:
self.DATADICT[pulse][ichan][ipm][istack][iseg*Nseg:(iseg+1)*Nseg], f0p[ichan] = \
harmonic.minHarmonic( self.DATADICT[pulse][ichan][ipm][istack][iseg*Nseg:(iseg+1)*Nseg],
self.samp, self.DATADICT[pulse]["TIMES"][iseg*Nseg:(iseg+1)*Nseg], \
f0p[ichan], f0K1, f0KN, f0Ks, Bounds, Nsearch )
else:
self.DATADICT[pulse][ichan][ipm][istack][iseg*Nseg::], f0p[ichan] = \
harmonic.minHarmonic( self.DATADICT[pulse][ichan][ipm][istack][iseg*Nseg::],
self.samp, self.DATADICT[pulse]["TIMES"][iseg*Nseg::], \
f0p[ichan], f0K1, f0KN, f0Ks, Bounds, Nsearch )
elif nF == 2:
for iseg in range(f0ns):
if iseg < f0ns-1:
self.DATADICT[pulse][ichan][ipm][istack][iseg*Nseg:(iseg+1)*Nseg], f0p[ichan], f1p[ichan] = \
harmonic.minHarmonic2( self.DATADICT[pulse][ichan][ipm][istack][iseg*Nseg:(iseg+1)*Nseg],\
self.samp, self.DATADICT[pulse]["TIMES"][iseg*Nseg:(iseg+1)*Nseg], \
f0p[ichan], f0K1, f0KN, f0Ks, \
f1p[ichan], f1K1, f1KN, f1Ks, Bounds, Nsearch )
else:
self.DATADICT[pulse][ichan][ipm][istack][iseg*Nseg::], f0p[ichan], f1p[ichan] = \
harmonic.minHarmonic2( self.DATADICT[pulse][ichan][ipm][istack][iseg*Nseg::],\
self.samp, self.DATADICT[pulse]["TIMES"][iseg*Nseg::], \
f0p[ichan], f0K1, f0KN, f0Ks, \
f1p[ichan], f1K1, f1KN, f1Ks, Bounds, Nsearch )
# plot
if plot:
if TDPlot:
canvas.ax2.plot( self.DATADICT[pulse]["TIMES"], 1e9*self.DATADICT[pulse][ichan][ipm][istack], \
label = pulse + " ipm=" + str(ipm) + " istack=" + str(istack) + " chan=" + str(ichan))
else:
ww = np.fft.fftfreq(len(self.DATADICT[pulse][ichan][ipm][istack]), d=self.dt)
X = np.fft.rfft(self.DATADICT[pulse][ichan][ipm][istack])
canvas.ax2.plot(np.abs(ww[0:len(X)]), np.abs(X), \
label = pulse + " ipm=" + str(ipm) + " istack=" + str(istack) + " chan=" + str(ichan))
if plot:
if procRefs:
canvas.ax1.legend(prop={'size':fs}, loc='upper right')
plt.setp(canvas.ax1.get_xticklabels(), visible=False)
canvas.ax2.legend(prop={'size':fs}, loc='upper right')
deSpine(canvas.ax1)
deSpine(canvas.ax2)
canvas.fig.tight_layout()
canvas.draw()
percent = (int)(1e2*((ipm+istack*self.nPulseMoments)/(self.nPulseMoments*len(self.DATADICT["stacks"]))))
self.progressTrigger.emit(percent)
iFID += 1
self.doneTrigger.emit()
self.updateProcTrigger.emit()
self.doneTrigger.emit()
def FDSmartStack(self, outlierTest, MADcutoff, canvas):
print("FFT stuff")
self.dataCubeFFT()
Stack = {}
# align phase cycling for stacking and modulate
for pulse in self.DATADICT["PULSES"]:
stack = np.zeros(( len(self.DATADICT[pulse]["chan"]), \
self.DATADICT["nPulseMoments"],\
len(self.DATADICT["stacks"]),\
len(self.DATADICT[pulse][self.DATADICT[pulse]["chan"][0] ]["FFT"]["nu"])//2 + 1),\
dtype=complex )
for ipm in range(self.DATADICT["nPulseMoments"]):
istack = 0
for sstack in self.DATADICT["stacks"]:
if self.pulseType == "FID" or pulse == "Pulse 2":
mod = (-1)**(ipm%2) * (-1)**(sstack%2)
elif self.pulseType == "4PhaseT1":
mod = (-1)**(ipm%2) * (-1)**(((sstack-1)//2)%2) # integer division keeps the exponent an integer
ichan = 0
for chan in self.DATADICT[pulse]["chan"]:
#stack[ichan,ipm,istack,:] += mod*self.DATADICT[pulse][chan][ipm][sstack]
stack[ichan,ipm,istack,:] += mod*self.DATADICT[pulse][chan]["FFT"][sstack][ipm,:]
ichan += 1
istack += 1
Stack[pulse] = stack
#########################################
# simple stack and plot of simple stack #
#########################################
canvas.reAxH2(np.shape(stack)[0], False, False)
axes = canvas.fig.axes
SimpleStack = {}
VarStack = {}
for pulse in self.DATADICT["PULSES"]:
SimpleStack[pulse] = {}
VarStack[pulse] = {}
ichan = 0
for chan in self.DATADICT[pulse]["chan"]:
SimpleStack[pulse][chan] = 1e9*np.average( Stack[pulse][ichan], 1 )
VarStack[pulse][chan] = 1e9*np.std( Stack[pulse][ichan], 1 )
ax1 = axes[ 2*ichan ]
#ax1.get_yaxis().get_major_formatter().set_useOffset(False)
y_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
ax1.yaxis.set_major_formatter(y_formatter)
#ax1.plot( 1e3*self.DATADICT[pulse][chan]["FFT"]["nu"][0:len(SimpleStack[pulse][chan])], np.average(SimpleStack[pulse][chan], 0 )) #, color='darkblue' )
#ax1.pcolor( np.real(SimpleStack[pulse][chan]) ) #, color='darkblue' )
ax1.matshow( np.real(SimpleStack[pulse][chan]), aspect='auto') #, color='darkblue' )
ax1.set_title("Ch." + str(chan) + ": avg FID", fontsize=10)
ax1.set_xlabel(r"time (ms)", fontsize=10)
if ichan == 0:
ax1.set_ylabel(r"signal [nV]", fontsize=10)
else:
plt.setp(ax1.get_yticklabels(), visible=False)
plt.setp(ax1.get_yaxis().get_offset_text(), visible=False)
ichan += 1
#########################
# Outlier rejecting stack #
#########################
if outlierTest == "MAD":
MADStack = {}
VarStack = {}
# 1.4826 scales the MAD to a standard deviation under a Gaussian noise assumption
madstack = np.zeros(( len(self.DATADICT[pulse]["chan"]),\
self.DATADICT["nPulseMoments"],\
len(self.DATADICT[pulse][self.DATADICT[pulse]["chan"][0] ]["FFT"]["nu"])//2 + 1))
varstack = np.zeros(( len(self.DATADICT[pulse]["chan"]),\
self.DATADICT["nPulseMoments"],\
len(self.DATADICT[pulse][self.DATADICT[pulse]["chan"][0] ]["FFT"]["nu"])//2 + 1))
for pulse in self.DATADICT["PULSES"]:
MADStack[pulse] = {}
VarStack[pulse] = {}
ichan = 0
for chan in self.DATADICT[pulse]["chan"]:
ax1 = axes[ 2*ichan ]
for ipm in range(self.DATADICT["nPulseMoments"]):
# # brutal loop over time, can this be vectorized?
# for it in range(len(self.DATADICT[pulse]["TIMES"])):
# x = 1e9 *Stack[pulse][ichan,ipm,:,it]
# MAD = 1.4826 * np.median( np.abs(x-np.median(x)) )
# good = 0
# for istack in self.DATADICT["stacks"]:
# if (np.abs(x[istack-1]-np.median(x))) / MAD < 2:
# good += 1
# madstack[ ichan, ipm, it ] += x[istack-1]
# else:
# pass
# madstack[ichan, ipm, it] /= good
# percent = int(1e2* (float)(ipm) / (float)(self.DATADICT["nPulseMoments"]) )
# self.progressTrigger.emit(percent)
# Vectorized version of above...much, much faster
x = 1e9*copy.deepcopy(Stack[pulse][ichan][ipm,:,:]) # stack and time indices
tile_med = np.tile( np.median(x, axis=0), (np.shape(x)[0],1))
MAD = MADcutoff * np.median(np.abs(x - tile_med), axis=0)
tile_MAD = np.tile( MAD, (np.shape(x)[0],1))
good = np.abs(x-tile_med)/tile_MAD < 2. # 1.4826 # 2
madstack[ichan][ipm] = copy.deepcopy( np.ma.masked_array(x, good != True).mean(axis=0) )
varstack[ichan][ipm] = copy.deepcopy( np.ma.masked_array(x, good != True).std(axis=0) )
# reporting
percent = int(1e2* (float)((ipm)+ichan*self.DATADICT["nPulseMoments"]) /
(float)(self.DATADICT["nPulseMoments"] * len(self.DATADICT[pulse]["chan"])))
self.progressTrigger.emit(percent)
ax2 = axes[2*ichan+1] # TODO fix hard coded number
#ax1.plot( 1e3*self.DATADICT[pulse]["TIMES"], np.average( madstack[ichan], 0 ))# , color='darkred')
MADStack[pulse][chan] = madstack[ichan]
VarStack[pulse][chan] = varstack[ichan]
ax2.matshow( np.real(MADStack[pulse][chan]), aspect='auto') #, color='darkblue' )
ichan += 1
self.DATADICT["stack"] = MADStack
else:
self.DATADICT["stack"] = SimpleStack
# #########################################
# # Plot Fourier Transform representation #
# #########################################
#
# # canvas.fig.subplots_adjust(right=0.8)
# # cbar_ax = canvas.fig.add_axes([0.85, 0.1, 0.015, 0.355])
# # cbar_ax.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
# im2 = []
# im1 = []
# for pulse in self.DATADICT["PULSES"]:
# ichan = 0
# axes = canvas.fig.axes
# vvmin = 1e10
# vvmax = 0
# for chan in self.DATADICT[pulse]["chan"]:
# ax1 = axes[2*ichan ]
# ax2 = axes[2*ichan+1] # TODO fix hard coded number
# if outlierTest == "MAD":
# X = np.fft.rfft( MADStack[pulse][chan][0,:] )
# nu = np.fft.fftfreq(len( MADStack[pulse][chan][0,:]), d=self.dt)
# else:
# X = np.fft.rfft( SimpleStack[pulse][chan][0,:] )
# nu = np.fft.fftfreq(len( SimpleStack[pulse][chan][0,:]), d=self.dt)
#
# nu = nu[0:len(X)]
# nu[-1] = np.abs(nu[-1])
# df = nu[1] - nu[0]
# of = 0
#
# istart = int((self.transFreq-50.)/df)
# iend = int((self.transFreq+50.)/df)
# of = nu[istart]
#
# def freqlabel(xxx, pos):
# return '%1.0f' %(of + xxx*df)
# formatter = FuncFormatter(freqlabel)
#
# SFFT = np.zeros( (self.DATADICT["nPulseMoments"], len(X)), dtype=np.complex64 )
# SFFT[0,:] = X
# for ipm in range(1, self.DATADICT["nPulseMoments"]):
# if outlierTest == "MAD":
# SFFT[ipm,:] = np.fft.rfft( MADStack[pulse][chan][ipm,:] )
# else:
# SFFT[ipm,:] = np.fft.rfft( SimpleStack[pulse][chan][ipm,:] )
#
# # convert to dB and add colorbars
# #db = 20.*np.log10(np.abs(SFFT[:,istart:iend]))
# db = (np.abs(SFFT[:,istart:iend]))
# #db = (np.real(SFFT[:,istart:iend]))
# #dbr = (np.real(SFFT[:,istart:iend]))
# #db = (np.imag(SFFT[:,istart:iend]))
#
# vvmin = min(vvmin, np.min (db))
# vvmax = max(vvmax, np.max (db))
# im2.append(ax2.matshow( db, aspect='auto', cmap=cmocean.cm.ice, vmin=vvmin, vmax=vvmax))
# #im1.append(ax1.matshow( dbr, aspect='auto')) #, vmin=vvmin, vmax=vvmax))
# #im2.append(ax2.matshow( db, aspect='auto', vmin=vvmin, vmax=vvmax))
# #im2 = ax2.matshow( db, aspect='auto', cmap=cmocean.cm.ice, vmin=vvmin, vmax=vvmax)
# if ichan == 0:
# ax2.set_ylabel(r"$q$ (A $\cdot$ s)", fontsize=8)
# else:
# #ax2.yaxis.set_ticklabels([])
# plt.setp(ax2.get_yticklabels(), visible=False)
#
# ax2.xaxis.set_major_formatter(formatter)
# ax2.xaxis.set_ticks_position('bottom')
# ax2.xaxis.set_major_locator(MaxNLocator(3))
#
# y_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
# ax2.yaxis.set_major_formatter(y_formatter)
#
#
# #if chan == self.DATADICT[pulse]["chan"][-1]:
# #cb2 = canvas.fig.colorbar(im2, cax=cbar_ax, format='%1.0e')
#
# #cb2 = canvas.fig.colorbar(im2[0], ax=ax2, format='%1.0e', orientation='horizontal')
# #cb2 = canvas.fig.colorbar(im2, ax=ax2, format='%1.0e', orientation='horizontal')
# #cb2.ax.tick_params(axis='both', which='major', labelsize=8)
# #cb2.set_label("signal (dB)", fontsize=8)
#
# ichan += 1
#
#
# canvas.fig.subplots_adjust(hspace=.1, wspace=.05, left=.075, right=.95 )#left=None, bottom=None, right=None, top=None, wspace=None, hspace=None)
#
# #cb1 = canvas.fig.colorbar(im, ax=axes[0::2], format='%1.0e', orientation='horizontal', shrink=.35, aspect=30)
# #cb1.ax.tick_params(axis='both', which='major', labelsize=8)
# #cb1.set_label("$\mathcal{V}_N$ (nV)", fontsize=8)
#
# cb2 = canvas.fig.colorbar(im2[-1], ax=axes[1::2], format='%1.0e', orientation='horizontal', shrink=.35, aspect=30)
# cb2.ax.tick_params(axis='both', which='major', labelsize=8)
# cb2.set_label("$\mathcal{V}_N$ (nV)", fontsize=8)
#canvas.fig.tight_layout()
deSpine(ax1)
canvas.draw()
self.doneTrigger.emit()
def sumData(self, canvas, plusminus, sumAll):
print("In sumData", plusminus, sumAll)
if plusminus == "sum":
splus = "+"
else:
splus = "-"
chans = copy.deepcopy(self.DATADICT[self.DATADICT["PULSES"][0]]["chan"]) #= np.array( ( self.DATADICT[pulse]["chan"][0], ) )
nchan = len(chans)
# Sum permutations of two channel combos
for ich in range(nchan-1):
for ch in chans[ich+1:]:
chsum = chans[ich] + splus + ch
for pulse in self.DATADICT["PULSES"]:
self.DATADICT[pulse][chsum] = {}
for ipm in range(self.DATADICT["nPulseMoments"]):
self.DATADICT[pulse][chsum][ipm] = {}
for istack in self.DATADICT["stacks"]:
if plusminus == "sum":
self.DATADICT[pulse][chsum][ipm][istack] = self.DATADICT[pulse][chans[ich]][ipm][istack] + self.DATADICT[pulse][ch][ipm][istack]
else:
self.DATADICT[pulse][chsum][ipm][istack] = self.DATADICT[pulse][chans[ich]][ipm][istack] - self.DATADICT[pulse][ch][ipm][istack]
#if chsum == "1+2":
#self.DATADICT[pulse]["rchan"].pop()
#self.DATADICT[pulse]["rchan"].pop()
self.DATADICT[pulse]["chan"].append(chsum)
# Sum all channels
#sumall = False
if sumAll:
chsum = ""
for ch in chans:
chsum += ch + "+"
chsum = chsum[0:-1] # remove last "+"
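# Example of the naming convention (assuming channels named "1", "2", "3"): the
# pairwise loop above adds pseudo-channels "1+2", "1+3", "2+3" (or "1-2", ... when
# differencing), and this block appends the single all-channel sum "1+2+3".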
for pulse in self.DATADICT["PULSES"]:
self.DATADICT[pulse][chsum] = {}
for ipm in range(self.DATADICT["nPulseMoments"]):
self.DATADICT[pulse][chsum][ipm] = {}
for istack in self.DATADICT["stacks"]:
self.DATADICT[pulse][chsum][ipm][istack] = copy.deepcopy(self.DATADICT[pulse][chans[0]][ipm][istack])
for ch in chans[1:]:
self.DATADICT[pulse][chsum][ipm][istack] += self.DATADICT[pulse][ch][ipm][istack]
self.DATADICT[pulse]["chan"].append(chsum)
# if nchan > 2:
# for ch in chans:
# chsum += ch
# for ch2 in chans[1::]:
# for pulse in self.DATADICT["PULSES"]:
# self.DATADICT[pulse][chsum] = {}
# for istack in self.DATADICT["stacks"]:
# self.DATADICT[pulse][chsum][ipm][istack] = self.DATADICT[pulse][chans[ich]][ipm][istack] + self.DATADICT[pulse][ch][ipm][istack]
self.doneTrigger.emit()
def quadDet(self, clip, method, loss, canvas):
from scipy import signal
self.RotatedAmplitude = True
wL = self.transFreq * 2*np.pi
vL = self.transFreq
#T = 50
dt = self.dt
#DT = 0.01
CA = {} # corrected amplitude
IP = {} # instantaneous phase
NR = {} # Noise residual
RE = {} # Real channel
IM = {} # Imaginary channel
# global maximums for plotting
CAmax = {}
NRmax = {}
REmax = {}
IMmax = {}
E0,phi,df,T2 = 100.,0,0,.2
first = False
self.sigma = {}
for pulse in self.DATADICT["PULSES"]:
CA[pulse] = {}
IP[pulse] = {}
NR[pulse] = {}
RE[pulse] = {}
IM[pulse] = {}
CAmax[pulse] = 0
NRmax[pulse] = 0
REmax[pulse] = 0
IMmax[pulse] = 0
ichan = 0
self.sigma[pulse] = {}
for chan in self.DATADICT[pulse]["chan"]:
CA[pulse][chan] = np.zeros( (self.DATADICT["nPulseMoments"], len(self.DATADICT[pulse]["TIMES"])-clip ) )
IP[pulse][chan] = np.zeros( (self.DATADICT["nPulseMoments"], len(self.DATADICT[pulse]["TIMES"])-clip ) )
NR[pulse][chan] = np.zeros( (self.DATADICT["nPulseMoments"], len(self.DATADICT[pulse]["TIMES"])-clip ) )
RE[pulse][chan] = np.zeros( (self.DATADICT["nPulseMoments"], len(self.DATADICT[pulse]["TIMES"])-clip ) )
IM[pulse][chan] = np.zeros( (self.DATADICT["nPulseMoments"], len(self.DATADICT[pulse]["TIMES"])-clip ) )
#QQ = np.average(self.DATADICT[pulse]["Q"], axis=1 )
#for ipm in np.argsort(QQ):
for ipm in range(0, self.DATADICT["nPulseMoments"]):
#t = self.DATADICT[pulse]["TIMES"] - self.DATADICT[pulse]["PULSE_TIMES"][-1]
xn = self.DATADICT["stack"][pulse][chan][ipm,:]
ht = signal.hilbert(xn)*np.exp(-1j*wL*self.DATADICT[pulse]["TIMES"])
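# The analytic signal from the Hilbert transform is multiplied by exp(-1j*wL*t),
# which shifts the component at the transmitter (Larmor) frequency to baseband, so
# the real and imaginary parts of ht are the slowly varying quadrature components
# of the FID. A minimal sketch of the same idea on synthetic data (illustrative
# only; the names and values below are hypothetical):
#   tt = np.arange(0, 1, self.dt)
#   ss = np.exp(-tt/0.2) * np.cos(wL*tt + 0.3)            # decaying sinusoid
#   dd = signal.hilbert(ss) * np.exp(-1j*wL*tt)           # baseband complex FID
#   # np.abs(dd) ~ exp(-tt/0.2) envelope, np.angle(dd) ~ 0.3 (away from the edges)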
#############################################################
# Quadrature signal
RE[pulse][chan][ipm,:] = np.real(ht[clip::]) # *-1 for negative for consistency with VC ??
IM[pulse][chan][ipm,:] = np.imag(ht[clip::])
REmax[pulse] = max(REmax[pulse], np.max(np.real(ht[clip::])))
IMmax[pulse] = max(IMmax[pulse], np.max(np.imag(ht[clip::])))
#############################################################
# Instantaneous phase
IP[pulse][chan][ipm,:] = np.angle(ht)[clip::]
#############################################################
# Rotated amplitude
if ipm != 0:
[success, E0, df, phi, T2] = decay.quadratureDetect2( ht.real, ht.imag, self.DATADICT[pulse]["TIMES"], method, loss, (E0,phi,df,T2))
#[success, E0, df, phi, T2] = decay.quadratureDetect( ht.real, ht.imag, self.DATADICT[pulse]["TIMES"] )
else:
[success, E0, df, phi, T2] = decay.quadratureDetect2( ht.real, ht.imag, self.DATADICT[pulse]["TIMES"], method, loss)
#[success, E0, df, phi, T2] = decay.quadratureDetect2( ht.real, ht.imag, self.DATADICT[pulse]["TIMES"], (E0,phi,df,T2))
#[success, E0, df, phi, T2] = decay.quadratureDetect( ht.real, ht.imag, self.DATADICT[pulse]["TIMES"] )
#print("success", success, "E0", E0, "phi", phi, "df", df, "T2", T2)
D = self.RotateAmplitude( ht.real, ht.imag, phi, df, self.DATADICT[pulse]["TIMES"] )
CA[pulse][chan][ipm,:] = D.imag[clip::] # amplitude data
NR[pulse][chan][ipm,:] = D.real[clip::] # noise data
CAmax[pulse] = max(CAmax[pulse], np.max(D.imag[clip::]) )
NRmax[pulse] = max(NRmax[pulse], np.max(D.real[clip::]) )
self.sigma[pulse][chan] = np.std(NR[pulse][chan])
# reporting
percent = int(1e2* (float)((ipm)+ichan*self.DATADICT["nPulseMoments"]) /
(float)(self.DATADICT["nPulseMoments"] * len(self.DATADICT[pulse]["chan"])))
self.progressTrigger.emit(percent)
ichan += 1
self.DATADICT[pulse]["TIMES"] = self.DATADICT[pulse]["TIMES"][clip::]
self.DATADICT["CA"] = CA
self.DATADICT["IP"] = IP
self.DATADICT["NR"] = NR
self.DATADICT["RE"] = RE
self.DATADICT["IM"] = IM
self.DATADICT["CAmax"] = CAmax
self.DATADICT["NRmax"] = NRmax
self.DATADICT["REmax"] = REmax
self.DATADICT["IMmax"] = IMmax
self.doneTrigger.emit()
def plotQuadDet(self, clip, phase, canvas):
canvas.reAxH2( len(self.DATADICT[ self.DATADICT["PULSES"][0] ]["chan"] ), False, False)
###############
# Plot on GUI #
###############
fs = 10
dcmap = cmocean.cm.curl_r #"seismic_r" #cmocean.cm.balance_r #"RdBu" #YlGn" # "coolwarm_r" # diverging
for pulse in self.DATADICT["PULSES"]:
ichan = 0
axes = canvas.fig.axes
mmaxr = 0.
mmaxi = 0.
#if clip > 0:
# time_sp = 1e3 * (self.DATADICT[pulse]["TIMES"][clip-1::] - self.DATADICT[pulse]["PULSE_TIMES"][-1] )
#else:
# time_sp = 1e3 * (self.DATADICT[pulse]["TIMES"] - self.DATADICT[pulse]["PULSE_TIMES"][-1] )
time_sp = 1e3 * (self.DATADICT[pulse]["TIMES"] - self.DATADICT[pulse]["PULSE_TIMES"][-1] )
QQ = np.average(self.DATADICT[pulse]["Q"], axis=1 )
iQ = np.argsort(QQ)
for chan in self.DATADICT[pulse]["chan"]:
ax1 = axes[2*ichan ]
ax2 = axes[2*ichan+1]
if phase == 0: # Re Im
im1 = ax1.pcolormesh( time_sp, QQ[iQ], self.DATADICT["RE"][pulse][chan][iQ], cmap=dcmap, \
vmin=-self.DATADICT["REmax"][pulse] , vmax=self.DATADICT["REmax"][pulse] , rasterized=True)
im2 = ax2.pcolormesh( time_sp, QQ[iQ], self.DATADICT["IM"][pulse][chan][iQ], cmap=dcmap, \
vmin=-self.DATADICT["IMmax"][pulse], vmax=self.DATADICT["IMmax"][pulse] , rasterized=True )
#im1 = ax1.matshow( self.DATADICT["RE"][pulse][chan][iQ], cmap=dcmap, aspect='auto', \
# vmin=-self.DATADICT["REmax"][pulse] , vmax=self.DATADICT["REmax"][pulse] )
#im2 = ax2.matshow( self.DATADICT["IM"][pulse][chan][iQ], cmap=dcmap, aspect='auto', \
# vmin=-self.DATADICT["REmax"][pulse] , vmax=self.DATADICT["REmax"][pulse] )
if phase == 1: # Amp phase
im1 = ax1.pcolormesh( time_sp, QQ[iQ], self.DATADICT["CA"][pulse][chan][iQ], cmap=dcmap, \
vmin=-self.DATADICT["CAmax"][pulse] , vmax=self.DATADICT["CAmax"][pulse], rasterized=True )
#im2 = ax2.pcolormesh( time_sp, QQ, self.DATADICT["IP"][pulse][chan], cmap=cmocean.cm.balance, rasterized=True,\
im2 = ax2.pcolormesh( time_sp, QQ[iQ], self.DATADICT["IP"][pulse][chan][iQ], cmap=cmocean.cm.delta, \
vmin=-np.pi, vmax=np.pi, rasterized=True)
if phase == 2: # CA NR
im1 = ax1.pcolormesh( time_sp, QQ[iQ], self.DATADICT["CA"][pulse][chan][iQ], cmap=dcmap, \
vmin=-self.DATADICT["CAmax"][pulse] , vmax=self.DATADICT["CAmax"][pulse], rasterized=True )
im2 = ax2.pcolormesh( time_sp, QQ[iQ], self.DATADICT["NR"][pulse][chan][iQ], cmap=dcmap, \
vmin=-self.DATADICT["NRmax"][pulse] , vmax=self.DATADICT["NRmax"][pulse], rasterized=True )
# cb2 = canvas.fig.colorbar(im2, ax=ax2, format='%1.0e')
# cb2.set_label("Noise residual (nV)", fontsize=8)
# cb2.ax.tick_params(axis='both', which='major', labelsize=8)
# cb1 = canvas.fig.colorbar(im1, ax=ax1, format='%1.0e')
# cb1.set_label("Phased amplitude (nV)", fontsize=8)
# cb1.ax.tick_params(axis='both', which='major', labelsize=8)
# cb2 = canvas.fig.colorbar(im2, ax=ax2, format="%1.0e")
# cb2.set_label("Phase (rad)", fontsize=8)
# cb2.ax.tick_params(axis='both', which='major', labelsize=8)
# cb1 = canvas.fig.colorbar(im1, ax=ax1, format="%1.0e")
# cb1.set_label("FID (nV)", fontsize=8)
# cb1.ax.tick_params(axis='both', which='major', labelsize=8)
# if you save these as pdf or eps, there are artefacts
# for cbar in [cb1,cb2]:
# #cbar.solids.set_rasterized(True)
# cbar.solids.set_edgecolor("face")
# reporting
percent = int(1e2* (float)(ichan)/len(self.DATADICT[pulse]["chan"]))
self.progressTrigger.emit(percent)
if ichan == 0:
ax1.set_ylabel(r"$q$ ( $\mathrm{A}\cdot\mathrm{s}$)", fontsize=fs)
ax2.set_ylabel(r"$q$ ( $\mathrm{A}\cdot\mathrm{s}$)", fontsize=fs)
else:
#ax2.yaxis.set_ticklabels([])
#ax1.yaxis.set_ticklabels([])
plt.setp(ax1.get_yticklabels(), visible=False)
plt.setp(ax2.get_yticklabels(), visible=False)
ichan += 1
ax1.set_yscale('log')
ax2.set_yscale('log')
plt.setp(ax1.get_xticklabels(), visible=False)
ax1.set_ylim( np.min(QQ), np.max(QQ) )
ax2.set_ylim( np.min(QQ), np.max(QQ) )
ax1.set_xlim( np.min(time_sp), np.max(time_sp) )
ax2.set_xlim( np.min(time_sp), np.max(time_sp) )
#ax2.set_xlabel(r"Time since end of pulse (ms)", fontsize=8)
ax2.set_xlabel(r"time (ms)", fontsize=fs)
#canvas.fig.subplots_adjust(hspace=.15, wspace=.05, left=.15, right=.85, bottom=.15, top=.9 )#left=None, bottom=None, right=None, top=None, wspace=None, hspace=None)
canvas.fig.subplots_adjust(hspace=.15, wspace=.05, left=.15, right=.90, bottom=.15, top=.95 )#left=None, bottom=None, right=None, top=None, wspace=None, hspace=None)
tick_locator = MaxNLocator(nbins=3)
cb1 = canvas.fig.colorbar(im1, ax=axes[0::2], format='%1.0f', orientation='vertical')
#cb1 = canvas.fig.colorbar(im1, ax=axes[0::2], format='%1.0f', orientation='horizontal', shrink=.35, aspect=30, pad=.4)
cb1.ax.tick_params(axis='both', which='major', labelsize=fs)
cb1.locator = tick_locator
cb1.update_ticks()
tick_locator2 = MaxNLocator(nbins=3)
cb2 = canvas.fig.colorbar(im2, ax=axes[1::2], format='%1.0f', orientation='vertical')
#cb2 = canvas.fig.colorbar(im2, ax=axes[1::2], format='%1.0f', orientation='horizontal', shrink=.35, aspect=30, pad=.4)
cb2.ax.tick_params(axis='both', which='major', labelsize=fs)
if phase == 0: # Re Im
cb1.set_label(r"$\mathrm{Re} \left( \mathcal{V}_N \right)$ (nV)", fontsize=fs)
cb2.set_label(r"$\mathrm{Im} \left( \mathcal{V}_N \right)$ (nV)", fontsize=fs)
elif phase == 1: # Amp phase
cb1.set_label(r"$\left| \mathcal{V}_N \right|$ (nV)", fontsize=fs)
cb2.set_label(r"$\angle \mathcal{V}_N$", fontsize=fs)
else:
cb1.set_label(r"$\left| \mathcal{V}_N \right|$ (nV)", fontsize=fs)
cb2.set_label(r"noise (nV)", fontsize=fs)
cb2.locator = tick_locator2
cb2.update_ticks()
#canvas.fig.tight_layout()
canvas.draw()
self.doneTrigger.emit()
def RotateAmplitude(self, X, Y, zeta, df, t):
V = X + 1j*Y
return np.abs(V) * np.exp( 1j * ( np.angle(V) - zeta - 2.*np.pi*df*t ) )
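# RotateAmplitude rotates the complex FID by -(zeta + 2*pi*df*t), removing the
# estimated constant phase and residual frequency offset; in quadDet the rotated
# imaginary part is then used as the phased amplitude and the real part as the
# noise residual. A minimal sketch on a synthetic signal (assumed values, not from
# real data):
#   tt = np.arange(0, .5, 1e-3)
#   V  = np.exp(-tt/.2) * np.exp(1j*(np.pi/2 + 0.3 + 2*np.pi*1.5*tt))
#   D  = np.abs(V) * np.exp(1j*(np.angle(V) - 0.3 - 2*np.pi*1.5*tt))
#   # D now has constant phase pi/2: D.imag carries exp(-tt/.2), D.real ~ 0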
def gateIntegrate( self, gpd, clip, canvas ):
""" Gate integrate the real, imaginary, phased, and noise residual channels
"""
self.gated = True
self.GATED = {}
for pulse in self.DATADICT["PULSES"]:
QQ = np.average(self.DATADICT[pulse]["Q"], axis=1 )
iQ = np.argsort(QQ)
ichan = 0
for chan in self.DATADICT[pulse]["chan"]:
self.GATED[chan] = {}
for ipm in range(0, self.DATADICT["nPulseMoments"]):
#for ipm in iQ:
# Time since pulse end rather than since record starts...
#if clip > 0:
# time_sp = 1e3 * (self.DATADICT[pulse]["TIMES"][clip:] - self.DATADICT[pulse]["PULSE_TIMES"][-1] )
#else:
time_sp = 1e3 * (self.DATADICT[pulse]["TIMES"] - self.DATADICT[pulse]["PULSE_TIMES"][-1] )
#GT, GD, GTT, sig_stack, isum = rotate.gateIntegrate( self.DATADICT["CA"][pulse][chan][ipm,:], time_sp, gpd, self.sigma[pulse][chan], 1.5 )
#GT2, GP, GTT, sig_stack_err, isum = rotate.gateIntegrate( self.DATADICT["NR"][pulse][chan][ipm,:], time_sp, gpd, self.sigma[pulse][chan], 1.5 )
# err
GT, GCA, GTT, sig_stack, isum = rotate.gateIntegrate( self.DATADICT["CA"][pulse][chan][ipm], time_sp, gpd, self.sigma[pulse][chan], 2 )
GT, GNR, GTT, sig_stack, isum = rotate.gateIntegrate( self.DATADICT["NR"][pulse][chan][ipm], time_sp, gpd, self.sigma[pulse][chan], 2 )
GT, GRE, GTT, sig_stack, isum = rotate.gateIntegrate( self.DATADICT["RE"][pulse][chan][ipm], time_sp, gpd, self.sigma[pulse][chan], 2 )
GT, GIM, GTT, sig_stack, isum = rotate.gateIntegrate( self.DATADICT["IM"][pulse][chan][ipm], time_sp, gpd, self.sigma[pulse][chan], 2 )
GT, GIP, GTT, sig_stack, isum = rotate.gateIntegrate( self.DATADICT["IP"][pulse][chan][ipm], time_sp, gpd, self.sigma[pulse][chan], 2 )
#if ipm == iQ[0]:
if ipm == 0:
# self.GATED[chan]["DATA"] = np.zeros( ( self.DATADICT["nPulseMoments"], len(GT)) )
# self.GATED[chan]["ERR"] = np.zeros( ( self.DATADICT["nPulseMoments"], len(GT)) )
# self.GATED[chan]["SIGMA"] = np.zeros( ( self.DATADICT["nPulseMoments"], len(GT)) )
self.GATED[chan]["CA"] = np.zeros( ( self.DATADICT["nPulseMoments"], len(GT)-clip) )
self.GATED[chan]["NR"] = np.zeros( ( self.DATADICT["nPulseMoments"], len(GT)-clip) )
self.GATED[chan]["BN"] = np.zeros( ( self.DATADICT["nPulseMoments"], len(GT)-clip) )
self.GATED[chan]["RE"] = np.zeros( ( self.DATADICT["nPulseMoments"], len(GT)-clip) )
self.GATED[chan]["IM"] = np.zeros( ( self.DATADICT["nPulseMoments"], len(GT)-clip) )
self.GATED[chan]["IP"] = np.zeros( ( self.DATADICT["nPulseMoments"], len(GT)-clip) )
self.GATED[chan]["isum"] = isum
# Bootstrap noise
#self.GATED[chan]["isum"]
Means = rotate.bootstrapWindows( self.DATADICT["NR"][pulse][chan][ipm], 20000, isum[isum!=1], adapt=True)
# MAD, only for windows > 1
c = stats.norm.ppf(3./4.)
sig_stack[isum!=1] = np.ma.median(np.ma.abs(Means), axis=1) / c
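# MAD-based scale estimate: for roughly zero-mean data, sigma ~ median(|x|)/c with
# c = norm.ppf(3/4) ~ 0.6745, which is consistent with the standard deviation of a
# Gaussian but far less sensitive to outliers than np.std, e.g.
#   for x drawn from N(0, s):  np.median(np.abs(x)) / stats.norm.ppf(0.75)  ->  ~ s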
self.GATED[chan]["BN"][ipm] = sig_stack[clip:]
#self.GATED[chan]["DATA"][ipm] = GD.real
self.GATEDABSCISSA = GT[clip:]
self.GATEDWINDOW = GTT[clip:]
#self.GATED[chan]["SIGMA"][ipm] = sig_stack #_err # GP.real
#self.GATED[chan]["ERR"][ipm] = GP.real
#self.GATED[chan]["CA"][iQ[ipm]] = GCA.real[clip:]
#self.GATED[chan]["NR"][iQ[ipm]] = GNR.real[clip:]
#self.GATED[chan]["RE"][iQ[ipm]] = GRE.real[clip:]
#self.GATED[chan]["IM"][iQ[ipm]] = GIM.real[clip:]
#self.GATED[chan]["IP"][iQ[ipm]] = GIP.real[clip:]
self.GATED[chan]["CA"][ipm] = GCA.real[clip:]
self.GATED[chan]["NR"][ipm] = GNR.real[clip:]
self.GATED[chan]["RE"][ipm] = GRE.real[clip:]
self.GATED[chan]["IM"][ipm] = GIM.real[clip:]
self.GATED[chan]["IP"][ipm] = GIP.real[clip:]
percent = int(1e2* (float)((ipm)+ichan*self.DATADICT["nPulseMoments"]) /
(float)(self.DATADICT["nPulseMoments"] * len(self.DATADICT[pulse]["chan"])))
self.progressTrigger.emit(percent)
self.GATED[chan]["CA"] = self.GATED[chan]["CA"][iQ,:]
self.GATED[chan]["NR"] = self.GATED[chan]["NR"][iQ,:]
self.GATED[chan]["RE"] = self.GATED[chan]["RE"][iQ,:]
self.GATED[chan]["IM"] = self.GATED[chan]["IM"][iQ,:]
self.GATED[chan]["IP"] = self.GATED[chan]["IP"][iQ,:]
self.GATED[chan]["GTT"] = GTT[clip:]
self.GATED[chan]["GT"] = GT[clip:]
self.GATED[chan]["QQ"] = QQ[iQ]
ichan += 1
self.doneTrigger.emit()
def bootstrap_resample(self, X, n=None):
# from http://nbviewer.jupyter.org/gist/aflaxman/6871948
""" Bootstrap resample an array_like
Parameters
----------
X : array_like
data to resample
n : int, optional
length of resampled array, equal to len(X) if n==None
Returns
-------
X_resampled : array_like
    resampled version of X, of length n
"""
if n is None:
n = len(X)
resample_i = np.floor(np.random.rand(n)*len(X)).astype(int)
return X[resample_i]
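# Minimal usage sketch (hypothetical values):
#   x  = np.array([1., 2., 3., 4., 5.])
#   xb = self.bootstrap_resample(x, n=1000)    # 1000 draws from x with replacement
#   np.std(xb)                                 # bootstrap estimate of the spread of x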
def bootstrap_sigma(self, pulse, chan):
# bootstrap resample
nt = len(self.GATED[chan]["GT"])
nb = 5000
XS = np.zeros( (nb, nt) )
for ii in range(nb):
for it in range(nt):
if self.GATED[chan]["isum"][it] < 8:
XS[ii, it] = self.sigma[pulse][chan]
else:
if it == 0:
X = self.bootstrap_resample( np.concatenate( (self.GATED[chan]["NR"][:,it], \
self.GATED[chan]["NR"][:,it+1], \
self.GATED[chan]["NR"][:,it+2], \
self.GATED[chan]["NR"][:,it+3] ) ), n=nt )
elif it == 1:
X = self.bootstrap_resample( np.concatenate( (self.GATED[chan]["NR"][:,it-1], self.GATED[chan]["NR"][:,it], \
self.GATED[chan]["NR"][:,it+1], self.GATED[chan]["NR"][:,it+2] ) ), n=nt )
elif it == nt-2:
X = self.bootstrap_resample( np.concatenate( (self.GATED[chan]["NR"][:,it+1], self.GATED[chan]["NR"][:,it], \
self.GATED[chan]["NR"][:,it-1], self.GATED[chan]["NR"][:,it-2] ) ), n=nt )
elif it == nt-1:
X = self.bootstrap_resample( np.concatenate( (self.GATED[chan]["NR"][:,it], self.GATED[chan]["NR"][:,it-1], \
self.GATED[chan]["NR"][:,it-2], self.GATED[chan]["NR"][:,it-3] ) ), n=nt )
else:
X = self.bootstrap_resample( np.concatenate( (self.GATED[chan]["NR"][:,it-2] , self.GATED[chan]["NR"][:,it-1], \
self.GATED[chan]["NR"][:,it], self.GATED[chan]["NR"][:,it+1], self.GATED[chan]["NR"][:,it+2] )), n=nt )
XS[ii,it] = np.std(X)
return XS
def plotGateIntegrate( self, gpd, clip, phase, canvas ):
""" Plot the gate integration
"""
fs = 10
canvas.reAxH2( len(self.DATADICT[ self.DATADICT["PULSES"][0] ]["chan"] ), False, False)
axes = canvas.fig.axes
#cmap = cmocean.cm.balance_r
dcmap = cmocean.cm.curl_r #"seismic_r" #cmocean.cm.balance_r #"RdBu" #YlGn" # "coolwarm_r" # diverging
# Calculate maximum for plotting...TODO move into loop above
vmax1 = 0
vmax2 = 0
for pulse in self.DATADICT["PULSES"]:
for chan in self.DATADICT[pulse]["chan"]:
if phase == 0:
vmax1 = max(vmax1, np.max(np.abs(self.GATED[chan]["RE"])))
vmax2 = max(vmax2, np.max(np.abs(self.GATED[chan]["IM"])))
elif phase == 1:
vmax1 = max(vmax1, np.max(np.abs(self.GATED[chan]["CA"])))
vmax2 = np.pi
elif phase == 2:
vmax1 = max(vmax1, np.max(np.abs(self.GATED[chan]["CA"])))
vmax2 = max(vmax2, np.max(np.abs(self.GATED[chan]["NR"])))
for pulse in self.DATADICT["PULSES"]:
ichan = 0
for chan in self.DATADICT[pulse]["chan"]:
ax1 = axes[2*ichan ]
ax2 = axes[2*ichan+1]
if phase == 0:
im1 = ax1.pcolormesh(self.GATED[chan]["GTT"], self.GATED[chan]["QQ"], self.GATED[chan]["RE"], cmap=dcmap, vmin=-vmax1, vmax=vmax1)
im2 = ax2.pcolormesh(self.GATED[chan]["GTT"], self.GATED[chan]["QQ"], self.GATED[chan]["IM"], cmap=dcmap, vmin=-vmax2, vmax=vmax2)
#im1 = ax1.matshow(self.GATED[chan]["RE"], cmap=dcmap, vmin=-vmax1, vmax=vmax1, aspect='auto')
#im2 = ax2.matshow(self.GATED[chan]["IM"], cmap=dcmap, vmin=-vmax2, vmax=vmax2, aspect='auto')
elif phase == 1:
im1 = ax1.pcolormesh(self.GATED[chan]["GTT"], self.GATED[chan]["QQ"], self.GATED[chan]["CA"], cmap=dcmap, vmin=-vmax1, vmax=vmax1)
im2 = ax2.pcolormesh(self.GATED[chan]["GTT"], self.GATED[chan]["QQ"], self.GATED[chan]["IP"], cmap=cmocean.cm.delta, vmin=-vmax2, vmax=vmax2)
#im2 = ax2.pcolormesh(self.GATED[chan]["GTT"], self.GATED[chan]["QQ"], self.GATED[chan]["IP"], cmap=cmocean.cm.phase, vmin=-vmax2, vmax=vmax2)
elif phase == 2:
im1 = ax1.pcolormesh(self.GATED[chan]["GTT"], self.GATED[chan]["QQ"], self.GATED[chan]["CA"], cmap=dcmap, vmin=-vmax1, vmax=vmax1)
#XS = self.bootstrap_sigma(pulse, chan)
#im2 = ax2.pcolormesh(self.GATED[chan]["GTT"], self.GATED[chan]["QQ"], self.GATED[chan]["NR"], cmap=cmap, vmin=-vmax2, vmax=vmax2)
# bootstrap resample
# nt = len(self.GATED[chan]["GT"])
# nb = 5000
# XS = np.zeros( (nb, nt) )
# for ii in range(nb):
# #XS = []
# for it in range(nt):
# if self.GATED[chan]["isum"][it] < 8:
# XS[ii, it] = self.sigma[pulse][chan]
# else:
# if it == 0:
# X = self.bootstrap_resample( np.concatenate( (self.GATED[chan]["NR"][:,it], self.GATED[chan]["NR"][:,it+1], \
# self.GATED[chan]["NR"][:,it+2], self.GATED[chan]["NR"][:,it+3] ) ), n=nt )
# if it == 1:
# X = self.bootstrap_resample( np.concatenate( (self.GATED[chan]["NR"][:,it-1], self.GATED[chan]["NR"][:,it], \
# self.GATED[chan]["NR"][:,it+1], self.GATED[chan]["NR"][:,it+2] ) ), n=nt )
# elif it == nt-2:
# X = self.bootstrap_resample( np.concatenate( (self.GATED[chan]["NR"][:,it+1], self.GATED[chan]["NR"][:,it], \
# self.GATED[chan]["NR"][:,it-1], self.GATED[chan]["NR"][:,it-2] ) ), n=nt )
# elif it == nt-1:
# X = self.bootstrap_resample( np.concatenate( (self.GATED[chan]["NR"][:,it], self.GATED[chan]["NR"][:,it-1], \
# self.GATED[chan]["NR"][:,it-2], self.GATED[chan]["NR"][:,it-3] ) ), n=nt )
# else:
# X = self.bootstrap_resample( np.concatenate( (self.GATED[chan]["NR"][:,it-2] , self.GATED[chan]["NR"][:,it-1], \
# self.GATED[chan]["NR"][:,it], self.GATED[chan]["NR"][:,it+1], self.GATED[chan]["NR"][:,it+2] )), n=nt )
# XS[ii,it] = np.std(X)
#if ii == 0:
# ax2.plot( self.GATED[chan]["GT"], XS[ii], '-', linewidth=1, markersize=4, alpha=.5, color='lightgrey', label = "bootstrap sim" )
#else:
# ax2.plot( self.GATED[chan]["GT"], XS[ii], '-', linewidth=1, markersize=4, alpha=.5, color='lightgrey' )
ax2.plot( self.GATED[chan]["GT"], np.std(self.GATED[chan]["NR"], axis=0), color='darkgrey', linewidth=2, label="gate std" )
ax2.plot( self.GATED[chan]["GT"], np.average(self.GATED[chan]["BN"], axis=0), color='black', linewidth=2, label="boot average" )
#ax2.plot( np.tile(self.GATED[chan]["GT"], (5000,1) ), XS, '.', color='lightgrey', linewidth=1, alpha=.5 )
#ax2.plot( self.GATED[chan]["GT"], np.average(XS, axis=0), color='black', linewidth=2, label="bootstrap avg." )
#ax2.plot( self.GATED[chan]["GT"], np.median(XS, axis=0), color='black', linewidth=2, label="bootstrap med." )
ax2.legend()
im1.set_edgecolor('face')
if phase != 2:
im2.set_edgecolor('face')
plt.setp(ax1.get_xticklabels(), visible=False)
ax1.set_ylim( np.min(self.GATED[chan]["QQ"]), np.max(self.GATED[chan]["QQ"]) )
if phase != 2:
ax2.set_ylim( np.min(self.GATED[chan]["QQ"]), np.max(self.GATED[chan]["QQ"]) )
ax1.set_xlim( np.min(self.GATED[chan]["GTT"]), np.max(self.GATED[chan]["GTT"]) )
ax2.set_xlim( np.min(self.GATED[chan]["GTT"]), np.max(self.GATED[chan]["GTT"]) )
ax1.set_yscale('log')
ax2.set_yscale('log')
qlabs = np.append(np.concatenate( ( self.GATED[chan]["QQ"][0:1], self.GATED[chan]["QQ"][9::10] )), self.GATED[chan]["QQ"][-1] )
ax1.yaxis.set_ticks( qlabs ) # np.append(np.concatenate( (QQ[0:1],QQ[9::10] )), QQ[-1] ) )
if phase != 2:
ax2.yaxis.set_ticks( qlabs ) #np.append(np.concatenate( (QQ[0:1],QQ[9::10] )), QQ[-1] ) )
#formatter = matplotlib.ticker.LogFormatter(10, labelOnlyBase=False)
formatter = matplotlib.ticker.FuncFormatter(lambda x, pos: str((round(x,1))))
ax1.set_xscale('log')
ax2.set_xscale('log')
ax1.yaxis.set_major_formatter(formatter) #matplotlib.ticker.FormatStrFormatter('%d.1'))
ax2.yaxis.set_major_formatter(formatter) #matplotlib.ticker.FormatStrFormatter('%d.1'))
ax1.xaxis.set_major_formatter(formatter) #matplotlib.ticker.FormatStrFormatter('%d.1'))
ax2.xaxis.set_major_formatter(formatter) #matplotlib.ticker.FormatStrFormatter('%d.1'))
if ichan == 0:
ax1.set_ylabel(r"$q$ ( $\mathrm{A}\cdot\mathrm{s}$)", fontsize=fs)
if phase == 2:
ax2.set_ylabel(r"noise est. (nV)", fontsize=fs)
else:
ax2.set_ylabel(r"$q$ ( $\mathrm{A}\cdot\mathrm{s}$)", fontsize=fs)
else:
plt.setp(ax1.get_yticklabels(), visible=False)
plt.setp(ax2.get_yticklabels(), visible=False)
ax2.set_xlabel(r"$t-\tau_p$ (ms)", fontsize=fs)
ichan += 1
#canvas.fig.subplots_adjust(hspace=.1, wspace=.05, left=.075, right=.925 )#left=None, bottom=None, right=None, top=None, wspace=None, hspace=None)
#canvas.fig.tight_layout()
#canvas.draw()
canvas.fig.subplots_adjust(hspace=.15, wspace=.05, left=.15, right=.9, bottom=.1, top=.9 )#left=None, bottom=None, right=None, top=None, wspace=None, hspace=None)
tick_locator = MaxNLocator(nbins=5)
cb1 = canvas.fig.colorbar(im1, ax=axes[0::2], format='%1.0f', orientation='horizontal', shrink=.35, aspect=30)
cb1.ax.tick_params(axis='both', which='major', labelsize=fs)
cb1.set_label("$\mathcal{V}_N$ (nV)", fontsize=fs)
#cb1.locator = tick_locator
#cb1.update_ticks()
if phase != 2:
cb2 = canvas.fig.colorbar(im2, ax=axes[1::2], format='%1.0f', orientation='horizontal', shrink=.35, aspect=30, pad=.2)
cb2.ax.tick_params(axis='both', which='major', labelsize=fs)
cb2.set_label("$\mathcal{V}_N$ (nV)", fontsize=fs)
cb2.locator = tick_locator
cb2.update_ticks()
canvas.draw()
self.doneTrigger.emit()
def FDSmartStack(self, cv, canvas):
from matplotlib.colors import LogNorm
from matplotlib.ticker import MaxNLocator
"""
Currently this stacks 4-phase second-pulse data only; this needs to be generalised.
"""
try:
canvas.fig.clear()
except:
pass
self.dataCubeFFT( )
# canvas.ax1 = canvas.fig.add_axes([.1, .1, .8, .8])
canvas.ax1 = canvas.fig.add_axes([.1, .1, .2, .8])
canvas.ax2 = canvas.fig.add_axes([.325, .1, .2, .8])
canvas.ax3 = canvas.fig.add_axes([.55, .1, .2, .8])
canvas.ax4 = canvas.fig.add_axes([.815, .1, .05, .8]) #cb
canvas.ax1.tick_params(axis='both', which='major', labelsize=8)
canvas.ax2.tick_params(axis='both', which='major', labelsize=8)
canvas.ax3.tick_params(axis='both', which='major', labelsize=8)
canvas.ax4.tick_params(axis='both', which='major', labelsize=8)
canvas.ax1.set_ylabel("pulse index", fontsize=8)
canvas.ax1.set_xlabel(r"$\omega$ bin", fontsize=8)
canvas.ax2.set_xlabel(r"$\omega$ bin", fontsize=8)
canvas.ax3.set_xlabel(r"$\omega$ bin", fontsize=8)
canvas.ax2.yaxis.set_ticklabels([])
canvas.ax3.yaxis.set_ticklabels([])
#canvas.ax1.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
# # Look at pulses
# for pulse in self.DATADICT["PULSES"]:
# for istack in self.DATADICT["stacks"]:
# for ipm in range(0,3):
# canvas.ax1.plot( self.DATADICT[pulse]["CURRENT"][ipm][istack] , label="istack "+str(istack) + " ipm=" + str(ipm) + pulse )
# canvas.draw()
# Create Container for stacks
# sandbox determine pulse sequence again
for pulse in self.DATADICT["PULSES"]:
for ichan in self.DATADICT[pulse]["chan"]:
#for ipm in range(10,11):
CONTAINER = {}
CONTAINER["Cycle 1"] = [] # These are actually subtracted cycles... v+ - v
CONTAINER["Cycle 2"] = []
for istack in self.DATADICT["stacks"]:
#canvas.ax1.clear()
ipm = 8
#for ipm in range(self.DATADICT["nPulseMoments"]):
#canvas.ax1.matshow( np.real(self.DATADICT[pulse][ichan]["FFT"][istack]), aspect='auto' )
#canvas.draw()
if not istack%4%4:
# phase cycle 4, aligned with 1 after sub
CONTAINER["Cycle 1"].append(-self.DATADICT[pulse][ichan]["FFT"][istack])
#canvas.ax1.plot( self.DATADICT[pulse]["TIMES"], -self.DATADICT[pulse][ichan][ipm][istack], label="istack "+str(istack)+ " " + pulse )
elif not istack%4%3:
# phase cycle 3, aligned with 2 after sub
CONTAINER["Cycle 2"].append(-self.DATADICT[pulse][ichan]["FFT"][istack])
#canvas.ax1.plot( self.DATADICT[pulse]["TIMES"], -self.DATADICT[pulse][ichan][ipm][istack], label="istack "+str(istack)+ " " + pulse )
elif not istack%4%2:
# phase cycle 2
CONTAINER["Cycle 2"].append( self.DATADICT[pulse][ichan]["FFT"][istack])
#canvas.ax1.plot( self.DATADICT[pulse]["TIMES"], self.DATADICT[pulse][ichan][ipm][istack], label="istack "+str(istack)+ " " + pulse )
else:
# phase cycle 1
CONTAINER["Cycle 1"].append( self.DATADICT[pulse][ichan]["FFT"][istack])
#canvas.ax1.plot( self.DATADICT[pulse]["TIMES"], self.DATADICT[pulse][ichan][ipm][istack], label="istack "+str(istack)+ " " + pulse )
#canvas.ax1.matshow(np.array(np.average(self.DATADICT[pulse][ichan]["FFT"]), axis=2), aspect='auto' )
#canvas.ax1.plot( self.DATADICT[pulse]["PULSE_TIMES"], self.DATADICT[pulse]["CURRENT"][ipm][istack] , color='black', label="istack "+str(istack) )
#canvas.ax1.plot( self.DATADICT[pulse]["CURRENT"][ipm][istack] , label="istack "+str(istack) + " iFID" + str(iFID) )
#canvas.ax1.plot( self.DATADICT[pulse]["TIMES"], self.DATADICT[pulse][ichan][ipm][istack], label="istack "+str(istack)+ " " + pulse )
#canvas.ax1.legend(prop={'size':6})
#canvas.draw()
# Boostrap
# stack.
#scipy.random.shuffle(x)
# Stack and calculate the pooled variance (http://en.wikipedia.org/wiki/Pooled_variance)
""" All this phase cycling wreaks havoc on a normal calculation of std. and variance. Instead, we resort to calculating
a pooled variance. In this assumption is that the precision of the measurment is constant. This is a poor choice for
any type of moving sensor.
"""
# if a window filter has been applied
#self.WINDOW
#self.IWindowStart
#self.iWindowEnd
#self.FFTtimes
CONTAINER = .5*(np.array(CONTAINER["Cycle 2"]) - np.array(CONTAINER["Cycle 1"]))
print ("container shape", np.shape( CONTAINER), self.iWindowStart+1, self.iWindowEnd-1)
dmin = np.min(np.abs(np.average(np.array(CONTAINER)[:,:,self.iWindowStart+1:self.iWindowEnd-1], axis=0)))
dmax = np.max(np.abs(np.average(np.array(CONTAINER)[:,:,self.iWindowStart+1:self.iWindowEnd-1], axis=0)))
mn = canvas.ax1.matshow( 20.*np.log10(np.abs(np.average(np.array(CONTAINER)[:,:, self.iWindowStart+1:self.iWindowEnd-1], axis=0))), aspect='auto', vmin=-120, vmax=-40)
#mn = canvas.ax1.matshow(20.*np.log10(XA[:,istart:iend+1]), aspect='auto', vmax=-40, vmin=-120) #, norm=LogNorm())
canvas.ax2.matshow( 20*np.log10(np.std(np.real(np.array(CONTAINER)[:,:,self.iWindowStart+1:self.iWindowEnd-1]), axis=0)), aspect='auto', vmin=-120, vmax=-40)
canvas.ax3.matshow( 20*np.log10(np.std(np.imag(np.array(CONTAINER)[:,:,self.iWindowStart+1:self.iWindowEnd-1]), axis=0)), aspect='auto', vmin=-120, vmax=-40)
#canvas.ax1.legend(prop={'size':6})
cb1 = mpl.colorbar.Colorbar(canvas.ax4, mn)
cb1.ax.tick_params(labelsize=8)
cb1.set_label("power [dB]", fontsize=8)
canvas.ax1.xaxis.set_major_locator(MaxNLocator(4))
canvas.ax2.xaxis.set_major_locator(MaxNLocator(4))
canvas.ax3.xaxis.set_major_locator(MaxNLocator(4))
canvas.draw()
self.doneTrigger.emit()
def effectivePulseMoment(self, cv, canvas):
canvas.reAxH(2)
nstack = len(self.DATADICT["stacks"])
#canvas.ax1.set_yscale('log')
for pulse in self.DATADICT["PULSES"]:
self.DATADICT[pulse]["qeff"] = {}
self.DATADICT[pulse]["q_nu"] = {}
for ipm in range(self.DATADICT["nPulseMoments"]):
self.DATADICT[pulse]["qeff"][ipm] = {}
self.DATADICT[pulse]["q_nu"][ipm] = {}
#canvas.ax1.clear()
#scolours = np.array( ( np.linspace(0.8,0.4,len(self.DATADICT["stacks"])), \
# np.linspace(0.0,0.6,len(self.DATADICT["stacks"])), \
# np.linspace(0.6,0.0,len(self.DATADICT["stacks"])) )
# ).T
#scolours = plt.cm.Spectral(np.linspace(0,1,len(self.DATADICT["stacks"])))
#scolours = plt.cm.Blues(np.linspace(0,1,1.5*len(self.DATADICT["stacks"])))
scolours = cmocean.cm.ice(np.linspace(0,1,int(1.5*len(self.DATADICT["stacks"]))))
iistack = 0
for istack in self.DATADICT["stacks"]:
#self.DATADICT[pulse]["PULSE_TIMES"]
x = self.DATADICT[pulse]["CURRENT"][ipm][istack]
X = np.fft.rfft(x)
v = np.fft.fftfreq(len(x), self.dt)
v = v[0:len(X)]
v[-1] = np.abs(v[-1])
# calculate effective current/moment
I0 = np.abs(X)/len(X)
qeff = I0 * (self.DATADICT[pulse]["PULSE_TIMES"][-1]-self.DATADICT[pulse]["PULSE_TIMES"][0])
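# The effective pulse moment at each frequency bin is q_eff = I0 * tau_p, the
# spectral current amplitude times the pulse duration; plotQeffNu later picks the
# bin nearest the transmitter frequency. E.g. I0 = 10 A over a 40 ms pulse gives
# q_eff = 0.4 A*s (illustrative numbers only).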
# frequency plot
#canvas.ax1.set_title(r"pulse moment index " +str(ipm), fontsize=10)
#canvas.ax1.set_xlabel(r"$\nu$ [Hz]", fontsize=8)
#canvas.ax1.set_ylabel(r"$q_{eff}$ [A$\cdot$sec]", fontsize=8)
#canvas.ax1.plot(v, qeff, color=scolours[iistack] ) # eff current
# time plot
canvas.ax1.plot(1e2*(self.DATADICT[pulse]["PULSE_TIMES"]-self.DATADICT[pulse]["PULSE_TIMES"][0]), x, color=scolours[iistack])
self.DATADICT[pulse]["qeff"][ipm][istack] = qeff
self.DATADICT[pulse]["q_nu"][ipm][istack] = v
iistack += 1
#canvas.draw()
percent = int(1e2* (float)((istack)+ipm*self.DATADICT["nPulseMoments"]) /
(float)(len(self.DATADICT["PULSES"])*self.DATADICT["nPulseMoments"]*nstack))
self.progressTrigger.emit(percent)
canvas.ax1.set_xlabel("time (ms)", fontsize=10)
canvas.ax1.set_ylabel("current (A)", fontsize=10)
#canvas.draw()
self.plotQeffNu(cv, canvas.ax2)
deSpine(canvas.ax1)
deSpine(canvas.ax2)
canvas.fig.tight_layout()
canvas.draw()
self.doneTrigger.emit()
def plotQeffNu(self, cv, ax):
####################################
# TODO label fid1 and fid2, and make a legend, and colour by pulse
nstack = len(self.DATADICT["stacks"])
iFID = 0
for pulse in self.DATADICT["PULSES"]:
self.DATADICT[pulse]["Q"] = np.zeros( (self.DATADICT["nPulseMoments"], len(self.DATADICT["stacks"])) )
ilabel = True
for ipm in range(self.DATADICT["nPulseMoments"]):
#scolours = np.array([0.,0.,1.])
scolours = cmocean.cm.ice(np.linspace(0,1,int(1.5*len(self.DATADICT["stacks"]))))
#scolours = plt.cm.Spectral(np.linspace(0,1,len(self.DATADICT["stacks"])))
istack = 0
for stack in self.DATADICT["stacks"]:
# find index
icv = int(round(cv / self.DATADICT[pulse]["q_nu"][ipm][stack][1]))
self.DATADICT[pulse]["Q"][ipm,istack] = self.DATADICT[pulse]["qeff"][ipm][stack][icv]
if ilabel:
ax.scatter(ipm, self.DATADICT[pulse]["qeff"][ipm][stack][icv], facecolors='none', edgecolors=scolours[istack], label=(str(pulse)))
ilabel = False
else:
ax.scatter(ipm, self.DATADICT[pulse]["qeff"][ipm][stack][icv], facecolors='none', edgecolors=scolours[istack])
#scolours += np.array((0,1./(nstack+1),-1/(nstack+1.)))
percent = int(1e2* (float)((istack)+ipm*self.DATADICT["nPulseMoments"]) /
(float)(len(self.DATADICT["PULSES"])*self.DATADICT["nPulseMoments"]*nstack))
self.progressTrigger.emit(percent)
istack += 1
iFID += 1
ax.set_xlabel(r"pulse moment index", fontsize=10)
ax.set_ylabel(r"$q_{eff}$ (A$\cdot$sec)", fontsize=10)
ax.set_yscale('log')
ax.set_xlim(0, ax.get_xlim()[1])
ax.legend(loc='upper right', scatterpoints = 1, prop={'size':10})
def enableDSP(self):
self.enableDSPTrigger.emit()
def adaptiveFilter(self, M, flambda, truncate, mu, PCA, plot, canvas):
#plot = False
canvas.reAx2(shx=False, shy=False)
# ax1 is top plot of filter taps
# ax2 is bottom plot of conditioned signal
if truncate:
itrunc =(int) ( round( 1e-3*truncate*self.samp ) )
print( "adaptive filter size", 1e3*self.dt*M, " [ms]" )
Filt = adapt.AdaptiveFilter(flambda)
H = {}
for pulse in self.DATADICT["PULSES"]:
H[pulse] = {}
for ichan in self.DATADICT[pulse]["chan"]:
H[pulse][ichan] = np.zeros(M*len( self.DATADICT[pulse]["rchan"] ))
iFID = 0
# original ordering...
#for pulse in self.DATADICT["PULSES"]:
# for ipm in range(self.DATADICT["nPulseMoments"]):
# for istack in self.DATADICT["stacks"]:
# This order makes more sense, same as data collection, verify
for istack in self.DATADICT["stacks"]:
for ipm in range(self.DATADICT["nPulseMoments"]):
for pulse in self.DATADICT["PULSES"]:
if plot:
canvas.softClear()
mmax = 0
for ichan in self.DATADICT[pulse]["chan"]:
canvas.ax2.plot( self.DATADICT[pulse]["TIMES"], 1e9* self.DATADICT[pulse][ichan][ipm][istack], alpha=.5)
mmax = max(mmax, np.max(1e9*self.DATADICT[pulse][ichan][ipm][istack]))
canvas.ax2.set_ylim(-mmax, mmax)
canvas.ax2.set_prop_cycle(None)
for ichan in self.DATADICT[pulse]["chan"]:
#H = np.zeros(M)
RX = []
for irchan in self.DATADICT[pulse]["rchan"]:
RX.append(self.DATADICT[pulse][irchan][ipm][istack][::-1])
# Reset each time?
#H[pulse][ichan] *= 0
#if all(H[pulse][ichan]) == 0:
if False:
####################################################################################
# Padasip adaptive filter implementations; they do not allow for variable filter length
####################################################################################
# identification #
#f = pa.filters.FilterRLS(n=len(self.DATADICT[pulse]["rchan"]), mu=0.99, w="zeros") #
#f = pa.filters.FilterGNGD(n=len(self.DATADICT[pulse]["rchan"]), mu=0.1) # # Nope
#f = pa.filters.FilterLMS(n=len(self.DATADICT[pulse]["rchan"]), mu=0.1) # # NOPE
#f = pa.filters.AdaptiveFilter(model="NLMS", n=len(self.DATADICT[pulse]["rchan"]), mu=0.1, w="random") # NOPE
#f = pa.filters.AdaptiveFilter(model="GNGD", n=len(self.DATADICT[pulse]["rchan"]), mu=0.1) # horrendous
#f = pa.filters.FilterNLMF(n=len(self.DATADICT[pulse]["rchan"]), mu=0.005, w="random") # BAD
#f = pa.filters.FilterSSLMS(n=len(self.DATADICT[pulse]["rchan"]), mu=0.01, w="zeros") # pretty good
f = pa.filters.FilterNSSLMS(n=len(self.DATADICT[pulse]["rchan"]), mu=0.1, w="zeros") # pretty good
y, e, H[pulse][ichan] = f.run(self.DATADICT[pulse][ichan][ipm][istack][::-1], np.array(RX).T) #
####################################################################################
e = self.DATADICT[pulse][ichan][ipm][istack][::-1] - y
elif True:
# check for change in filter coefficients and rerun if things are changing too rapidly,
# this is especially true for the first run
hm1 = np.copy(H[pulse][ichan])
[e, H[pulse][ichan]] = Filt.adapt_filt_Ref( self.DATADICT[pulse][ichan][ipm][istack][::-1],\
RX,\
M, mu, PCA, flambda, H[pulse][ichan])
iloop = 0
#while False:
while (np.linalg.norm( H[pulse][ichan] - hm1) > .05): # threshold for recall
hm1 = np.copy(H[pulse][ichan])
[e, H[pulse][ichan]] = Filt.adapt_filt_Ref( self.DATADICT[pulse][ichan][ipm][istack][::-1],\
RX,\
M, mu, PCA, flambda, H[pulse][ichan])
iloop += 1
#print("Recalled ", iloop, "times with norm=", np.linalg.norm(hm1-H[pulse][ichan]))
else:
[e,H[pulse][ichan]] = Filt.adapt_filt_Ref( self.DATADICT[pulse][ichan][ipm][istack][::-1],\
RX,\
M, mu, PCA, flambda, H[pulse][ichan])
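# In all of the branches above, adapt_filt_Ref fits M FIR taps per reference
# channel (kept in H and reused as the starting guess for the next record),
# predicts the time-reversed data channel from the time-reversed reference
# channels, and returns the residual e; e is flipped back to normal time order
# when it is stored below.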
# replace
if truncate:
if plot:
canvas.ax2.plot( self.DATADICT[pulse]["TIMES"][0:itrunc], 1e9* e[::-1][0:itrunc],\
label = pulse + " ipm=" + str(ipm) + " istack=" + str(istack) + " ichan=" + str(ichan))
self.DATADICT[pulse][ichan][ipm][istack] = e[::-1][0:itrunc]
else:
if plot:
canvas.ax2.plot( self.DATADICT[pulse]["TIMES"], 1e9* e[::-1],\
label = pulse + " ipm=" + str(ipm) + " istack=" + str(istack) + " ichan=" + str(ichan))
self.DATADICT[pulse][ichan][ipm][istack] = e[::-1]
if plot:
#canvas.ax1.plot( H[pulse][ichan].reshape(-1, len(RX)) ) # , label="taps")
canvas.ax1.plot( H[pulse][ichan][::-1].reshape(M, len(RX), order='F' ) ) #.reshape(-1, len(RX)) ) # , label="taps")
canvas.ax2.legend(prop={'size':10}, loc='upper right')
#canvas.ax2.legend(prop={'size':6}, loc='upper right')
mh = np.max(np.abs( H[pulse][ichan] ))
canvas.ax1.set_ylim( -mh, mh )
canvas.ax2.set_xlabel(r"time (s)", fontsize=10)
canvas.ax2.set_ylabel(r"signal (nV)", fontsize=10)
canvas.ax1.set_xlabel(r"filter tap index", fontsize=10)
canvas.ax1.set_ylabel(r"tap amplitude", fontsize=10)
if plot:
canvas.fig.tight_layout()
deSpine(canvas.ax1)
deSpine(canvas.ax2)
canvas.draw()
# truncate the reference channels too, in case you still need them for something.
# Otherwise they are no longer aligned with the data
for rchan in self.DATADICT[pulse]["rchan"]:
if truncate:
self.DATADICT[pulse][rchan][ipm][istack] = self.DATADICT[pulse][rchan][ipm][istack][0:itrunc]
#percent = (int)(1e2*((float)(iFID*self.DATADICT["nPulseMoments"]+(ipm))/( len(self.DATADICT["PULSES"])*self.nPulseMoments)))
percent = (int)(1e2*((float)(istack*self.DATADICT["nPulseMoments"]+(ipm))/( len(self.DATADICT["PULSES"])*self.nPulseMoments*(len(self.DATADICT["stacks"])+1) )))
self.progressTrigger.emit(percent)
# # why is this loop here, istack is not part of rest?
# for istack in self.DATADICT["stacks"]:
# if truncate:
# self.DATADICT[pulse]["TIMES"] = self.DATADICT[pulse]["TIMES"][0:itrunc]
# percent = (int)(1e2*((float)(iFID*self.DATADICT["nPulseMoments"]+(ipm))/( len(self.DATADICT["PULSES"])*self.nPulseMoments)))
# self.progressTrigger.emit(percent)
# iFID += 1
if truncate:
self.DATADICT[pulse]["TIMES"] = self.DATADICT[pulse]["TIMES"][0:itrunc]
self.doneTrigger.emit()
self.updateProcTrigger.emit()
#self.plotFT(canvas)
def plotFT(self, canvas, istart=0, iend=0):
try:
canvas.fig.clear()
except:
pass
canvas.ax1 = canvas.fig.add_axes([.1, .1, .65, .8])
canvas.ax1c = canvas.fig.add_axes([.8, .1, .05, .8])
canvas.ax1.tick_params(axis='both', which='major', labelsize=8)
for pulse in self.DATADICT["PULSES"]:
for istack in self.DATADICT["stacks"]:
for ichan in self.DATADICT[pulse]["chan"]:
# FFT of stack
XA = np.zeros((self.DATADICT["nPulseMoments"] , len(self.DATADICT[pulse][ichan][0][istack])//2+1))
nu = np.fft.fftfreq(self.DATADICT[pulse][ichan][0][istack].size, d=self.dt)
nu[-1] *= -1
df = nu[1]
of = 0
if istart:
of = nu[istart]
def freqlabel(x, pos):
return '%1.0f' %(of + x*df)
formatter = FuncFormatter(freqlabel)
canvas.ax1.clear()
for ipm in range(self.DATADICT["nPulseMoments"]):
X = np.fft.rfft(self.DATADICT[pulse][ichan][ipm][istack])
XA[ipm,:] = np.abs(X)
if istart:
mn = canvas.ax1.matshow(20.*np.log10(XA[:,istart:iend+1]), aspect='auto', vmax=-40, vmin=-120, cmap='viridis') #, norm=LogNorm())
else:
mn = canvas.ax1.matshow(20.*np.log10(XA), aspect='auto', vmax=-40, vmin=-120, cmap='viridis') #, norm=LogNorm())
canvas.ax1.xaxis.set_major_formatter(formatter)
cb1 = mpl.colorbar.Colorbar(canvas.ax1c, mn)
cb1.ax.tick_params(labelsize=8)
cb1.set_label("signal [dB]", fontsize=8)
canvas.ax1.set_xlabel(r"$\nu$ [Hz]", fontsize=10)
canvas.ax1.set_ylabel(r"$q_{index}$", fontsize=10)
canvas.draw()
def dataCubeFFT(self):
"""
Performs FFT on the entire cube of DATA and REFERENCE channels, but not the pulse currents.
Results are saved to a new field in the data structure.
The GMR varies phase as a function of pulse moment index, so that the first pulse moment is zero phase,
the second is pi/2, and the third is zero again. This method corrects for this, so that all pulse moments are in phase.
Technically we may not want to do this: if there is some system response that this cycles away, and we lose track of
how many of each cycle we have, could this be problematic? I think it will come out in the wash as we keep track of the
"""
for pulse in self.DATADICT["PULSES"]:
for ichan in np.append(self.DATADICT[pulse]["chan"], self.DATADICT[pulse]["rchan"]):
# FFT of stack
self.DATADICT[pulse][ichan]["FFT"] = {}
self.DATADICT[pulse][ichan]["FFT"]["nu"] = np.fft.fftfreq(self.DATADICT[pulse][ichan][0][self.DATADICT["stacks"][0]].size, d=self.dt)
self.DATADICT[pulse][ichan]["FFT"]["nu"][-1] *= -1
for istack in self.DATADICT["stacks"]:
self.DATADICT[pulse][ichan]["FFT"][istack] = np.zeros((self.DATADICT["nPulseMoments"] , len(self.DATADICT[pulse][ichan][0][istack])//2+1), dtype=complex)
for ipm in range(self.DATADICT["nPulseMoments"]):
# Mod works for FID pulse sequences, TODO generalize this for 4 phase T1, etc..
mod = (-1)**(ipm%2) * (-1)**(istack%2)
self.DATADICT[pulse][ichan]["FFT"][istack][ipm,:] = np.fft.rfft( self.DATADICT[pulse][ichan][ipm][istack] )
#if ipm%2:
# odd, phase cycled from previous
# self.DATADICT[pulse][ichan]["FFT"][istack][ipm,:] = np.fft.rfft(-self.DATADICT[pulse][ichan][ipm][istack])
#else:
# even, we define as zero phase, first pulse moment has this
# self.DATADICT[pulse][ichan]["FFT"][istack][ipm,:] = np.fft.rfft(self.DATADICT[pulse][ichan][ipm][istack])
def adaptiveFilterFD(self, ftype, band, centre, canvas):
try:
canvas.fig.clear()
except:
pass
canvas.ax1 = canvas.fig.add_axes([.1, .5, .7, .4])
canvas.ax1c = canvas.fig.add_axes([.85, .5, .05, .4])
canvas.ax1.tick_params(axis='both', which='major', labelsize=8)
#canvas.ax1.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
canvas.ax2 = canvas.fig.add_axes([.1, .05, .7, .4])
canvas.ax2c = canvas.fig.add_axes([.85, .05, .05, .4])
canvas.ax2.tick_params(axis='both', which='major', labelsize=8)
#canvas.ax2.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.dataCubeFFT()
Filt = adapt.AdaptiveFilter(0.)
for pulse in self.DATADICT["PULSES"]:
# Compute window function and dimensions
[WINDOW, nd, wstart, wend, dead, idead] = self.computeWindow(pulse, band, centre, ftype)
for istack in self.DATADICT["stacks"]:
for ichan in self.DATADICT[pulse]["chan"]:
# FFT of stack
nd = len(self.DATADICT[pulse][ichan][0][istack])
XX = np.zeros((self.DATADICT["nPulseMoments"] , len(self.DATADICT[pulse][ichan][0][istack])//2+1), dtype=complex)
nu = np.fft.fftfreq(self.DATADICT[pulse][ichan][0][istack].size, d=self.dt)
nu[-1] *= -1
#nu = self.DATADICT[pulse][ichan]["FFT"]["nu"]
def freqlabel(x, pos):
return '%1.0f' %((wstart)*nu[1] + x*nu[1])
formatter = FuncFormatter(freqlabel)
canvas.ax1.clear()
for ipm in range(self.DATADICT["nPulseMoments"]):
X = np.fft.rfft(self.DATADICT[pulse][ichan][ipm][istack])
XX[ipm,:] = X
XX = XX*WINDOW
XX = XX[:,wstart:wend]
smin = np.min(20.*np.log10(np.abs(XX)))
smax = np.max(20.*np.log10(np.abs(XX)))
#if smin != smin:
# fixed colour limits in dB; the computed smin/smax above are currently overridden
smax = -40
smin = -120
mn = canvas.ax1.matshow(20.*np.log10(np.abs(XX)), aspect='auto', vmin=smin, vmax=smax) #, norm=LogNorm())
canvas.ax1.xaxis.set_major_formatter(formatter)
cb1 = mpl.colorbar.Colorbar(canvas.ax1c, mn)
RX = []
for ichan in self.DATADICT[pulse]["rchan"]:
R = np.zeros((self.DATADICT["nPulseMoments"] , len(self.DATADICT[pulse][ichan][0][istack])//2+1), dtype=complex)
for ipm in range(self.DATADICT["nPulseMoments"]):
R[ipm,:] = np.fft.rfft(self.DATADICT[pulse][ichan][ipm][istack])
RX.append(R[:,wstart:wend])
XC = Filt.transferFunctionFFT(XX, RX)
# TODO inverse FFT, but we need to map back to origional matrix size
#for ichan in self.DATADICT[pulse]["chan"]:
# for ipm in range(self.DATADICT["nPulseMoments"]):
# self.DATADICT[pulse][ichan][ipm][istack] = np.fft.irfft(XC[] , nd)
mc = canvas.ax2.matshow(20.*np.log10(np.abs(XC)), aspect='auto', vmin=smin, vmax=smax) #, norm=LogNorm())
cb2 = mpl.colorbar.Colorbar(canvas.ax2c, mc)
cmin = np.min(20.*np.log10(np.abs(XC)))
cmax = np.max(20.*np.log10(np.abs(XC)))
canvas.ax2.xaxis.set_major_formatter(formatter)
#canvas.ax2.colorbar(mn)
canvas.draw()
##############################3
# TODO inverse FFT to get the damn data back!!!
# self.progressTrigger.emit(percent)
# #label = "iFID="+str(iFID) + " ipm=" + str(ipm) + " istack=" + str(istack) + " ichan=" + str(ichan))
self.doneTrigger.emit()
def findSpikes(self, x, width, threshold, rollOn):
import scipy.ndimage as im
spikes = np.zeros( len(x) )
med = im.median_filter(x, width,mode='nearest')
std = np.std(x)
spikes = (np.abs(x-med) > threshold * std)
return np.array(np.where(spikes[rollOn::])) + rollOn
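# findSpikes flags samples that deviate from a running median (width-sample window)
# by more than `threshold` standard deviations, ignoring the first `rollOn` samples.
# Minimal sketch (hypothetical values):
#   x = np.random.randn(1000); x[500] += 25.
#   self.findSpikes(x, width=9, threshold=10, rollOn=100)   # -> array([[500]])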
# def despike(self, width, threshold, itype, rollOn, win, canvas):
# from scipy import interpolate
# """ This was a stab at a despike filter. Better results were achieved using the SmartStack approach
# """
# try:
# canvas.fig.clear()
# except:
# pass
#
# canvas.ax1 = canvas.fig.add_axes([.125,.1,.725,.8])
# canvas.ax1.tick_params(axis='both', which='major', labelsize=8)
# canvas.ax1.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
# iFID = 0
# for pulse in self.DATADICT["PULSES"]:
# for ipm in range(self.DATADICT["nPulseMoments"]):
# for istack in self.DATADICT["stacks"]:
# canvas.ax1.clear()
# for ichan in np.append(self.DATADICT[pulse]["chan"], self.DATADICT[pulse]["rchan"]):
# x = self.findSpikes(self.DATADICT[pulse][ichan][ipm][istack], width, threshold, rollOn)
# canvas.ax1.plot( self.DATADICT[pulse]["TIMES"], self.DATADICT[pulse][ichan][ipm][istack],
# label = pulse + " ipm=" + str(ipm) + " istack=" + str(istack) + " ichan=" + str(ichan))
# canvas.ax1.plot( self.DATADICT[pulse]["TIMES"][x], self.DATADICT[pulse][ichan][ipm][istack][x], '.', color='red' , markersize=6 )
#
# FIXED = np.zeros(len(x[0]))
# ii = 0
# for spike in np.array(x[0]).tolist():
# f = interpolate.interp1d(np.delete(self.DATADICT[pulse]["TIMES"][spike-win/2:spike+win/2], x[0]-(spike-win/2)), \
# np.delete(self.DATADICT[pulse][ichan][ipm][istack][spike-win/2:spike+win/2], x[0]-(spike-win/2)), itype)
# FIXED[ii] = f(self.DATADICT[pulse]["TIMES"][spike])
# ii += 1
# canvas.ax1.plot( self.DATADICT[pulse]["TIMES"][x[0]] , FIXED, '.', color='black' , markersize=4 )
# self.DATADICT[pulse][ichan][ipm][istack][x[0]] = FIXED
#
# canvas.ax1.legend(prop={'size':6})
# canvas.draw()
# percent = (int)(1e2*((float)(iFID*self.DATADICT["nPulseMoments"]+(ipm))/( len(self.DATADICT["PULSES"])*self.nPulseMoments)))
# self.progressTrigger.emit(percent)
# iFID += 1
# self.doneTrigger.emit()
def designFilter(self, cf, PB, SB, gpass, gstop, ftype, canvas):
''' cf is the centre frequency (Hz)
PB is the half-width of the pass band (Hz)
SB is the half-width of the stop band (Hz)
gpass is the maximum loss in the pass band (dB), gstop the minimum attenuation in the stop band (dB)
'''
TS = (cf) / (.5/self.dt)
PB = PB / (.5/self.dt) # 1/2 width pass band Muddy Creek
SB = SB / (.5/self.dt) # 1/2 width stop band Muddy Creek
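# The frequencies above are normalised by the Nyquist frequency (0.5/self.dt, i.e.
# samp/2), as expected by scipy.signal.buttord/cheb2ord/ellipord for digital
# filters. E.g. a 2 kHz centre frequency at samp = 50 kHz gives
# TS = 2000/25000 = 0.08 (illustrative numbers only).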
# if butterworth
#[bord, wn] = signal.buttord([TS-PB,TS+PB], [TS-SB,TS+SB], 1e-1, 5.)
if ftype=="Butterworth":
[bord, wn] = signal.buttord([TS-PB,TS+PB], [TS-SB,TS+SB], gpass, gstop)
[self.filt_b, self.filt_a] = signal.butter(bord, wn, btype='bandpass', output='ba')
[self.filt_z, self.filt_p, self.filt_k] = signal.butter(bord, wn, btype='band', output='zpk')
elif ftype == "Chebychev Type II":
[bord, wn] = signal.cheb2ord([TS-PB,TS+PB], [TS-SB,TS+SB], gpass, gstop)
[self.filt_b, self.filt_a] = signal.cheby2(bord, gstop, wn, btype='bandpass', output='ba')
[self.filt_z, self.filt_p, self.filt_k] = signal.cheby2(bord, gstop, wn, btype='band', output='zpk')
elif ftype == "Elliptic":
[bord, wn] = signal.ellipord([TS-PB,TS+PB], [TS-SB,TS+SB], gpass, gstop)
[self.filt_b, self.filt_a] = signal.ellip(bord, gpass, gstop, wn, btype='bandpass', output='ba')
[self.filt_z, self.filt_p, self.filt_k] = signal.ellip(bord, gpass, gstop, wn, btype='band', output='zpk')
# if cheby2
impulse = self.mfreqz2(self.filt_b, self.filt_a, canvas)
self.fe = -5
for it in range(len(impulse[0])):
if abs(impulse[1][0][it][0]) >= .1 * gpass:
self.fe = impulse[0][it]
canvas.draw()
return [bord, self.fe]
def downsample(self, truncate, dec, plot=False, canvas=None):
""" Downsamples and truncates the raw signal.
Args
truncate (float) : the length of the signal to truncate to
dec (int) : the decimation factor, 1 results in no downsampling
plot (bool) : perform plots
canvas : MPL axis for plotting
"""
if plot:
fs = 10
canvas.reAx2()
canvas.ax1.set_ylabel(r"signal (nV)", fontsize=fs)
canvas.ax2.set_xlabel(r"time (s)", fontsize=fs)
canvas.ax2.set_ylabel(r"signal (nV)", fontsize=fs)
self.samp /= dec
self.dt = 1./self.samp
iFID = 0
for pulse in self.DATADICT["PULSES"]:
RSTIMES = self.DATADICT[pulse]["TIMES"][::dec]
if truncate:
itrunc = (int)( 1e-3*truncate*self.samp )
RSTIMES = RSTIMES[0:itrunc]
for ipm in range(self.DATADICT["nPulseMoments"]):
for istack in self.DATADICT["stacks"]:
if plot:
canvas.softClear()
for ichan in np.append(self.DATADICT[pulse]["chan"], self.DATADICT[pulse]["rchan"]):
# trim off indices that don't divide evenly
ndi = np.shape(self.DATADICT[pulse][ichan][ipm][istack])[0]%dec
if ndi:
#[self.DATADICT[pulse][ichan][ipm][istack], RSTIMES] = signal.resample(self.DATADICT[pulse][ichan][ipm][istack][0:-ndi],\
# len(self.DATADICT[pulse][ichan][ipm][istack][0:-ndi])//dec,\
# self.DATADICT[pulse]["TIMES"][0:-ndi], window='hamm')
self.DATADICT[pulse][ichan][ipm][istack] = signal.decimate(self.DATADICT[pulse][ichan][ipm][istack], dec, n=None, ftype='iir', zero_phase=True)
else:
#[self.DATADICT[pulse][ichan][ipm][istack], RSTIMES] = signal.resample(self.DATADICT[pulse][ichan][ipm][istack],\
# len(self.DATADICT[pulse][ichan][ipm][istack])//dec,\
# self.DATADICT[pulse]["TIMES"], window='hamm')
self.DATADICT[pulse][ichan][ipm][istack] = signal.decimate(self.DATADICT[pulse][ichan][ipm][istack], dec, n=None, ftype='iir', zero_phase=True)
if truncate:
self.DATADICT[pulse][ichan][ipm][istack] = self.DATADICT[pulse][ichan][ipm][istack][0:itrunc]
if plot:
for ichan in self.DATADICT[pulse]["chan"]:
canvas.ax2.plot( RSTIMES, 1e9*self.DATADICT[pulse][ichan][ipm][istack], \
label = pulse + " ipm=" + str(ipm) + " istack=" + str(istack) + " ichan=" + str(ichan))
for ichan in self.DATADICT[pulse]["rchan"]:
canvas.ax1.plot( RSTIMES, 1e9*self.DATADICT[pulse][ichan][ipm][istack], \
label = pulse + " ipm=" + str(ipm) + " istack=" + str(istack) + " ichan=" + str(ichan))
canvas.ax1.legend(prop={'size':fs}, loc='upper right')
canvas.ax2.legend(prop={'size':fs}, loc='upper right')
deSpine( canvas.ax1 )
deSpine( canvas.ax2 )
plt.setp(canvas.ax1.get_xticklabels(), visible=False)
canvas.fig.tight_layout()
canvas.draw()
percent = (int)(1e2*((float)(iFID*self.DATADICT["nPulseMoments"]+(ipm))/( len(self.DATADICT["PULSES"])*self.nPulseMoments)))
self.progressTrigger.emit(percent)
iFID += 1
self.DATADICT[pulse]["TIMES"] = RSTIMES
#####################################
# resample pulse data
for pulse in self.DATADICT["PULSES"]:
for ipm in range(self.DATADICT["nPulseMoments"]):
for istack in self.DATADICT["stacks"]:
ndi = np.shape(self.DATADICT[pulse]["CURRENT"][ipm][istack])[0]%dec
if ndi:
[self.DATADICT[pulse]["CURRENT"][ipm][istack], RSPTIMES] = signal.resample(self.DATADICT[pulse]["CURRENT"][ipm][istack][0:-ndi],\
len(self.DATADICT[pulse]["CURRENT"][ipm][istack][0:-ndi])//dec,\
self.DATADICT[pulse]["PULSE_TIMES"][0:-ndi], window='hamm')
else:
[self.DATADICT[pulse]["CURRENT"][ipm][istack], RSPTIMES] = signal.resample(self.DATADICT[pulse]["CURRENT"][ipm][istack],\
len(self.DATADICT[pulse]["CURRENT"][ipm][istack])//dec,\
self.DATADICT[pulse]["PULSE_TIMES"], window='hamm')
self.DATADICT[pulse]["PULSE_TIMES"] = RSPTIMES
self.doneTrigger.emit()
self.updateProcTrigger.emit()
def computeWindow(self, pulse, band, centre, ftype, canvas=None):
# Compute window
nd = len(self.DATADICT[pulse][self.DATADICT[pulse]["chan"][0]][0][self.DATADICT["stacks"][0]]) # num. data
fft1 = np.fft.rfft(self.DATADICT[pulse][self.DATADICT[pulse]["chan"][0]][0][self.DATADICT["stacks"][0]])
freqs = np.fft.fftfreq(nd, self.dt)
df = freqs[1] - freqs[0]
N = int((round)(band/df))
if ftype == "Hamming":
window = np.hamming(N)
elif ftype == "Hanning":
window = np.hanning(N)
elif ftype == "Rectangular":
window = np.ones(N)
elif ftype == "Flat top":
window = signal.flattop(N)
else:
print ("in windowFilter, window type undefined")
WINDOW = np.zeros(len(fft1))
ifreq = int(round(centre/df))
istart = ifreq-len(window)//2
iend = 0
if N%2:
WINDOW[ifreq-N//2:ifreq+N//2+1] = window
iend = ifreq+N//2+1
else:
WINDOW[ifreq-N//2:ifreq+N//2] = window
iend = ifreq+N//2
self.WINDOW = WINDOW
self.iWindowStart = istart
self.iWindowEnd = iend
self.FFTtimes = nd
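# The taper is centred on the FFT bin nearest `centre` and spans roughly band/df
# bins. E.g. with dt = 1e-4 s and nd = 10000 samples, df = 1 Hz, so band = 200 Hz
# and centre = 2000 Hz give a 200-point window placed at bins 1900..2099 (values
# assumed for illustration only).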
fft1 = np.fft.irfft(WINDOW)
# calculate dead time
self.windead = 0.
for ift in np.arange(100,0,-1):
#print( ift, fft1[ift] )
if (abs(fft1[ift])/abs(fft1[0])) > 1e-2:
#print ("DEAD TIME", 1e3*self.DATADICT[pulse]["TIMES"][ift] - 1e3*self.DATADICT[pulse]["TIMES"][0] )
dead = 1e3*self.DATADICT[pulse]["TIMES"][ift] - 1e3*self.DATADICT[pulse]["TIMES"][0]
self.windead = self.DATADICT[pulse]["TIMES"][ift] - self.DATADICT[pulse]["TIMES"][0]
break
if canvas != None:
canvas.fig.clear()
canvas.ax1 = canvas.fig.add_axes([.1, .6, .75, .35])
canvas.ax2 = canvas.fig.add_axes([.1, .1, .75, .35])
canvas.ax1.plot(WINDOW)
canvas.ax2.plot( 1e3* self.DATADICT[pulse]["TIMES"][0:100] - 1e3*self.DATADICT[pulse]["TIMES"][0], fft1[0:100] )
canvas.ax2.set_xlabel("time (ms)")
canvas.ax2.set_title("IFFT")
canvas.draw()
return [WINDOW, nd, istart, iend, dead, ift]
def windowFilter(self, ftype, band, centre, trunc, canvas):
###############################
# Window Filter (Ormsby filter http://www.xsgeo.com/course/filt.htm)
# apply window
iFID = 0
for pulse in self.DATADICT["PULSES"]:
[WINDOW, nd, istart, iend, dead, idead] = self.computeWindow(pulse, band, centre, ftype)
for istack in self.DATADICT["stacks"]:
for ipm in range(self.DATADICT["nPulseMoments"]):
for ichan in np.append(self.DATADICT[pulse]["chan"], self.DATADICT[pulse]["rchan"]):
fft = np.fft.rfft( self.DATADICT[pulse][ichan][ipm][istack] )
fft *= WINDOW
if trunc:
self.DATADICT[pulse][ichan][ipm][istack] = np.fft.irfft(fft, nd)[idead:-idead]
else:
self.DATADICT[pulse][ichan][ipm][istack] = np.fft.irfft(fft, nd)
percent = (int)(1e2*((float)(iFID*self.DATADICT["nPulseMoments"]+(ipm))/(len(self.DATADICT["PULSES"])*self.nPulseMoments)))
self.progressTrigger.emit(percent)
iFID += 1
if trunc:
self.DATADICT[pulse]["TIMES"] = self.DATADICT[pulse]["TIMES"][idead:-idead]
[WINDOWxx, ndxx, istart, iend, deadxx, ideadxx] = self.computeWindow(pulse, band, centre, ftype)
self.plotFT(canvas, istart, iend)
self.doneTrigger.emit()
def bandpassFilter(self, canvas, blank, plot=True):
if plot:
canvas.reAx2()
canvas.ax1.set_ylabel(r"signal [nV]", fontsize=8)
canvas.ax2.set_xlabel(r"time [s]", fontsize=8)
canvas.ax2.set_ylabel(r"signal [nV]", fontsize=8)
ife = (int)( max(self.fe, self.windead) * self.samp )
# Data
iFID = 0
for pulse in self.DATADICT["PULSES"]:
self.DATADICT[pulse]["TIMES"] = self.DATADICT[pulse]["TIMES"][ife:-ife]
for ipm in range(self.DATADICT["nPulseMoments"]):
for istack in self.DATADICT["stacks"]:
if plot:
canvas.softClear()
mmax = 0
for ichan in self.DATADICT[pulse]["rchan"]:
canvas.ax1.plot( self.DATADICT[pulse]["TIMES"], 1e9*self.DATADICT[pulse][ichan][ipm][istack][ife:-ife], alpha=.5)
mmax = max( mmax, np.max(1e9*self.DATADICT[pulse][ichan][ipm][istack][ife:-ife]))
for ichan in self.DATADICT[pulse]["chan"]:
canvas.ax2.plot( self.DATADICT[pulse]["TIMES"], 1e9*self.DATADICT[pulse][ichan][ipm][istack][ife:-ife], alpha=.5)
mmax = max( mmax, np.max(1e9*self.DATADICT[pulse][ichan][ipm][istack][ife:-ife]))
canvas.ax2.set_prop_cycle(None)
canvas.ax1.set_prop_cycle(None)
canvas.ax1.set_ylim(-mmax, mmax)
for ichan in self.DATADICT[pulse]["rchan"]:
# reflect signal back on itself to reduce gibbs effects on early times
#nr = len( self.DATADICT[pulse][ichan][ipm][istack] ) - 1 + ife
#refl = np.append( -1*self.DATADICT[pulse][ichan][ipm][istack][::-1][0:-1], self.DATADICT[pulse][ichan][ipm][istack] )
#reflfilt = signal.filtfilt( self.filt_b, self.filt_a, refl )
#self.DATADICT[pulse][ichan][ipm][istack] = reflfilt[nr:-ife]
# don't reflect
self.DATADICT[pulse][ichan][ipm][istack] = \
signal.filtfilt(self.filt_b, self.filt_a, self.DATADICT[pulse][ichan][ipm][istack])[ife:-ife]
# plot
if plot:
canvas.ax1.plot( self.DATADICT[pulse]["TIMES"], 1e9*self.DATADICT[pulse][ichan][ipm][istack], \
label = pulse + " ipm=" + str(ipm) + " istack=" + str(istack) + " rchan=" + str(ichan))
for ichan in self.DATADICT[pulse]["chan"]:
# reflect signal back on itself to reduce gibbs effects on early times
#nr = len( self.DATADICT[pulse][ichan][ipm][istack] ) - 1 + ife
#refl = np.append( -1*self.DATADICT[pulse][ichan][ipm][istack][::-1][0:-1], self.DATADICT[pulse][ichan][ipm][istack] )
#reflfilt = signal.filtfilt( self.filt_b, self.filt_a, refl )
#self.DATADICT[pulse][ichan][ipm][istack] = reflfilt[nr:-ife]
# don't reflect
self.DATADICT[pulse][ichan][ipm][istack] = \
scipy.signal.filtfilt(self.filt_b, self.filt_a, self.DATADICT[pulse][ichan][ipm][istack])[ife:-ife]
# plot
if plot:
canvas.ax2.plot( self.DATADICT[pulse]["TIMES"], 1e9*self.DATADICT[pulse][ichan][ipm][istack], \
label = "data " + pulse + " ipm=" + str(ipm) + " istack=" + str(istack) + " chan=" + str(ichan))
if plot:
canvas.ax1.legend(prop={'size':6}, loc='upper right')
canvas.ax2.legend(prop={'size':6}, loc='upper right')
canvas.draw()
percent = (int)(1e2*((float)(iFID*self.DATADICT["nPulseMoments"]+(ipm))/(len(self.DATADICT["PULSES"])*self.nPulseMoments)))
self.progressTrigger.emit(percent)
iFID += 1
self.doneTrigger.emit()
self.updateProcTrigger.emit()
def loadGMRBinaryFID( self, rawfname, istack ):
""" Reads a single binary GMR file and fills into DATADICT
"""
#################################################################################
# figure out key data indices
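        # Record layout in samples: [ prePulseDelay | pulse | deadTime | FID data ... ]
        # nps = index where the pulse starts, npul = samples in the pulse,
        # nds = index where the FID data start, nd1 = samples kept for the FID (1 s of record)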
# Pulse
nps = (int)((self.prePulseDelay)*self.samp)
npul = (int)(self.pulseLength[0]*self.samp) #+ 100
# Data
        nds = nps+npul+(int)((self.deadTime)*self.samp) # index where pulse 1 data start
        nd1 = (int)(1.*self.samp)                       # samples kept in the FID record (1 s)
invGain = 1./self.RxGain
invCGain = self.CurrentGain
pulse = "Pulse 1"
chan = self.DATADICT[pulse]["chan"]
rchan = self.DATADICT[pulse]["rchan"]
rawFile = open( rawfname, 'rb')
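        # GMR binary format: each pulse-moment record begins with two big-endian int32 values
        # (number of channels, number of samples), followed by one block of N_samp big-endian
        # float32 samples for each channel.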
for ipm in range(self.nPulseMoments):
buf1 = rawFile.read(4)
buf2 = rawFile.read(4)
N_chan = struct.unpack('>i', buf1 )[0]
N_samp = struct.unpack('>i', buf2 )[0]
T = N_samp * self.dt
TIMES = np.arange(0, T, self.dt) - .0002 # small offset in GMR DAQ?
DATA = np.zeros([N_samp, N_chan+1])
for ichan in range(N_chan):
DATADUMP = rawFile.read(4*N_samp)
for irec in range(N_samp):
DATA[irec,ichan] = struct.unpack('>f', DATADUMP[irec*4:irec*4+4])[0]
# Save into Data Cube
for ichan in chan:
self.DATADICT["Pulse 1"][ichan][ipm][istack] = DATA[:,eval(ichan)+3][nds:nds+nd1] * invGain
self.DATADICT["Pulse 1"]["TIMES"] = TIMES[nds:nds+nd1]
self.DATADICT["Pulse 1"]["CURRENT"][ipm][istack] = DATA[:,1][nps:nps+npul] * invCGain
self.DATADICT["Pulse 1"]["PULSE_TIMES"] = TIMES[nps:nps+npul]
# reference channels?
for ichan in rchan:
self.DATADICT["Pulse 1"][ichan][ipm][istack] = DATA[:,eval(ichan)+3][nds:nds+nd1] * invGain
self.DATADICT["Pulse 1"]["TIMES"] = TIMES[nds:nds+nd1]
def loadGMRASCIIFID( self, rawfname, istack ):
"""Based on the geoMRI instrument manufactured by VistaClara. Imports
a suite of raw .lvm files with the following format (on one line)
time(s) DC_Bus/100(V) Current+/75(A) Curr-/75(A) Voltage+/200(V) \
Ch1(V) Ch2(V) Ch3(V) Ch4(V)
Sampling rate is assumed at 50 kHz
"""
import pandas as pd
#################################################################################
# figure out key data indices
# Pulse
nps = (int)((self.prePulseDelay)*self.samp)
npul = (int)(self.pulseLength[0]*self.samp) #+ 100
# Data
        nds = nps+npul+(int)((self.deadTime)*self.samp) # index where pulse 1 data start
        nd1 = (int)(1.*self.samp) - nds                 # samples in the first pulse record (after dead time)
        ndr = (int)(1.*self.samp)                       # samples per record
invGain = 1./self.RxGain
invCGain = self.CurrentGain
pulse = "Pulse 1"
chan = self.DATADICT[pulse]["chan"]
rchan = self.DATADICT[pulse]["rchan"]
T = 1.5 #N_samp * self.dt
TIMES = np.arange(0, T, self.dt) - .0002 # small offset in GMR DAQ?
self.DATADICT["Pulse 1"]["TIMES"] = TIMES[nds:nds+nd1]
self.DATADICT["Pulse 1"]["PULSE_TIMES"] = TIMES[nps:nps+npul]
# pandas is much faster than numpy for io
#DATA = np.loadtxt(rawfname)
DATA = pd.read_csv(rawfname, header=None, sep="\t").values
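        # Column layout (see docstring): 0=time, 1=DC bus, 2=Current+, 3=Current-, 4=Voltage+,
        # 5..8 = Ch1..Ch4 -- hence the +4 column offset applied to each channel index below.
        # The transmitter current is taken from column 2 (Current+).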
for ipm in range(self.nPulseMoments):
for ichan in np.append(chan,rchan):
self.DATADICT["Pulse 1"][ichan][ipm][istack] = DATA[:, eval(ichan)+4][nds:(nds+nd1)] * invGain
self.DATADICT["Pulse 1"]["CURRENT"][ipm][istack] = DATA[:,2][nps:nps+npul] * invCGain
nds += ndr
nps += ndr
def readMIDI2Header(self, directory):
""" Reads the header of the FID_Q1_D0_R1.dat which should be in very MIDI directory
"""
print("Searching ", directory, "for Q files")
self.QFiles = []
for file in glob.glob(directory+'/FID_Q*R1.dat'):
self.QFiles.append(file)
fidname = self.QFiles[0] # "/FID_Q1_D0_R1.dat"
HEADER = {}
with open(fidname, 'rb') as FID:
#print(FID.name)
headerLine = 0
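            # The first 21 header lines have the form "[Tag] value"; split on ']', strip the leading '['
            # from the tag, and store the stripped value string keyed by the tag name.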
for i in range(21):
tags = FID.readline().split(b']')
tags[0] = tags[0].strip(b'[')
tags[1] = tags[1].decode().strip( )
HEADER[ ''.join(map(chr, tags[0])) ] = tags[1]
#print(HEADER)
pulseTypeDict = {
0 : lambda: "FID",
2 : lambda: "T1",
3 : lambda: "SPINECHO",
4 : lambda: "4PhaseT1"
}
pulseLengthDict = {
1 : lambda x: np.ones(1) * x,
2 : lambda x: np.ones(2) * x,
3 : lambda x: np.array([x, 2.*x]),
4 : lambda x: np.ones(2) * x
}
if HEADER["P2"] == "0":
self.pulseType = "FID"
else:
self.pulseType = "T1"
self.transFreq = float(HEADER[ 'Excitation frequency /Hz' ])
self.maxBusV = '24 V'
self.pulseLength = [ float(HEADER['P1'])/self.transFreq, float(HEADER['P2'])/self.transFreq]
self.interpulseDelay = 1e-3*float(HEADER["Delay /ms"]) # for T2, Spin Echo
self.repetitionDelay = float(HEADER['Pause /s']) # delay between q pulses
self.nPulseMoments = len(self.QFiles) # Number of pulse moments per stack
self.TuneCapacitance = 0 # tuning capacitance in uF
self.nTransVersion = "MIDI 2" # Transmitter version
self.nDAQVersion = HEADER["Software Revision"] # DAQ software version
self.nInterleaves = 0 # num interleaves
self.Instrument = "MIDI 2"
self.datadir = directory
self.MIDIGain = HEADER["Gains"].split()
# default
self.samp = float(HEADER["Data Rate /Hz"]) # sampling frequency
self.dt = 1./self.samp # sampling rate
def loadMIDI2(self, directory, procStacks, chanin, rchanin, FIDProc, canvas, deadTime, pulseMoments, plot):
"""Reads a MRS MIDI2 experiment.
"""
print("Loading MIDI2 data")
canvas.reAx3(True,False)
chan = []
for ch in chanin:
chan.append(str(ch))
rchan = []
for ch in rchanin:
rchan.append(str(ch))
#print(self.QFiles)
# Set up the same structure as GMR
PULSES = [FIDProc]
PULSES = ["Pulse 1"]
if pulseMoments[0] == -1:
print("using default pulse moments, all of them")
self.pulseMoments = range(0, self.nPulseMoments) # use all of them, assume starting at 0
else:
self.pulseMoments = pulseMoments
self.nPulseMoments = len(self.pulseMoments)
print("nPulseMoments", self.nPulseMoments, self.pulseMoments)
self.DATADICT = {}
self.DATADICT["nPulseMoments"] = self.nPulseMoments
self.DATADICT["stacks"] = procStacks
self.DATADICT["PULSES"] = PULSES
for pulse in PULSES:
self.DATADICT[pulse] = {}
self.DATADICT[pulse]["chan"] = chan # TODO these should not be a subet of pulse! for GMR all
self.DATADICT[pulse]["rchan"] = rchan # data are consistent
self.DATADICT[pulse]["CURRENT"] = {}
for ichan in np.append(chan,rchan):
self.DATADICT[pulse][ichan] = {}
for ipm in range(self.nPulseMoments):
self.DATADICT[pulse][ichan][ipm] = {}
self.DATADICT[pulse]["CURRENT"][ipm] = {}
for istack in procStacks:
pass
#print("pulse", pulse, "ichan",type(ichan), ichan, "ipm", type(ipm), ipm, "istack",type(istack), istack)
#
#self.DATADICT[pulse][ichan][ipm][istack] = np.zeros(3)
#self.DATADICT[pulse]["CURRENT"][ipm][istack] = np.zeros(3)
iistack = 0
idead = int( (self.pulseLength[0]+deadTime) / self.dt)
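        # idead = number of samples spanned by the excitation pulse plus the instrument dead time;
        # the FID records loaded below are clipped to start after this index.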
for ipm, iq in enumerate(self.pulseMoments):
fidbase = self.datadir #+ self.QFiles[iq][0:-5]
for istack in procStacks:
#fidname = fidbase + str(iq).zfill(2) + ".dat"
fidname = fidbase + "/FID_Q" + str(iq) + "_D0_R" + str(istack) + ".dat"
with open(fidname, 'rb') as FID:
#print(FID.name)
headerLine = 0
for i in range(100):
line = FID.readline().strip()
headerLine += 1
if line == b'[Begin Data]':
break
# use numpy for now, consider pandas for faster read?
print("Loading", FID)
DATA = np.genfromtxt(FID, skip_header=0, skip_footer=1 )
#DATA = pd.read_csv(fidname, skiprows=headerLine, skipfooter=1, sep='\t', encoding='ascii')
for ichan in np.append(chan,rchan):
if int(ichan) <= 3:
self.DATADICT["Pulse 1"][ichan][ipm][istack] = DATA[idead:,int(ichan)] / float(self.MIDIGain[int(ichan)-1])
elif int(ichan) > 3:
self.DATADICT["Pulse 1"][ichan][ipm][istack] = DATA[idead:,int(ichan)+1] / float(self.MIDIGain[int(ichan)-1])
# truncate after dead time
self.DATADICT["Pulse 1"]["TIMES"] = DATA[idead:,0]
# truncate until dead time
ipulse = int(self.pulseLength[0] / self.dt)
self.DATADICT["Pulse 1"]["PULSE_TIMES"] = DATA[0:ipulse,0]
self.DATADICT["Pulse 1"]["CURRENT"][ipm][istack] = DATA[0:ipulse,4]
if plot:
canvas.softClear()
for ichan in chan:
canvas.ax1.plot(self.DATADICT["Pulse 1"]["PULSE_TIMES"], self.DATADICT["Pulse 1"]["CURRENT"][ipm][istack] , color='black')
canvas.ax3.plot(self.DATADICT["Pulse 1"]["TIMES"], self.DATADICT["Pulse 1"][ichan][ipm][istack], label="Pulse 1 FID data ch. "+str(ichan)) #, color='blue')
for ichan in rchan:
canvas.ax2.plot(self.DATADICT["Pulse 1"]["TIMES"], self.DATADICT["Pulse 1"][ichan][ipm][istack], label="Pulse 1 FID ref ch. "+str(ichan)) #, color='blue')
# reference axis
if len(rchan) > 0:
canvas.ax2.legend(prop={'size':10}, loc='upper right')
canvas.ax2.tick_params(axis='both', which='major', labelsize=10)
canvas.ax2.tick_params(axis='both', which='minor', labelsize=10)
#canvas.ax2.xaxis.set_ticklabels([])
plt.setp(canvas.ax2.get_xticklabels(), visible=False)
canvas.ax2.set_title("stack "+str(istack)+" pulse index " + str(iq), fontsize=10)
canvas.ax2.set_ylabel("RAW signal [V]", fontsize=10)
canvas.ax1.set_ylabel("Current (A)", fontsize=10)
canvas.ax1.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
canvas.ax1.set_xlabel("time (s)", fontsize=10)
canvas.ax3.legend(prop={'size':10}, loc='upper right')
canvas.ax3.set_ylabel("RAW signal [V]", fontsize=10)
canvas.fig.tight_layout()
canvas.draw()
percent = (int) (1e2*((float)(iistack) / (len(procStacks)*self.nPulseMoments)))
self.progressTrigger.emit(percent)
iistack += 1
# percent = (int) (1e2*((float)((iistack*self.nPulseMoments+ipm+1)) / (len(procStacks)*self.nPulseMoments)))
# self.progressTrigger.emit(percent)
# iistack += 1
self.enableDSP()
self.doneTrigger.emit()
def loadGMRASCIIT1( self, rawfname, istack ):
"""Based on the geoMRI instrument manufactured by VistaClara. Imports
a suite of raw .lvm files with the following format (on one line)
time(s) DC_Bus/100(V) Current+/75(A) Curr-/75(A) Voltage+/200(V) \
Ch1(V) Ch2(V) Ch3(V) Ch4(V)
Sampling rate is assumed at 50 kHz
"""
import pandas as pd
#################################################################################
# figure out key data indices
# Pulse
nps = (int)((self.prePulseDelay)*self.samp)
npul = (int)(self.pulseLength[0]*self.samp) #+ 100
# phase cycling
# Older T1 GMR data had a curious phase cycling
npc = 2 #(int)( self.samp / self.transFreq / 6 )
#print("npc", npc)
# Data
        nds = nps+npul+(int)((self.deadTime)*self.samp)         # index where pulse 1 data start
        nd1 = (int)( (self.interpulseDelay) * self.samp) - nds  # samples in the first pulse record (after dead time)
        ndr = (int)( (self.interpulseDelay) * self.samp)        # samples per record
invGain = 1./self.RxGain
invCGain = self.CurrentGain
pulse = "Pulse 1"
chan = self.DATADICT[pulse]["chan"]
rchan = self.DATADICT[pulse]["rchan"]
T = 1.5 #N_samp * self.dt
TIMES = np.arange(0, T, self.dt) - .0002 # small offset in GMR DAQ?
self.DATADICT["Pulse 1"]["TIMES"] = TIMES[nds:nds+nd1]
self.DATADICT["Pulse 1"]["PULSE_TIMES"] = TIMES[nps:nps+npul]
# pandas is much faster than numpy for io
#DATA = np.loadtxt(rawfname)
DATA = pd.read_csv(rawfname, header=None, sep="\t").values
for ipm in range(self.nPulseMoments):
for ichan in np.append(chan,rchan):
if ipm%2:
self.DATADICT["Pulse 1"][ichan][ipm][istack] = DATA[:, eval(ichan)+4][(nds+npc):(nds+nd1+npc)] * invGain
#self.DATADICT["Pulse 1"][ichan][ipm][istack] = DATA[:, eval(ichan)+4][nds:(nds+nd1)] * invGain
self.DATADICT["Pulse 1"]["CURRENT"][ipm][istack] = DATA[:,2][nps+npc:nps+npul+npc] * invCGain
else:
self.DATADICT["Pulse 1"][ichan][ipm][istack] = DATA[:, eval(ichan)+4][nds:(nds+nd1)] * invGain
self.DATADICT["Pulse 1"]["CURRENT"][ipm][istack] = DATA[:,2][nps:nps+npul] * invCGain
nds += ndr
nps += ndr
def loadFIDData(self, base, procStacks, chanin, rchanin, FIDProc, canvas, deadTime, plot):
'''
Loads a GMR FID dataset, reads binary and ASCII format files
'''
canvas.reAx3(True,False)
chan = []
for ch in chanin:
chan.append(str(ch))
rchan = []
for ch in rchanin:
rchan.append(str(ch))
self.deadTime = deadTime # instrument dead time before measurement
self.samp = 50000. # in case this is a reproc, these might have
self.dt = 1./self.samp # changed
#################################################################################
# Data structures
PULSES = [FIDProc]
PULSES = ["Pulse 1"]
self.DATADICT = {}
self.DATADICT["nPulseMoments"] = self.nPulseMoments
self.DATADICT["stacks"] = procStacks
self.DATADICT["PULSES"] = PULSES
for pulse in PULSES:
self.DATADICT[pulse] = {}
self.DATADICT[pulse]["chan"] = chan # TODO these should not be a subet of pulse! for GMR all
self.DATADICT[pulse]["rchan"] = rchan # data are consistent
self.DATADICT[pulse]["CURRENT"] = {}
for ichan in np.append(chan,rchan):
self.DATADICT[pulse][ichan] = {}
for ipm in range(self.nPulseMoments):
self.DATADICT[pulse][ichan][ipm] = {}
self.DATADICT[pulse]["CURRENT"][ipm] = {}
for istack in procStacks:
self.DATADICT[pulse][ichan][ipm][istack] = np.zeros(3)
self.DATADICT[pulse]["CURRENT"][ipm][istack] = np.zeros(3)
##############################################
# Read in binary (.lvm) data
iistack = 0
for istack in procStacks:
if self.nDAQVersion <= 1.0:
try:
self.loadGMRASCIIFID( base + "_" + str(istack), istack )
except:
self.loadGMRASCIIFID( base + "_" + str(istack) + ".lvm", istack )
elif self.nDAQVersion < 2.3:
self.loadGMRASCIIFID( base + "_" + str(istack), istack )
else:
self.loadGMRBinaryFID( base + "_" + str(istack) + ".lvm", istack )
if plot:
for ipm in range(self.nPulseMoments):
canvas.softClear()
for ichan in chan:
canvas.ax1.plot(self.DATADICT["Pulse 1"]["PULSE_TIMES"], self.DATADICT["Pulse 1"]["CURRENT"][ipm][istack] , color='black')
canvas.ax3.plot(self.DATADICT["Pulse 1"]["TIMES"], self.DATADICT["Pulse 1"][ichan][ipm][istack], label="Pulse 1 FID data ch. "+str(ichan)) #, color='blue')
for ichan in rchan:
canvas.ax2.plot(self.DATADICT["Pulse 1"]["TIMES"], self.DATADICT["Pulse 1"][ichan][ipm][istack], label="Pulse 1 FID ref ch. "+str(ichan)) #, color='blue')
# reference axis
canvas.ax2.tick_params(axis='both', which='major', labelsize=10)
canvas.ax2.tick_params(axis='both', which='minor', labelsize=10)
#canvas.ax2.xaxis.set_ticklabels([])
plt.setp(canvas.ax2.get_xticklabels(), visible=False)
canvas.ax2.legend(prop={'size':10}, loc='upper right')
canvas.ax2.set_title("stack "+str(istack)+" pulse index " + str(ipm), fontsize=10)
canvas.ax2.set_ylabel("RAW signal [V]", fontsize=10)
canvas.ax1.set_ylabel("Current (A)", fontsize=10)
canvas.ax1.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
canvas.ax1.set_xlabel("time (s)", fontsize=10)
canvas.ax3.legend(prop={'size':10}, loc='upper right')
canvas.ax3.set_ylabel("RAW signal [V]", fontsize=10)
canvas.fig.tight_layout()
canvas.draw()
percent = (int) (1e2*((float)((iistack*self.nPulseMoments+ipm+1)) / (len(procStacks)*self.nPulseMoments)))
self.progressTrigger.emit(percent)
iistack += 1
# percent = (int) (1e2*((float)((iistack*self.nPulseMoments+ipm+1)) / (len(procStacks)*self.nPulseMoments)))
# self.progressTrigger.emit(percent)
# iistack += 1
self.enableDSP()
self.doneTrigger.emit()
def loadT1Data(self, base, procStacks, chanin, rchanin, FIDProc, canvas, deadTime, plot):
'''
Loads a GMR T1 dataset, reads binary and ASCII format files
'''
canvas.reAx3(True,False)
chan = []
for ch in chanin:
chan.append(str(ch))
rchan = []
for ch in rchanin:
rchan.append(str(ch))
# not in any headers but this has changed, NOT the place to do this. MOVE
#self.prePulseDelay = 0.01 # delay before pulse
self.deadTime = deadTime # instrument dead time before measurement
self.samp = 50000. # in case this is a reproc, these might have
self.dt = 1./self.samp # changed
#################################################################################
# Data structures
PULSES = [FIDProc]
self.DATADICT = {}
self.DATADICT["nPulseMoments"] = self.nPulseMoments
self.DATADICT["stacks"] = procStacks
self.DATADICT["PULSES"] = PULSES
for pulse in PULSES:
self.DATADICT[pulse] = {}
self.DATADICT[pulse]["chan"] = chan # TODO these should not be a subet of pulse! for GMR all
self.DATADICT[pulse]["rchan"] = rchan # data are consistent
self.DATADICT[pulse]["CURRENT"] = {}
for ichan in np.append(chan,rchan):
self.DATADICT[pulse][ichan] = {}
for ipm in range(self.nPulseMoments):
self.DATADICT[pulse][ichan][ipm] = {}
self.DATADICT[pulse]["CURRENT"][ipm] = {}
for istack in procStacks:
self.DATADICT[pulse][ichan][ipm][istack] = np.zeros(3)
self.DATADICT[pulse]["CURRENT"][ipm][istack] = np.zeros(3)
##############################################
# Read in binary (.lvm) data
iistack = 0
fnames = []
for istack in procStacks:
if self.nDAQVersion < 2.3:
#rawfname = base + "_" + str(istack)
#self.loadGMRASCIIFID( base + "_" + str(istack), istack )
self.loadGMRASCIIT1( base + "_" + str(istack), istack )
else:
self.loadGMRBinaryFID( base + "_" + str(istack) + ".lvm", istack )
#fnames.append( base + "_" + str(istack) + ".lvm" )
            percent = (int) (1e2*((float)(iistack+1) / len(procStacks))) # loading progress, one increment per stack
self.progressTrigger.emit(percent)
iistack += 1
# multiprocessing load data
#info = {}
#info["prePulseDelay"] = self.prePulseDelay
#info["samp"] = self.samp
#with multiprocessing.Pool() as pool:
# results = pool.starmap( xxloadGMRBinaryFID, ( fnames, zip(itertools.repeat(info)) ) )
# Plotting
if plot:
iistack = 0
for istack in procStacks:
#for ipm in range(0,7,1):
for ipm in range(self.nPulseMoments):
canvas.ax1.clear()
canvas.ax2.clear()
canvas.ax3.clear()
#canvas.fig.patch.set_facecolor('blue')
for ichan in chan:
canvas.ax1.plot(self.DATADICT["Pulse 1"]["PULSE_TIMES"], self.DATADICT["Pulse 1"]["CURRENT"][ipm][istack] , color='black')
canvas.ax3.plot(self.DATADICT["Pulse 1"]["TIMES"], self.DATADICT["Pulse 1"][ichan][ipm][istack], label="Pulse 1 FID data ch. "+str(ichan)) #, color='blue')
for ichan in rchan:
canvas.ax2.plot(self.DATADICT["Pulse 1"]["TIMES"], self.DATADICT["Pulse 1"][ichan][ipm][istack], label="Pulse 1 FID ref ch. "+str(ichan)) #, color='blue')
canvas.ax3.legend(prop={'size':6}, loc='upper right')
canvas.ax2.legend(prop={'size':6}, loc='upper right')
canvas.ax1.set_title("stack "+str(istack)+" pulse index " + str(ipm), fontsize=8)
canvas.ax1.set_xlabel("time [s]", fontsize=8)
canvas.ax1.set_ylabel("Current [A]", fontsize=8)
canvas.ax1.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
canvas.ax2.set_ylabel("RAW signal [V]", fontsize=8)
canvas.ax2.tick_params(axis='both', which='major', labelsize=8)
canvas.ax2.tick_params(axis='both', which='minor', labelsize=6)
canvas.ax2.set_xlabel("time [s]", fontsize=8)
canvas.ax2.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
canvas.ax3.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
canvas.draw()
#canvas.draw()
percent = (int) (1e2*((float)((iistack*self.nPulseMoments+ipm+1)) / (len(procStacks)*self.nPulseMoments)))
self.progressTrigger.emit(percent)
iistack += 1
self.enableDSP()
self.doneTrigger.emit()
def load4PhaseT1Data(self, base, procStacks, chan, rchan, FIDProc, canvas, deadTime, plot):
"""
Designed to load GMR 4-phase data which use the following convention for phase cycles
P1 P2
Stack 1 -> 0 0 <-- <--
Stack 2 -> 0 pi/2 | <-- <--
Stack 3 -> pi/2 0 <-- | <--
Stack 4 -> pi/2 pi/2 <-- <--
        The cycle is determined by the stack index. Walbrecker proposes (Stack 2 - Stack 1) / 2 for pulse 2 data;
        equivalently (Stack 4 - Stack 3) will yield the same voltage response w.r.t. the second pulse.
        Alternatively, Stack 4 can be aligned with Stack 1 by negating, and Stack 3 can be aligned with Stack 2 by negating.
        Then there are just two phase cycles that can be stacked as normal.
        Unfortunately, we need to stack each cycle first and then perform the phase-cycling corrections. Otherwise
        the entire point is lost: the signal that should cancel may not be balanced evenly across the stacks, i.e.,
        there may be an uneven number of a given phase cycle.
        We could, I suppose, impose this condition, but I think I would rather not.
        + more samples for std. deviation calculation
        + single spikes will have less residual effect
        - can no longer do normality tests etc. and remove data that are suspect.
        - requires a dumb stack, and may also require removal of entire stacks of data
        Additionally, the GMR varies phase as a function of pulse moment index, so that the first pulse moment is zero phase, the second is pi/2, the third is zero ...
        This, however, is altered by the above convention. It gets a little complicated...
"""
import struct
canvas.reAx3()
# not in any headers but this has changed, NOT the place to do this. MOVE
self.prePulseDelay = 0.01 # delay before pulse
self.deadTime = deadTime # instrument dead time before measurement
self.samp = 50000. # in case this is a reproc, these might have
self.dt = 1./self.samp # changed
invGain = 1./self.RxGain
invCGain = self.CurrentGain
#################################################################################
# figure out key data indices
# Pulse
nps = (int)((self.prePulseDelay)*self.samp)
nps2 = (int)((self.prePulseDelay+self.interpulseDelay)*self.samp)
npul = (int)(self.pulseLength[0]*self.samp) #+ 100
np2 = (int)(self.pulseLength[1]*self.samp) #+ 100
# Data
        nds  = nps+npul+(int)((self.deadTime)*self.samp)     # index where pulse 1 data start
        nd1  = (int)((self.interpulseDelay)*self.samp)       # samples in first pulse record
        nd2s = nps+npul+nd1+(int)((self.deadTime)*self.samp) # index where pulse 2 data start
        nd2  = (int)((1.)*self.samp)                         # samples in second pulse record
        nd1 -= (int)((.028)*self.samp) + nps                 # leave some time to get ready for the next pulse
#################################################################################
# Data structures
PULSES = [FIDProc]
if FIDProc == "Both":
PULSES = ["Pulse 1","Pulse 2"]
self.DATADICT = {}
self.DATADICT["nPulseMoments"] = self.nPulseMoments
self.DATADICT["stacks"] = procStacks
self.DATADICT["PULSES"] = PULSES
for pulse in PULSES:
self.DATADICT[pulse] = {}
self.DATADICT[pulse]["chan"] = chan
self.DATADICT[pulse]["rchan"] = rchan
self.DATADICT[pulse]["CURRENT"] = {}
for ichan in np.append(chan,rchan):
self.DATADICT[pulse][ichan] = {}
for ipm in range(self.nPulseMoments):
self.DATADICT[pulse][ichan][ipm] = {}
self.DATADICT[pulse]["CURRENT"][ipm] = {}
for istack in procStacks:
self.DATADICT[pulse][ichan][ipm][istack] = np.zeros(3)
self.DATADICT[pulse]["CURRENT"][ipm][istack] = np.zeros(3)
##############################################
# Read in binary data
iistack = 0
for istack in procStacks:
rawFile = open(base + "_" + str(istack) + ".lvm", 'rb')
for ipm in range(self.nPulseMoments):
N_chan = struct.unpack('>i', rawFile.read(4))[0]
N_samp = struct.unpack('>i', rawFile.read(4))[0]
T = N_samp * self.dt
TIMES = np.arange(0, T, self.dt) - .0002 # small offset in GMR DAQ?
DATA = np.zeros([N_samp, N_chan+1])
for ichan in range(N_chan):
DATADUMP = rawFile.read(4*N_samp)
for irec in range(N_samp):
DATA[irec,ichan] = struct.unpack('>f', DATADUMP[irec*4:irec*4+4])[0]
if plot:
#canvas.ax1.clear()
#canvas.ax2.clear()
canvas.softClear()
li = np.shape( DATA[:,4][nd2s:nd2s+nd2] )[0]
######################################
# save into DATA cube
# TODO, changing iFID to 'Pulse 1' or 'Pulse 2'
for ichan in chan:
if FIDProc == "Pulse 1":
self.DATADICT["Pulse 1"][ichan][ipm][istack] = DATA[:,ichan+3][nds:nds+nd1] * invGain
self.DATADICT["Pulse 1"]["TIMES"] = TIMES[nds:nds+nd1]
self.DATADICT["Pulse 1"]["CURRENT"][ipm][istack] = DATA[:,1][nps:nps+npul] * invCGain
self.DATADICT["Pulse 1"]["PULSE_TIMES"] = TIMES[nps:nps+npul]
if plot:
canvas.ax3.plot(self.DATADICT["Pulse 1"]["TIMES"], self.DATADICT["Pulse 1"][ichan][ipm][istack], label="Pulse 1 FID data ch. "+str(ichan)) #, color='blue')
canvas.ax1.plot(self.DATADICT["Pulse 1"]["PULSE_TIMES"], self.DATADICT["Pulse 1"]["CURRENT"][ipm][istack] , color='black')
elif FIDProc == "Pulse 2":
print("TODO fix y scale")
self.DATADICT["Pulse 2"][ichan][ipm][istack] = DATA[:,ichan+3][nd2s:nd2s+nd2] *invGain
self.DATADICT["Pulse 2"]["TIMES"] = TIMES[nd2s:nd2s+nd2]
self.DATADICT["Pulse 2"]["CURRENT"][ipm][istack] = DATA[:,1][nps2:nps2+np2] * invCGain
self.DATADICT["Pulse 2"]["PULSE_TIMES"] = TIMES[nps2:nps2+np2]
if plot:
canvas.ax3.plot(self.DATADICT["Pulse 2"]["TIMES"], self.DATADICT["Pulse 2"][ichan][ipm][istack], label="Pulse 2 FID data ch. "+str(ichan)) #, color='blue')
canvas.ax1.plot( self.DATADICT["Pulse 2"]["PULSE_TIMES"], self.DATADICT["Pulse 2"]["CURRENT"][ipm][istack], color='black' )
else:
self.DATADICT["Pulse 1"][ichan][ipm][istack] = DATA[:,ichan+3][nds:nds+nd1] * invGain
self.DATADICT["Pulse 2"][ichan][ipm][istack] = DATA[:,ichan+3][nd2s:nd2s+nd2] * invGain
self.DATADICT["Pulse 1"]["TIMES"] = TIMES[nds:nds+nd1]
self.DATADICT["Pulse 2"]["TIMES"] = TIMES[nd2s:nd2s+nd2]
self.DATADICT["Pulse 1"]["CURRENT"][ipm][istack] = DATA[:,1][nps:nps+npul] * invCGain
self.DATADICT["Pulse 1"]["PULSE_TIMES"] = TIMES[nps:nps+npul]
self.DATADICT["Pulse 2"]["CURRENT"][ipm][istack] = DATA[:,1][nps2:nps2+np2] * invCGain
self.DATADICT["Pulse 2"]["PULSE_TIMES"] = TIMES[nps2:nps2+np2]
if plot:
canvas.ax3.plot(self.DATADICT["Pulse 1"]["TIMES"], self.DATADICT["Pulse 1"][ichan][ipm][istack], label="Pulse 1 FID data ch. "+str(ichan)) #, color='blue')
canvas.ax3.plot(self.DATADICT["Pulse 2"]["TIMES"], self.DATADICT["Pulse 2"][ichan][ipm][istack], label="Pulse 2 FID data ch. "+str(ichan)) #, color='blue')
canvas.ax1.plot( self.DATADICT["Pulse 1"]["PULSE_TIMES"], self.DATADICT["Pulse 1"]["CURRENT"][ipm][istack] , color='black' )
canvas.ax1.plot( self.DATADICT["Pulse 2"]["PULSE_TIMES"], self.DATADICT["Pulse 2"]["CURRENT"][ipm][istack] , color='black')
for ichan in rchan:
if FIDProc == "Pulse 1":
self.DATADICT["Pulse 1"][ichan][ipm][istack] = DATA[:,ichan+3][nds:nds+nd1] * invGain
self.DATADICT["Pulse 1"]["TIMES"] = TIMES[nds:nds+nd1]
if plot:
canvas.ax2.plot(self.DATADICT["Pulse 1"]["TIMES"], self.DATADICT["Pulse 1"][ichan][ipm][istack], label="Pulse 1 FID ref ch. "+str(ichan)) #, color='blue')
elif FIDProc == "Pulse 2":
self.DATADICT["Pulse 2"][ichan][ipm][istack] = DATA[:,ichan+3][nd2s:nd2s+nd2] * invGain
self.DATADICT["Pulse 2"]["TIMES"] = TIMES[nd2s:nd2s+nd2]
if plot:
canvas.ax2.plot(self.DATADICT["Pulse 2"]["TIMES"], self.DATADICT["Pulse 2"][ichan][ipm][istack], label="Pulse 2 FID ref ch. "+str(ichan)) #, color='blue')
else:
self.DATADICT["Pulse 1"][ichan][ipm][istack] = DATA[:,ichan+3][nds:nds+nd1] * invGain
self.DATADICT["Pulse 2"][ichan][ipm][istack] = DATA[:,ichan+3][nd2s:nd2s+nd2] * invGain
self.DATADICT["Pulse 1"]["TIMES"] = TIMES[nds:nds+nd1]
self.DATADICT["Pulse 2"]["TIMES"] = TIMES[nd2s:nd2s+nd2]
if plot:
canvas.ax2.plot(self.DATADICT["Pulse 1"]["TIMES"], self.DATADICT["Pulse 1"][ichan][ipm][istack], label="Pulse 1 FID ref ch. "+str(ichan)) #, color='blue')
canvas.ax2.plot(self.DATADICT["Pulse 2"]["TIMES"], self.DATADICT["Pulse 2"][ichan][ipm][istack], label="Pulse 2 FID ref ch. "+str(ichan)) #, color='blue')
if plot:
canvas.ax3.legend(prop={'size':6}, loc='upper right')
canvas.ax2.legend(prop={'size':6}, loc='upper right')
canvas.ax1.set_title("stack "+str(istack)+" pulse index " + str(ipm), fontsize=8)
canvas.ax1.set_xlabel("time [s]", fontsize=8)
canvas.ax3.set_ylabel("RAW signal [V]", fontsize=8)
canvas.ax2.set_ylabel("RAW signal [V]", fontsize=8)
canvas.ax1.set_ylabel("Current [A]", fontsize=8)
#canvas.ax2.tick_params(axis='both', which='major', labelsize=8)
#canvas.ax2.tick_params(axis='both', which='minor', labelsize=6)
#canvas.ax2.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
#canvas.ax1.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
canvas.draw()
# update GUI of where we are
percent = (int) (1e2*((float)((iistack*self.nPulseMoments+ipm+1)) / (len(procStacks)*self.nPulseMoments)))
self.progressTrigger.emit(percent)
iistack += 1
self.enableDSP()
self.doneTrigger.emit()
if __name__ == "__main__":
if len(sys.argv) < 4:
print( "mrsurvey path/to/header <stack1> <stackN> ")
exit()
GMR = GMRDataProcessor()
GMR.readHeaderFile(sys.argv[1])
GMR.Print()
if GMR.pulseType == "FID":
GMR.loadFIDData(sys.argv[1], sys.argv[2], sys.argv[3], 5)
if GMR.pulseType == "4PhaseT1":
GMR.load4PhaseT1Data(sys.argv[1], sys.argv[2], sys.argv[3], 5)
    pylab.show()

# ---- end of file: akvo/tressel/mrsurvey.py (package: Akvo 1.7.2) ----
import numpy
def smooth(x,window_len=11,window='hanning'):
"""smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) in both ends so that transient parts are minimized
in the begining and end part of the output signal.
input:
x: the input signal
window_len: the dimension of the smoothing window; should be an odd integer
window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
flat window will produce a moving average smoothing.
output:
the smoothed signal
example:
    t=linspace(-2,2,50)
x=sin(t)+randn(len(t))*0.1
y=smooth(x)
see also:
numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
scipy.signal.lfilter
TODO: the window parameter could be the window itself if an array instead of a string
"""
if x.ndim != 1:
raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_len:
raise ValueError("Input vector needs to be bigger than window size.")
if window_len<3:
return x
    if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError("Window must be one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
s=numpy.r_[2*x[0]-x[window_len:1:-1],x,2*x[-1]-x[-1:-window_len:-1]]
#print(len(s))
    if window == 'flat': # moving average
        w = numpy.ones(window_len, 'd')
    else:
        w = getattr(numpy, window)(window_len)
y=numpy.convolve(w/w.sum(),s,mode='same')
return y[window_len-1:-window_len+1]
from numpy import *
from pylab import *
def smooth_demo():
t=linspace(-4,4,100)
x=sin(t)
xn=x+randn(len(t))*0.1
y=smooth(x)
ws=31
subplot(211)
plot(ones(ws))
windows=['flat', 'hanning', 'hamming', 'bartlett', 'blackman']
    #hold(True) # matplotlib's hold() has been removed; overlaying is now the default behaviour
for w in windows[1:]:
eval('plot('+w+'(ws) )')
axis([0,30,0,1.1])
legend(windows)
title("The smoothing windows")
subplot(212)
plot(x)
plot(xn)
for w in windows:
plot(smooth(xn,10,w))
l=['original signal', 'signal with noise']
l.extend(windows)
legend(l)
title("Smoothing a noisy signal")
show()
if __name__=='__main__':
    smooth_demo()

# ---- end of file: akvo/tressel/smooth.py (package: Akvo 1.7.2) ----
from __future__ import division
import matplotlib as mpl
mpl.use('pdf')
#from rasterize import rasterize_and_save
import matplotlib.patches as mpatches
from pwctime import pwcTime
from logbarrier import *
from perlin import perlin
from scipy import stats
import cmocean
import sys
import numpy as np
import seaborn as sns
def bootstrapWindows(N, nboot, isum, adapt=False):
""" Bootstraps noise as a function of gate width
N = input noise signal
nboot = number of boostrap windows to perform
isum = length of windows (L_i)
adapt = reduce nboot as window size increases
"""
nc = np.shape(N)[0]
Means = {}
if adapt:
Means = -9999*np.ones((len(isum), nboot//isum[0])) # dummy value
for ii, nwin in enumerate(isum):
for iboot in range(nboot//isum[ii]):
cs = np.random.randint(0,nc-nwin)
Means[ii,iboot] = np.mean( N[cs:cs+nwin] )
Means = np.ma.masked_less(Means, -9995)
else:
Means = np.zeros((len(isum), nboot))
for ii, nwin in enumerate(isum):
for iboot in range(nboot):
cs = np.random.randint(0,nc-nwin)
Means[ii,iboot] = np.mean( N[cs:cs+nwin] )
return Means, np.array(isum)
def gateIntegrate(T2D, T2T, gpd, sigma, stackEfficiency=2.):
""" Gate integrate the signal to gpd, gates per decade
T2D = the time series to gate integrate, complex
T2T = the abscissa values
gpd = gates per decade
sigma = estimate of standard deviation for theoretical gate noise
stackEfficiency = exponential in theoretical gate noise, 2 represents ideal stacking
"""
# use artificial time gates so that early times are fully captured
T2T0 = T2T[0]
T2TD = T2T[0] - (T2T[1]-T2T[0])
T2T -= T2TD
#####################################
# calculate total number of decades #
    # window edges are approximate until binning but will be adjusted to reflect data timing; this
    # primarily impacts bins with only a few samples
nd = np.log10(T2T[-1]/T2T[0])
tdd = np.logspace( np.log10(T2T[0]), np.log10(T2T[-1]), (int)(gpd*nd)+1, base=10, endpoint=True)
tdl = tdd[0:-1] # approximate window left edges
tdr = tdd[1::] # approximate window right edges
td = (tdl+tdr) / 2. # approximate window centres
Vars = np.zeros( len(td) )
htd = np.zeros( len(td), dtype=complex )
isum = np.zeros( len(td), dtype=int )
ii = 0
for itd in range(len(T2T)):
if ( T2T[itd] > tdr[ii] ):
ii += 1
# correct window edges to centre about data
tdr[ii-1] = (T2T[itd-1]+T2T[itd])*.5
tdl[ii ] = (T2T[itd-1]+T2T[itd])*.5
isum[ii] += 1
htd[ii] += T2D[ itd ]
Vars[ii] += sigma**2
td = (tdl+tdr) / 2. # actual window centres
sigma2 = np.sqrt( Vars * ((1/(isum))**stackEfficiency) )
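    # theoretical gate error: each gate averages isum samples, so its standard error is
    # sqrt(sum of per-sample variances) * (1/isum)**(stackEfficiency/2); with stackEfficiency=2
    # this reduces to sigma/sqrt(isum), i.e. ideal stacking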
# Reset abscissa where isum == 1
# when there is no windowing going on
td[isum==1] = T2T[0:len(td)][isum==1]
tdd = np.append(tdl, tdr[-1])
htd /= isum # average
T2T += T2TD # not used
return td+T2TD, htd, tdd+T2TD, sigma2, isum # centre abscissa, data, window edges, error
PhiD = []
def invert(Time, t, v, sig, lambdastar):
""" helper function that simply calls logBarrier, here to allow for drop in repacement
"""
#model = logBarrier(Time.Genv, 1e-2*v, Time.T2Bins, MAXITER=5000, sigma=1e-2*sig, alpha=1e6, smooth="Both")
model = logBarrier(Time.Genv, 1e-2*v, Time.T2Bins, lambdastar, MAXITER=750, sigma=1e-2*sig, alpha=1e6, smooth="Smallest")
PhiD.append(model[2])
return model
def gateTest(vc, vgc, pperlin, boot, lambdastar):
    """ Performs gate integration and adds random noise
            vc = clean data (dense)
            vgc = clean data at gates
            pperlin = percent Perlin noise; the noise floor is maintained at 2.00 PU
            boot = if "boot" then bootstrap the gate noise
            lambdastar = "lcurve" or "discrepency" (spelling matches the CLI flag)
    """
t = np.arange(2e-4, .3601, 2e-4)
zeta = np.pi / 3.
v = np.copy(vc) # important!
# Scaling factors to keep noise floor constant with increasing levels of
# Perlin noise. These were determined using populations of 5,000 and hold to
# two significant digits (i.e, 2.00 PU)
PF = {0.0:0,\
2.5:.450,\
5.0:.6125,\
7.5:.765,\
10.0:.87375,\
12.5:.9725,\
15.0:1.05,\
17.5:1.1275,\
20.0:1.20,\
22.5:1.265,\
25.0:1.325}
# random noise
np.random.seed() # necessary for thread pool, otherwise all threads can get same numbers
sigma = 2.*(1.-1e-2*pperlin)
eps = np.random.normal(0, sigma, len(t)) + \
1j*np.random.normal(0, sigma, len(t))
eps += PF[pperlin] * perlin(len(t), L=.3601, sigma_f=.005, sigma_r=0.72) # 1 PU std
v += eps
# Noise based on residual
sigmahat = np.std( v.imag )
gt, gd, we, err, isum = gateIntegrate(v.real, t, 20, sigmahat)
ge = np.copy(err)
if boot=="boot":
Means, isum2 = bootstrapWindows(v.imag, 20000, isum[isum!=1], adapt=True)
# STD
#err[isum!=1] = np.ma.std(Means, axis=1, ddof=1)[isum!=1]
# MAD, only for windows > 1
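        # scale the median absolute deviation by 1/Phi^{-1}(3/4) ~= 1/0.6745 so that it is a consistent
        # estimator of the standard deviation under Gaussian noise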
c = stats.norm.ppf(3./4.)
err[isum!=1] = np.ma.median(np.ma.abs(Means), axis=1) / c
if boot=="uniform":
err = sigmahat
return gt, gd, gd-vgc, err, v.real, t, isum
if __name__ == "__main__":
import multiprocessing
import itertools
from GJIPlot import *
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
from matplotlib import ticker
from collections import OrderedDict
if len(sys.argv)<4:
print ( "Python script for generating plots used in GJI publication")
print ( "useage:")
print ( "python gateIntegrate4.py NoiseType Sigma_i Lambda* " )
exit()
if sys.argv[1] not in ['0.0','2.5','5.0','7.5','10.0','12.5','15.0','17.5','20.0','22.5','25.0']:
print ( "PercentPerlin: [0.0,2.5,5.0...25.0] ", "got", sys.argv[1])
exit(1)
if sys.argv[2] != "gauss" and sys.argv[2] != "boot" and sys.argv[2] != "uniform":
print ( "Sigma_i: gauss | boot | uniform")
exit(1)
if sys.argv[3] != "lcurve" and sys.argv[3] != "discrepency":
print ( "Lambda*: lcurve | discrepency ")
exit(1)
#offwhite = (.98,.98,.98)
offwhite = (1.,1.,1.)
mDarkBrown = '#eb811b' # alert colour
mDarkTeal = '#23373b'
mLightBrown= "#EB811B"
mLightGreen = "#14B03D"
# Time series plot
fig = plt.figure(figsize=(pc2in(18),pc2in(18)), facecolor=offwhite)
ax2 = fig.add_axes([.195, .175, .750, .75], facecolor=offwhite) # time
# Main plot
fig2 = plt.figure(figsize=(pc2in(20),pc2in(2*.5*20)))
ax1 = fig2.add_axes([.175, .410*1.5, .6, .225*1.5])
ax1c = fig2.add_axes([.800, .410*1.5, .025, .225*1.5])
ax3 = fig2.add_axes([.175, .100*1.5, .495, .225*1.5], facecolor='None')
ax3r = fig2.add_axes([.175, .100*1.5, .495, .225*1.5], facecolor='None', rasterized=True, sharex=ax3, sharey=ax3)
ax3b = fig2.add_axes([.825, .100*1.5, .1, .225*1.5])
SIG = []
ER = []
GD = []
GT = []
V = []
MOD = []
CONV = []
PHID = []
PHIM = []
LSTAR = []
ns = 10000 #10000 #10000 # number of realizations for PDF
ni = 5000 #5000 #1000 # number of inversions to plot
t = np.arange(2e-4, .3601, 2e-4) # CMR sampling
#CMAP = cmocean.cm.solar
CMAP = cmocean.cm.gray_r
#CMAP = cmocean.cm.haline
#CMAP = cmocean.cm.tempo
##############################################
# set up model
lowT2 = .001
hiT2 = 1.0
nT2 = 30
spacing = "Log_10"
Time = pwcTime()
Time.setT2(lowT2, hiT2, nT2, spacing)
Time.setSampling( np.arange(2e-4, .3601, 2e-4) )
Time.generateGenv()
tmod = np.zeros(nT2)
tmod [8] = .15 # distribution centres...to be smoothed
tmod [20] = .1
for i in range(2):
tmod = np.convolve(tmod, np.array([.0625,.125,.1875,.25,.1875,.125,.0625]), 'same')
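    # two passes of a 7-point binomial-like kernel smooth the two delta spikes into broad T2 peaks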
vc = 100. * np.dot(Time.Genv, tmod) + 0j # in PU
gt, gd, we, err, isum = gateIntegrate(vc, t, 20, 3)
##############################################
# Set up inversion
Time = pwcTime()
Time.setT2(lowT2, hiT2, nT2, spacing)
Time.setSampling( gt )
Time.generateGenv()
vgc = 100.*np.dot(Time.Genv, tmod) + 0j # in PU
# make the Pool of workers
print("pool gate integrate")
with multiprocessing.Pool() as pool:
results = pool.starmap(gateTest, zip(np.tile(vc, (ns, 1)), np.tile(vgc, (ns,1)), itertools.repeat(eval(sys.argv[1])), \
itertools.repeat(sys.argv[2]), \
itertools.repeat(sys.argv[3])))
print("done pool gate integrate")
# parse out results
for i in range(ns):
gt,gd,ge,err,v,vt,isum = results[i]
V.append(v.real)
GT.append(gt.real)
GD.append(gd.real)
ER.append( ge.real / err.real )
SIG.append( err.real )
print("pool inversions")
with multiprocessing.Pool() as pool:
invresults = pool.starmap(invert, zip(itertools.repeat(Time), GT[0:ni], GD[0:ni], SIG[0:ni], itertools.repeat(sys.argv[3]) ))
#print("done pool inversions",results[:][0])
# Parse results
for i in range(ns):
#print("Sym %", round(100.*i/(float)(1)/(float)(ns)))
# invert
if i < ni:
#mod, conv, phid = invert(Time, gt, gd.real, err)
if sys.argv[3] == "discrepency":
mod, conv, phid_final = invresults[i]
else:
mod, conv, phid_final, phim, phid, lstar = invresults[i]
MOD.append(mod)
CONV.append(conv)
PHID.append(phid)
PHIM.append(phim)
LSTAR.append(lstar)
PHIM = np.array(PHIM)
PHID = np.array(PHID)
ER = np.array(ER)
MOD = np.array(MOD)
GD = np.array(GD)
####################
# Time series plot #
ax2.plot( 1e3*vt, V[0], color=mDarkTeal, label="$V_N$", linewidth=1, zorder=-32) #, rasterized=True)
ax2.errorbar( 1e3*gt, GD[0], yerr=SIG[0], fmt='.', markersize=6, color=mLightBrown, label="$V_G$")
ax2.set_ylim([-10,30])
leg1 = ax2.legend( labelspacing=0.2, scatterpoints=1, numpoints=1, frameon=True )
fixLeg(leg1)
ax2.set_xscale("log", nonposx='clip')
ax2.set_ylabel(r"$V_N$ (PU)")
ax2.get_xaxis().set_major_formatter(FormatStrFormatter('%1.0f'))
ax2.set_xlabel("time (ms)")
deSpine(ax2)
fig.savefig( sys.argv[1] + "-" + sys.argv[2] + "-" + sys.argv[3] + "-ts.pdf", dpi=400, facecolor=offwhite,edgecolor=offwhite)
# histogram of error statistic
bins = np.linspace( -3, 3, 40, endpoint=True )
HIST = []
for i in range(0,np.shape(ER)[1]):
hist, edges = np.histogram(ER[:,i], bins=bins, density=False)
HIST.append(hist)
HIST = np.array(HIST)/(float)(ns) # normalize
im = ax1.pcolor(1e3*we, edges, HIST.T, cmap=CMAP, vmin=0, vmax=.1, rasterized=True)
im.set_edgecolor('face')
cb = plt.colorbar(im, ax1c, label=r"probability density", format=FormatStrFormatter('%1.2f'))
cb.solids.set_rasterized(True)
tick_locator = ticker.MaxNLocator(nbins=4)
cb.locator = tick_locator
cb.update_ticks()
ax1.set_xscale("log", nonposx='clip')
ax1.get_xaxis().set_major_formatter(FormatStrFormatter('%1.0f'))
ax1.set_xlabel("time (ms)")
ax1.set_ylabel(r"gate error $\left( \left( {V_G - V_T} \right) / {\tilde{\sigma_i}} \right)$")
LMT2 = []
THETA = []
MODERR = []
# plot a random sample of ns instead?
for i in range(ni):
# plot log mean and amplitude
model = MOD[i]
theta = np.sum( model )
LogMeanT2 = np.exp(np.sum( model * np.log( Time.T2Bins ) ) / theta )
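        # log-mean T2: T2ML = exp( sum_i m_i ln(T2_i) / sum_i m_i ); theta is the total water content
        # (sum over the model bins)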
LMT2.append(LogMeanT2)
THETA.append( np.sum(model) )
MODERR.append( np.linalg.norm(model-tmod) )
CONV = np.array(CONV)
THETA = np.array(THETA)
MOD = np.array(MOD)
MODERR = np.array(MODERR)
#############################
# plot all models, 1 colour #
ires = ax3r.plot( 1e3*np.tile(Time.T2Bins, (np.sum(np.array(CONV)) ,1)).T , 1e2*MOD.T, color=mDarkTeal, alpha=.01, lw=.5, label="$\mathbf{f}_I$", zorder=0, rasterized=True)
lns2, = ax3r.plot(1e3*Time.T2Bins, 1e2*tmod, color=mLightBrown, linewidth=2, label="$\mathbf{f}_T$")
handles, labels = ax3r.get_legend_handles_labels()
by_label = OrderedDict(zip(labels, handles))
leg3 = ax3r.legend(by_label.values(), by_label.keys(), labelspacing=0.2, scatterpoints=1, numpoints=1, frameon=True , loc="upper right")
for line in leg3.get_lines():
line.set_linewidth(1)
for lh in leg3.legendHandles:
lh.set_alpha(1)
fixLeg(leg3)
###########################
# Error histogram on side #
ax3b.hist( 1e2*MODERR, bins='auto', orientation="horizontal", color=mDarkTeal, stacked=True, density=True, range=(0,20))
ax3b.axhline(1e2*np.mean(MODERR), linewidth=1.25, color=mLightBrown) #, color=CMAP(0.7), zorder=1)
deSpine(ax3b)
ax3b.set_xscale("log", nonposx='clip')
ax3b.set_ylabel(r"$\Vert \mathbf{f}_I -\mathbf{f}_T \Vert$") # %(m$^3$/m$^3$)") #, color="C0")
ax3b.set_xlabel("log probability\ndensity") #, color="C0")
ax3.set_xlim( (1e3*Time.T2Bins[0], 1e3*Time.T2Bins[-1]) )
ax3.set_ylim( (0,5) )
ax3.set_xlabel("$T_2$ (ms)")
ax3.set_ylabel("partial water content (PU)") #, color="C0")
ax3.set_xscale("log", nonposx='clip')
ax3.get_xaxis().set_major_formatter(FormatStrFormatter('%1.0f'))
plt.setp(ax3r.get_xticklabels(), visible=False)
plt.setp(ax3r.get_yticklabels(), visible=False)
deSpine(ax3)
deSpine(ax3r)
np.save("pperlin" + str(round(1e1*eval(sys.argv[1]))) + "-" + sys.argv[2] + "-" + sys.argv[3] + "-err", MODERR)
plt.savefig("pperlin" + str(round(1e1*eval(sys.argv[1]))) + "-" + sys.argv[2] + "-" + sys.argv[3] + ".pdf", dpi=600, facecolor=offwhite, edgecolor=offwhite)
    plt.show()

# ---- end of file: akvo/tressel/gateIntegrate4.py (package: Akvo 1.7.2) ----
from akvo.tressel.SlidesPlot import *
import numpy as np
import sys
import matplotlib.pyplot as plt
import cmocean
from pylab import meshgrid
from akvo.tressel.logbarrier import *
import yaml,os
import multiprocessing
import itertools
from scipy.linalg import svd
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.colors import LogNorm
from matplotlib.colors import LightSource
from matplotlib.ticker import ScalarFormatter
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import AutoMinorLocator
from matplotlib.ticker import LogLocator
from matplotlib.ticker import FormatStrFormatter
from matplotlib.colors import Normalize
import cmocean
from akvo.tressel.lemma_yaml import *
from akvo.tressel import nonlinearinv as nl
import pandas as pd
import matplotlib.colors as colors
# From https://stackoverflow.com/questions/18926031/how-to-extract-a-subset-of-a-colormap-as-a-new-colormap-in-matplotlib
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
new_cmap = colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
cmap(np.linspace(minval, maxval, n)))
return new_cmap
def buildKQT(K0,tg,T2Bins):
"""
Constructs a QT inversion kernel from an initial amplitude one.
"""
nlay, nq = np.shape(K0)
nt2 = len(T2Bins)
nt = len(tg)
KQT = np.zeros( ( nq*nt,nt2*nlay), dtype=np.complex128 )
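    # Row index iq*nt + it pairs pulse moment iq with time gate it; column index ilay*nt2 + it2 pairs
    # layer ilay with T2 bin it2. The "10 +" added to the gate time below is assumed to be a fixed
    # 10 ms offset before the first gate; the 1e-3 factors convert ms to s.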
for iq in range(nq):
for it in range(nt):
for ilay in range(nlay):
for it2 in range(nt2):
                    KQT[iq*nt + it,ilay*nt2+it2] = K0[ilay,iq]*np.exp(-((10+tg[it])*1e-3)/(1e-3*T2Bins[it2]))
return KQT
def loadAkvoData(fnamein, chan):
""" Loads data from an Akvo YAML file. The 0.02 is hard coded as the pulse length. This needs to be
corrected in future kernel calculations. The current was reported but not the pulse length.
"""
fname = (os.path.splitext(fnamein)[0])
with open(fnamein, 'r') as stream:
try:
AKVO = (yaml.load(stream, Loader=yaml.Loader))
except yaml.YAMLError as exc:
print(exc)
exit()
Z = np.zeros( (AKVO.nPulseMoments, AKVO.Gated["Pulse 1"]["abscissa"].size ) )
ZS = np.zeros( (AKVO.nPulseMoments, AKVO.Gated["Pulse 1"]["abscissa"].size ) )
for q in range(AKVO.nPulseMoments):
Z[q] = AKVO.Gated["Pulse 1"][chan]["Q-"+str(q) + " CA"].data
if chan == "Chan. 1":
ZS[q] = AKVO.Gated["Pulse 1"][chan]["STD"].data
elif chan == "Chan. 2":
ZS[q] = AKVO.Gated["Pulse 1"][chan]["STD"].data
elif chan == "Chan. 3":
ZS[q] = AKVO.Gated["Pulse 1"][chan]["STD"].data
elif chan == "Chan. 4":
ZS[q] = AKVO.Gated["Pulse 1"][chan]["STD"].data
else:
print("DOOM!!!")
exit()
#Z *= 1e-9
#ZS *= 1e-9
J = AKVO.Pulses["Pulse 1"]["current"].data
J = np.append(J,J[-1]+(J[-1]-J[-2]))
Q = AKVO.pulseLength[0]*J
return Z, ZS, AKVO.Gated["Pulse 1"]["abscissa"].data, Q
def catLayers(K0):
K = np.zeros( (len(K0.keys()), len(K0["layer-0"].data)) , dtype=complex )
for lay in range(len(K0.keys())):
#print(K0["layer-"+str(lay)].data) # print (lay)
        K[lay] = K0["layer-"+str(lay)].data
return 1e9*K # invert in nV
def loadK0(fname):
""" Loads in initial amplitude kernel
"""
print("loading K0", fname)
with open(fname) as f:
K0 = yaml.load(f, Loader=yaml.Loader)
K = catLayers(K0.K0)
ifaces = np.array(K0.Interfaces.data)
return ifaces, K
#return ifaces, np.abs(K)
def invertDelta(G, V_n, T2Bins, sig, alphastar):
""" helper function that simply calls logBarrier, simplfies parallel execution
"""
model = logBarrier(G, V_n, T2Bins, "Single", MAXITER=1, sigma=sig, alpha=alphastar, smooth="Smallest")
return model
def main():
if (len (sys.argv) < 2):
print ("akvoQT invertParameters.yaml")
exit()
with open(sys.argv[1], 'r') as stream:
try:
cont = (yaml.load(stream, Loader=yaml.Loader))
except yaml.YAMLError as exc:
print(exc)
exit(1)
###############################################
# Load in data
###############################################
V = []
VS = []
QQ = []
tg = 0
for dat in cont['data']:
for ch in cont['data'][dat]['channels']:
print("dat", dat, "ch", ch)
v,vs,tg,Q = loadAkvoData(dat, ch)
V.append(v)
VS.append(vs)
QQ.append(Q)
for iv in range(1, len(V)):
V[0] = np.concatenate( (V[0], V[iv]) )
VS[0] = np.concatenate( (VS[0], VS[iv]) )
V = V[0]
VS = VS[0]
###############################################
# Load in kernels
###############################################
K0 = []
for kern in cont["K0"]:
ifaces,k0 = loadK0( kern )
K0.append(k0)
for ik in range(1, len(K0)):
K0[0] = np.concatenate( (K0[0].T, K0[ik].T) ).T
K0 = K0[0]
#np.save("ifaces", ifaces)
#exit()
#plt.matshow(np.real(K0))
#plt.show()
#exit()
##############################################################
# VERY Simple Sensitivity based calc. of noise per layer #
# minimally useful, but retained for backwards compatibility #
maxq = np.argmax(np.abs(K0), axis=1)
maxK = .1 * np.abs(K0)[ np.arange(0,len(ifaces)-1), maxq ] # 10% water is arbitrary
SNR = maxK / (VS[0][0])
###############################################
# Build full kernel
###############################################
T2Bins = np.logspace( np.log10(cont["T2Bins"]["low"]), np.log10(cont["T2Bins"]["high"]), cont["T2Bins"]["number"], endpoint=True, base=10)
T2Bins2 = np.append( T2Bins, T2Bins[-1] + (T2Bins[-1]-T2Bins[-2]) )
NT2 = len(T2Bins)
KQT = np.real(buildKQT(np.abs(K0),tg,T2Bins))
###############################################
# Linear Inversion
###############################################
print("Calling inversion", flush=True)
inv, ibreak, errn, phim, phid, mkappa, Wd, Wm, alphastar = logBarrier(KQT, np.ravel(V), T2Bins, "lcurve", MAXITER=150, sigma=np.ravel(VS), alpha=1e7, smooth="Smallest" )
################################
# Summary plots, Data Space #
################################
# TODO, need to clean this up for the case of multiple channels! Each channel should be a new row. It will be ugly, but important
# TODO, loop over channels
ich = 0
for ch in cont['data'][dat]['channels']:
figx = plt.figure( figsize=(pc2in(42.0),pc2in(22.)) )
ax1 = figx.add_axes([.100, .15, .200, .70])
ax2 = figx.add_axes([.325, .15, .200, .70]) # shifted to make room for shared colourbar
axc1= figx.add_axes([.550, .15, .025, .70]) # shifted to make room for shared colourbar
ax3 = figx.add_axes([.670, .15, .200, .70])
axc2= figx.add_axes([.895, .15, .025, .70]) # shifted to make room for shared colourbar
ax3.set_yscale('log')
ax2.set_yscale('log')
ax1.set_yscale('log')
ax2.yaxis.set_ticklabels([])
ax3.yaxis.set_ticklabels([])
ax3.set_xscale('log')
ax2.set_xscale('log')
ax1.set_xscale('log')
ax1.set_ylabel("Q (A $\cdot$ s)")
ax1.set_xlabel("time (s)")
ax2.set_xlabel("time (s)")
ax3.set_xlabel("time (s)")
#TT, QQQ = np.meshgrid(tg, np.ravel(QQ))
TT, QQQ = np.meshgrid(tg, np.ravel(QQ[ich]))
nq = np.shape(QQ[ich])[0] - 1 # to account for padding in pcolor
nt = np.shape(tg)[0]
ntq = nt*nq
VV = V[ich*nq:ich*nq+nq,:] # slice this channel
VVS = VS[ich*nq:ich*nq+nq,:] # slice this channel
mmax = np.max(np.abs(VV))
mmin = np.min(VV)
obs = ax1.pcolor(TT, QQQ, VV, cmap=cmocean.cm.curl_r, vmin=-mmax, vmax=mmax, shading='auto') # pcolor edge not defined
ax1.set_title("observed")
pre = np.dot(KQT[ich*ntq:(ich+1)*ntq,:], inv)
PRE = np.reshape( pre, np.shape(VV) )
prem = ax2.pcolor(TT, QQQ, PRE, cmap=cmocean.cm.curl_r, vmin=-mmax, vmax=mmax,shading='auto' )
ax2.set_title("predicted")
cbar = plt.colorbar(prem, axc1)
axc1.set_ylim( [np.min(VV), np.max(VV)] )
cbar.outline.set_edgecolor(None)
cbar.set_label('$V_N$ (nV)')
DIFF = (PRE-VV) / VVS
md = np.max(np.abs(DIFF))
dim = ax3.pcolor(TT, QQQ, DIFF, cmap=cmocean.cm.balance, vmin=-md, vmax=md, shading='auto')
ax3.set_title("misfit / $\widehat{\sigma}$")
cbar2 = plt.colorbar(dim, axc2)
#axc1.set_ylim( [np.min(V), np.max(V)] )
cbar2.outline.set_edgecolor(None)
cbar2.set_label('$V_N$ (nV)')
#plt.colorbar(dim, ax3)
figx.suptitle(ch + " linear Inversion")
plt.savefig(ch + "dataspace.pdf")
ich += 1
###############################################
# Non-linear refinement!
###############################################
nonLinearRefinement = cont['NonLinearRefinement']
if nonLinearRefinement:
KQTc = buildKQT(K0, tg, T2Bins)
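        # The linear inversion above used |K0|; here the complex kernel is used, and the magnitude of the
        # predicted complex voltages is compared against the data, iterating the non-linear solver until its
        # normalized misfit is no larger than that of the linear solution (or until progress stalls).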
prec = np.abs(np.dot(KQTc, inv))
phidc = np.linalg.norm(np.dot(Wd,prec-np.ravel(V)))**2
print("PHID forward linear=", errn, "PHID forward nonlinear=", phidc/len(np.ravel(V)))
res = nl.nonlinearinversion(inv, Wd, KQTc, np.ravel(V), Wm, alphastar )
if res.success == True:
INVc = np.reshape(res.x, (len(ifaces)-1,cont["T2Bins"]["number"]) )
prec = np.abs(np.dot(KQTc, res.x))
phidc = np.linalg.norm(np.dot(Wd,prec-np.ravel(V)))**2
PREc = np.reshape( prec, np.shape(V) )
print("PHID linear=", errn, "PHID nonlinear=", phidc/len(np.ravel(V)))
while phidc/len(np.ravel(V)) > errn:
phidc_old = phidc/len(np.ravel(V))
#alphastar *= .9
res = nl.nonlinearinversion(res.x, Wd, KQTc, np.ravel(V), Wm, alphastar )
if res.success == True:
INVc = np.reshape(res.x, (len(ifaces)-1,cont["T2Bins"]["number"]) )
prec = np.abs(np.dot(KQTc, res.x))
phidc = np.linalg.norm(np.dot(Wd,prec-np.ravel(V)))**2
PREc = np.reshape( prec, np.shape(V) )
print("PHID linear=", errn, "PHID nonlinear=", phidc/len(np.ravel(V)))
else:
nonLinearRefinement = False
print("Non-linear inversion failed, results will not be shown")
break
if phidc_old - phidc/len(np.ravel(V)) < 0.005:
print("Not making progress reducing misfit in nonlinear refinement")
break
# Turn this into a nice figure w/ shared axes etc.
# plt.matshow(PREc, cmap='Blues')
# plt.gca().set_title("nonlinear predicted")
# plt.colorbar()
#
# DIFFc = (PREc-V) / VS
# md = np.max(np.abs(DIFF))
# plt.matshow(DIFFc, cmap=cmocean.cm.balance, vmin=-md, vmax=md)
# plt.gca().set_title("nonlinear misfit / $\widehat{\sigma}$")
# plt.colorbar()
################################
# Summary plots, Data Space #
################################
ich = 0
for ch in cont['data'][dat]['channels']:
figx = plt.figure( figsize=(pc2in(42.0),pc2in(22.)) )
ax1 = figx.add_axes([.100, .15, .200, .70])
ax2 = figx.add_axes([.325, .15, .200, .70]) # shifted to make room for shared colourbar
axc1= figx.add_axes([.550, .15, .025, .70]) # shifted to make room for shared colourbar
ax3 = figx.add_axes([.670, .15, .200, .70])
axc2= figx.add_axes([.895, .15, .025, .70]) # shifted to make room for shared colourbar
ax3.set_yscale('log')
ax2.set_yscale('log')
ax1.set_yscale('log')
ax2.yaxis.set_ticklabels([])
ax3.yaxis.set_ticklabels([])
ax3.set_xscale('log')
ax2.set_xscale('log')
ax1.set_xscale('log')
ax1.set_ylabel("Q (A $\cdot$ s)")
ax1.set_xlabel("time (s)")
ax2.set_xlabel("time (s)")
ax3.set_xlabel("time (s)")
#TT, QQQ = np.meshgrid(tg, np.ravel(QQ))
TT, QQQ = np.meshgrid(tg, np.ravel(QQ[ich]))
nq = np.shape(QQ[ich])[0] - 1 # to account for padding in pcolor
nt = np.shape(tg)[0]
ntq = nt*nq
VV = V[ich*nq:ich*nq+nq,:] # slice this channel
VVS = VS[ich*nq:ich*nq+nq,:] # slice this channel
mmax = np.max(np.abs(VV))
mmin = np.min(VV)
obs = ax1.pcolor(TT, QQQ, VV, cmap=cmocean.cm.curl_r, vmin=-mmax, vmax=mmax, shading='auto')
ax1.set_title("observed")
            ## Here needs to change: the prediction below is computed from the linear model `inv`, not the refined `res.x`
pre = np.abs(np.dot(KQTc[ich*ntq:(ich+1)*ntq,:], inv))
PRE = np.reshape( pre, np.shape(VV) )
prem = ax2.pcolor(TT, QQQ, PRE, cmap=cmocean.cm.curl_r, vmin=-mmax, vmax=mmax, shading='auto' )
ax2.set_title("predicted")
cbar = plt.colorbar(prem, axc1)
axc1.set_ylim( [np.min(VV), np.max(VV)] )
cbar.outline.set_edgecolor(None)
cbar.set_label('$V_N$ (nV)')
DIFF = (PRE-VV) / VVS
md = np.max(np.abs(DIFF))
dim = ax3.pcolor(TT, QQQ, DIFF, cmap=cmocean.cm.balance, vmin=-md, vmax=md, shading='auto')
ax3.set_title("misfit / $\widehat{\sigma}$")
cbar2 = plt.colorbar(dim, axc2)
#axc1.set_ylim( [np.min(V), np.max(V)] )
cbar2.outline.set_edgecolor(None)
cbar2.set_label('$V_N$ (nV)')
#plt.colorbar(dim, ax3)
figx.suptitle(ch + " non-linear Inversion")
plt.savefig(ch + "_NLdataspace.pdf")
ich += 1
###############################################
# Appraise DOI using simplified MRM
###############################################
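# Descriptive note on the approach: for every layer a delta model (amplitude
# 0.3 in the central T2 bin) is forward modelled with KQT and re-inverted with
# the same regularization weight alphastar. The depth-summed water content of
# each recovered delta forms one row of the model resolution matrix (MRM), and
# the DOI is picked as the first interface where the metric
# centres[argmax(MRM, axis=0)] / centres falls below 0.9.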
CalcDOI = cont['CalcDOI']
if CalcDOI:
pdf = PdfPages('resolution_analysis' + '.pdf' )
MRM = np.zeros((len(ifaces)-1, len(ifaces)-1))
# Build delta models
DELTA = []
for ilay in range(len(ifaces)-1):
#for ilay in range(4):
iDeltaT2 = len(T2Bins)//2
deltaMod = np.zeros( (len(ifaces)-1, len(T2Bins)) )
deltaMod[ilay][iDeltaT2] = 0.3
dV = np.dot(KQT, np.ravel(deltaMod))
#dinv, dibreak, derrn = logBarrier( KQT, dV, T2Bins, "single", MAXITER=1, sigma=np.ravel(VS), alpha=alphastar, smooth="Smallest" )
#output = invertDelta(KQT, dV, T2Bins, np.ravel(VS), alphastar)
DELTA.append(dV)
print("Performing resolution analysis in parallel, printed output may not be in order.", flush=True)
with multiprocessing.Pool() as pool:
invresults = pool.starmap(invertDelta, zip(itertools.repeat(KQT), DELTA, itertools.repeat(T2Bins), itertools.repeat(np.ravel(VS)), itertools.repeat(alphastar) ))
# invresults = pool.starmap(logBarrier, zip(itertools.repeat(KQT), DELTA, itertools.repeat(T2Bins), itertools.repeat('single'), \
# itertools.repeat('MAXITER=1'), itertools.repeat(np.ravel(VS)), itertools.repeat(alphastar))) #, itertools.repeat(u'smooth=\'Smallest\'')) )
# This could be parallelized
for ilay in range(len(ifaces)-1):
# invert
#dinv, dibreak, derrn = logBarrier(KQT, dV, T2Bins, "single", MAXITER=1, sigma=np.ravel(VS), alpha=alphastar, smooth="Smallest" )
#print("Sum dinv from", str(ifaces[ilay]), "to", str(ifaces[ilay+1]), "=", np.sum(dinv))
dinv, dibreak, derrn = invresults[ilay]
DINV = np.reshape(dinv, (len(ifaces)-1,cont["T2Bins"]["number"]) )
MRM[ilay,:] = np.sum(DINV, axis=1)
Y,X = meshgrid( ifaces, T2Bins2 )
fig = plt.figure( figsize=(pc2in(20.0),pc2in(22.)) )
ax1 = fig.add_axes( [.2,.15,.6,.7] )
im = ax1.pcolor(X, Y, DINV.T, cmap=cmocean.cm.tempo, shading='auto')
ax1.plot( T2Bins[iDeltaT2], (ifaces[ilay]+ifaces[ilay+1])/2, 's', markersize=6, markeredgecolor='black') #, markerfacecolor=None )
im.set_edgecolor('face')
ax1.set_xlabel(u"$T_2^*$ (ms)")
ax1.set_ylabel(u"depth (m)")
ax1.set_xlim( T2Bins2[0], T2Bins2[-1] )
ax1.set_ylim( ifaces[-1], ifaces[0] )
ax2 = ax1.twiny()
ax2.plot( np.sum(DINV, axis=1), (ifaces[1:]+ifaces[0:-1])/2 , color='mediumblue' )
ax2.set_xlabel(u"total water (m$^3$/m$^3$)", color='mediumblue')
ax2.set_ylim( ifaces[-1], ifaces[0] )
ax2.xaxis.set_major_locator( MaxNLocator(nbins = 3) )
ax2.get_xaxis().set_major_formatter(FormatStrFormatter('%0.2f'))
pdf.savefig(facecolor=[0,0,0,0])
plt.close(fig)
np.save("MRM", MRM)
centres = (ifaces[0:-1]+ifaces[1:])/2
X,Y = np.meshgrid(ifaces,ifaces)
fig = plt.figure( figsize=(pc2in(20.0),pc2in(22.)) )
ax1 = fig.add_axes( [.2,.15,.6,.7] )
ax1.pcolor(X,Y,MRM, cmap = cmocean.cm.ice)
ax1.set_ylim(ifaces[-1], ifaces[0])
maxDepth = np.argmax(MRM, axis=0)
plt.plot(centres[maxDepth], centres, color='white')
# Determine DOI
DOIMetric = centres[maxDepth]/centres #> 0.9
DOI = ifaces[ np.where(DOIMetric < 0.9 ) ][0]
plt.axhline(y=DOI, color='white', linestyle='-.')
ax1.set_ylim( ifaces[-1], ifaces[0] )
ax1.set_xlim( ifaces[0], ifaces[-1] )
ax1.set_xlabel(u"depth (m)")
ax1.set_ylabel(u"depth (m)")
plt.savefig("resolutionmatrix.pdf")
pdf.close()
INV = np.reshape(inv, (len(ifaces)-1,cont["T2Bins"]["number"]) )
############## LINEAR RESULT ##########################
Y,X = meshgrid( ifaces, T2Bins2 )
fig = plt.figure( figsize=(pc2in(20.0),pc2in(22.)) )
ax1 = fig.add_axes( [.2,.15,.6,.7] )
im = ax1.pcolor(X, Y, INV.T, cmap=cmocean.cm.tempo) #cmap='viridis')
im.set_edgecolor('face')
ax1.set_xlim( T2Bins[0], T2Bins[-1] )
ax1.set_ylim( ifaces[-1], ifaces[0] )
cb = plt.colorbar(im, label = u"PWC (m$^3$/m$^3$)") #, format='%1.1f')
cb.locator = MaxNLocator( nbins = 4)
cb.ax.yaxis.set_offset_position('left')
cb.update_ticks()
ax1.set_xlabel(u"$T_2^*$ (ms)")
ax1.set_ylabel(u"depth (m)")
ax1.get_xaxis().set_major_formatter(FormatStrFormatter('%1.0f'))
ax1.get_yaxis().set_major_formatter(FormatStrFormatter('%1.0f'))
ax1.xaxis.set_major_locator( MaxNLocator(nbins = 4) )
ax2 = ax1.twiny()
ax2.plot( np.sum(INV, axis=1), (ifaces[1:]+ifaces[0:-1])/2 , color='mediumblue' )
ax2.set_xlabel(u"NMR total water (m$^3$/m$^3$)", color='mediumblue')
ax2.set_ylim( ifaces[-1], ifaces[0] )
ax2.xaxis.set_major_locator( MaxNLocator(nbins = 3) )
ax2.get_xaxis().set_major_formatter(FormatStrFormatter('%0.2f'))
#ax2.axhline( y=ifaces[SNRidx], xmin=0, xmax=1, color='black', linestyle='dashed' )
if CalcDOI:
ax2.axhline( y=DOI, xmin=0, xmax=1, color='black', linestyle='dashed' )
ax2.tick_params(axis='x', colors='mediumblue')
plt.setp(ax2.get_xticklabels(), color="mediumblue")
plt.savefig("akvoInversion.pdf")
#############
# water plot#
fig2 = plt.figure( figsize=(pc2in(20.0),pc2in(22.)) )
ax = fig2.add_axes( [.2,.15,.6,.7] )
# Bound water cutoff
Bidx = T2Bins<33.0
twater = np.sum(INV, axis=1)
bwater = np.sum(INV[:,Bidx], axis=1)
ax.plot( twater, (ifaces[0:-1]+ifaces[1::])/2, label="NMR total water", color='blue' )
ax.plot( bwater, (ifaces[0:-1]+ifaces[1::])/2, label="NMR bound water", color='green' )
ax.fill_betweenx((ifaces[0:-1]+ifaces[1::])/2 , twater, bwater, where=twater >= bwater, facecolor='blue', alpha=.5)
ax.fill_betweenx((ifaces[0:-1]+ifaces[1::])/2 , bwater, 0, where=bwater >= 0, facecolor='green', alpha=.5)
ax.set_xlabel(r"$\theta_N$ (m$^3$/m$^3$)")
ax.set_ylabel(r"depth (m)")
ax.set_ylim( ifaces[-1], ifaces[0] )
ax.set_xlim( 0, ax.get_xlim()[1] )
#ax.axhline( y=ifaces[SNRidx], xmin=0, xmax=1, color='black', linestyle='dashed' )
if CalcDOI:
ax.axhline( y=DOI, xmin=0, xmax=1, color='black', linestyle='dashed' )
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
plt.legend()
plt.savefig("akvoInversionWC.pdf")
fr = pd.DataFrame( INV, columns=T2Bins ) #[0:-1] )
fr.insert(0, "layer top", ifaces[0:-1] )
fr.insert(1, "layer bottom", ifaces[1::] )
fr.insert(2, "NMR total water", np.sum(INV, axis=1) )
fr.insert(3, "NMR bound water", bwater )
fr.insert(4, "Layer SNR", SNR )
if CalcDOI:
fr.insert(5, "Resolution", DOIMetric )
fr.to_csv("akvoInversion.csv", mode='w+')
############## NONLINEAR RESULT ##########################
if nonLinearRefinement:
Y,X = meshgrid( ifaces, T2Bins2 )
fig = plt.figure( figsize=(pc2in(20.0),pc2in(22.)) )
ax1 = fig.add_axes( [.2,.15,.6,.7] )
im = ax1.pcolor(X, Y, INVc.T, cmap=cmocean.cm.tempo) #cmap='viridis')
#im = ax1.pcolor(X[0:SNRidx,:], Y[0:SNRidx,:], INV.T[0:SNRidx,:], cmap=cmocean.cm.tempo) #cmap='viridis')
#im = ax1.pcolor(X[SNRidx::,:], Y[SNRidx::,:], INV.T[SNRidx::,:], cmap=cmocean.cm.tempo, alpha=.5) #cmap='viridis')
#im = ax1.pcolormesh(X, Y, INV.T, alpha=alphas) #, cmap=cmocean.cm.tempo) #cmap='viridis')
#im = ax1.pcolormesh(X, Y, INV.T, alpha=alphas) #, cmap=cmocean.cm.tempo) #cmap='viridis')
#ax1.axhline( y=ifaces[SNRidx], xmin=T2Bins[0], xmax=T2Bins[-1], color='black' )
im.set_edgecolor('face')
ax1.set_xlim( T2Bins[0], T2Bins[-1] )
ax1.set_ylim( ifaces[-1], ifaces[0] )
cb = plt.colorbar(im, label = u"PWC (m$^3$/m$^3$)") #, format='%1.1f')
cb.locator = MaxNLocator( nbins = 4)
cb.ax.yaxis.set_offset_position('left')
cb.update_ticks()
ax1.set_xlabel(u"$T_2^*$ (ms)")
ax1.set_ylabel(u"depth (m)")
ax1.get_xaxis().set_major_formatter(FormatStrFormatter('%1.0f'))
ax1.get_yaxis().set_major_formatter(FormatStrFormatter('%1.0f'))
ax1.xaxis.set_major_locator( MaxNLocator(nbins = 4) )
#ax1.xaxis.set_label_position('top')
ax2 = ax1.twiny()
ax2.plot( np.sum(INVc, axis=1), (ifaces[1:]+ifaces[0:-1])/2 , color='mediumblue' )
ax2.set_xlabel(u"NMR total water (m$^3$/m$^3$)", color='mediumblue')
ax2.set_ylim( ifaces[-1], ifaces[0] )
ax2.xaxis.set_major_locator( MaxNLocator(nbins = 3) )
ax2.get_xaxis().set_major_formatter(FormatStrFormatter('%0.2f'))
#ax2.axhline( y=ifaces[SNRidx], xmin=0, xmax=1, color='black', linestyle='dashed' )
if CalcDOI:
ax2.axhline( y=DOI, xmin=0, xmax=1, color='black', linestyle='dashed' )
#ax2.xaxis.set_label_position('bottom')
#fig.suptitle("Non linear inversion")
ax2.tick_params(axis='x', colors='mediumblue')
plt.setp(ax2.get_xticklabels(), color="mediumblue")
plt.savefig("akvoInversionNL.pdf")
#############
# water plot#
fig2 = plt.figure( figsize=(pc2in(20.0),pc2in(22.)) )
ax = fig2.add_axes( [.2,.15,.6,.7] )
# Bound water cutoff
Bidx = T2Bins<33.0
twater = np.sum(INVc, axis=1)
bwater = np.sum(INVc[:,Bidx], axis=1)
ax.plot( twater, (ifaces[0:-1]+ifaces[1::])/2, label="NMR total water", color='blue' )
ax.plot( bwater, (ifaces[0:-1]+ifaces[1::])/2, label="NMR bound water", color='green' )
ax.fill_betweenx((ifaces[0:-1]+ifaces[1::])/2 , twater, bwater, where=twater >= bwater, facecolor='blue', alpha=.5)
ax.fill_betweenx((ifaces[0:-1]+ifaces[1::])/2 , bwater, 0, where=bwater >= 0, facecolor='green', alpha=.5)
ax.set_xlabel(r"$\theta_N$ (m$^3$/m$^3$)")
ax.set_ylabel(r"depth (m)")
ax.set_ylim( ifaces[-1], ifaces[0] )
ax.set_xlim( 0, ax.get_xlim()[1] )
#ax.axhline( y=ifaces[SNRidx], xmin=0, xmax=1, color='black', linestyle='dashed' )
if CalcDOI:
ax.axhline( y=DOI, xmin=0, xmax=1, color='black', linestyle='dashed' )
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
plt.legend()
plt.savefig("akvoNLInversionWC.pdf")
# Report results into a text file
fr = pd.DataFrame( INVc, columns=T2Bins ) #[0:-1] )
fr.insert(0, "layer top", ifaces[0:-1] )
fr.insert(1, "layer bottom", ifaces[1::] )
fr.insert(2, "NMR total water", np.sum(INVc, axis=1) )
fr.insert(3, "NMR bound water", bwater )
fr.insert(4, "Layer SNR", SNR )
if CalcDOI:
fr.insert(5, "Resolution", DOIMetric )
fr.to_csv("akvoNLInversion.csv", mode='w+')
#fr.to_excel("akvoInversion.xlsx")
plt.show()
if __name__ == "__main__":
main() | Akvo | /Akvo-1.7.2-py3-none-any.whl/akvo/tressel/invertTA.py | invertTA.py |
from __future__ import division
import numpy as np
from scipy.sparse.linalg import iterative as iter
from scipy.sparse.linalg import spilu as ilu
import scipy.sparse.linalg as spla
from scipy.sparse import eye as seye
import pylab
import pprint
from scipy.optimize import nnls
import matplotlib.pyplot as plt
from akvo.tressel.SlidesPlot import *
def PhiB(mux, minVal, x):
phib = mux * np.abs( np.sum(np.log( x-minVal)) )
return phib
def curvaturefd(x, y, t):
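# Finite-difference curvature of the parametric curve (x(t), y(t)):
#   kappa = |x' y'' - y' x''| / (x'^2 + y'^2)^(3/2)
# used below to locate the corner (maximum curvature) of the L-curve.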
x1 = np.gradient(x,t)
x2 = np.gradient(x1,t)
y1 = np.gradient(y,t)
y2 = np.gradient(y1,t)
return np.abs(x1*y2 - y1*x2) / np.power(x1**2 + y1**2, 3./2)
def curvatureg(x, y):
from scipy.ndimage import gaussian_filter1d
#first and second derivative
x1 = gaussian_filter1d(x, sigma=1, order=1)#, mode='constant', cval=x[-1])
x2 = gaussian_filter1d(x1, sigma=1, order=1)#, mode='constant', cval=y[-1])
y1 = gaussian_filter1d(y, sigma=1, order=1)#, mode='constant', cval=x1[-1])
y2 = gaussian_filter1d(y1, sigma=1, order=1)#, mode='constant', cval=y1[-1])
return np.abs(x1*y2 - y1*x2) / np.power(x1**2 + y1**2, 3./2)
def logBarrier(A, b, T2Bins, lambdastar, x_0=0, xr=0, alpha=10, mu1=10, mu2=10, smooth=False, MAXITER=70, fignum=1000, sigma=1, callback=None):
"""Implements a log-barrier Tikhonov solution to a linear system of equations
Ax = b s.t. x_min < x < x_max. A log-barrier term is used for the constraint.
"""
# TODO input
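# Descriptive note: each outer iteration first takes a non-negative least
# squares (NNLS) step on the Tikhonov normal equations as a warm start, then
# enters a Newton/CG loop that approximately minimizes
#   ||Wd (A x - b)||^2 + alpha ||Wm (x - xr)||^2 - mu1 * sum(log(x - minVal))
# where the log-barrier term keeps the model positive; alpha is reduced each
# iteration until the discrepancy or L-curve stopping criterion is met.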
minVal = 0.0
#maxVal = 1e8
Wd = (np.eye(len(b)) / (sigma)) # Wd = eye( sigma )
WdTWd = (np.eye(len(b)) / (sigma**2)) # Wd = eye( sigma )
ATWdTWdA = np.dot(A.conj().transpose(), np.dot( WdTWd, A )) # TODO, implicit calculation instead?
N = np.shape(A)[1] # number of model
M = np.shape(A)[0] # number of data
SIGMA = .25 # .25 # lower is more aggressive relaxation of log barrier
EPSILON = 1e-25 #1e-35
#SIGMA = .05 # .25 # lower is more aggressive relaxation of log barrier
#EPSILON = 1e-10 #1e-35
# reference model
if np.size(xr) == 1:
xr = np.zeros(N)
# initial guess
if np.size(x_0) == 1:
x = 1e-10 + np.zeros(N)
else:
x = 1e-10 + x_0
# Construct model constraint base
Phim_base = np.zeros( [N , N] )
a1 = .05 # smallest too
# calculate largest term
D1 = 1./abs(T2Bins[1]-T2Bins[0])
D2 = 1./abs(T2Bins[2]-T2Bins[1])
#a2 = 1. #(1./(2.*D1+D2)) # smooth
if smooth == "Both":
#print ("Both small and smooth model")
for ip in range(N):
D1 = 0.
D2 = 0.
if ip > 0:
#D1 = np.sqrt(1./abs(T2Bins[ip]-T2Bins[ip-1]))**.5
D1 = (1./abs(T2Bins[ip]-T2Bins[ip-1])) #**2
if ip < N-1:
#D2 = np.sqrt(1./abs(T2Bins[ip+1]-T2Bins[ip]))**.5
D2 = (1./abs(T2Bins[ip+1]-T2Bins[ip])) #**2
if ip > 0:
Phim_base[ip,ip-1] = -(D1)
if ip == 0:
Phim_base[ip,ip ] = 2.*(D1+D2)
elif ip == N-1:
Phim_base[ip,ip ] = 2.*(D1+D2)
else:
Phim_base[ip,ip ] = 2.*(D1+D2)
if ip < N-1:
Phim_base[ip,ip+1] = -(D2)
Phim_base /= np.max(Phim_base) # normalize
Phim_base += a1*np.eye(N)
elif smooth == "Smooth":
#print ("Smooth model")
for ip in range(N):
if ip > 0:
Phim_base[ip,ip-1] = -1 # smooth in log space
if ip == 0:
Phim_base[ip,ip ] = 2.05 # Encourage a little low model
elif ip == N-1:
Phim_base[ip,ip ] = 2.5 # Penalize long decays
else:
Phim_base[ip,ip ] = 2.1 # Smooth and small
if ip < N-1:
Phim_base[ip,ip+1] = -1 # smooth in log space
elif smooth == "Smallest":
for ip in range(N):
Phim_base[ip,ip ] = 1.
else:
print("non valid model constraint:", smooth)
exit()
Phi_m = alpha*Phim_base
WmTWm = Phim_base # np.dot(Phim_base, Phim_base.T)
b_pre = np.dot(A, x)
phid = np.linalg.norm( np.dot(Wd, (b-b_pre)) )**2
phim = np.linalg.norm( np.dot(Phim_base, (x-xr)) )**2
mu2 = phim
phib = PhiB(mu1, 0, x)
mu1 = 1e-4* ((phid + alpha*phim) / phib)
PHIM = []
PHID = []
MOD = []
ALPHA = []
ALPHA.append(alpha)
#ALPHA = np.linspace( alpha, 1, MAXITER )
for i in range(MAXITER):
#alpha = ALPHA[i]
Phi_m = alpha*Phim_base
# reset mu1 at each iteration
# Calvetti -> No ; Li -> Yes
# without this, non monotonic convergence occurs...which is OK if you really trust your noise
mu1 = 1e-4* ((phid + alpha*phim) / phib)
WmTWm = Phim_base # np.dot(Phim_base, Phim_base.T)
phid_old = phid
inner = 0
First = True # guarantee entry
xp = np.copy(x) # prior step x
b2a = np.dot(A.conj().transpose(), np.dot(WdTWd, b-b_pre) ) - alpha*np.dot(WmTWm,(x-xr))
xg = nnls(ATWdTWdA + Phi_m, b2a)
x = xg[0]
#b_pre = np.dot(A, x)
#phid = np.linalg.norm( np.dot(Wd, (b-b_pre)))**2
#phim = np.linalg.norm( np.dot(Phim_base, (x-xr)) )**2
while ( (phib / (phid+alpha*phim)) > EPSILON or First==True ):
#while ( False ):
First = False
# Log barrier, keep each element above minVal
X1 = np.eye(N) * (x-minVal)**-1
X2 = np.eye(N) * (x-minVal)**-2
# Log barrier, keep sum below maxVal TODO normalize by component. Don't want to push all down
#Y1 = np.eye(N) * (maxVal - np.sum(x))**-1
#Y2 = np.eye(N) * (maxVal - np.sum(x))**-2
AA = ATWdTWdA + mu1*X2 + Phi_m
M = np.eye( N ) * (1./np.diag(ATWdTWdA + mu1*X2 + Phi_m))
##M = seye( N ).dot(1./np.diag(ATWdTWdA + mu1*X2 + Phi_m))
#print("Incomplete LU",flush=True)
#M2 = ilu(AA) #, drop_tol=None, fill_factor=None, drop_rule=None, permc_spec=None, diag_pivot_thresh=None, relax=None, panel_size=None, options=None)
#ilu(AA, drop_tol=None, fill_factor=None, drop_rule=None, permc_spec=None,
#diag_pivot_thresh=None, relax=None, panel_size=None, options=None)[source]
#M = spla.LinearOperator(np.shape(AA), M2.solve)
#print("dun ILU", flush=True)
# Solve system (newton step) (Li)
b2 = np.dot(A.conj().transpose(), np.dot(WdTWd, b-b_pre) ) + 2.*mu1*np.diag(X1) - alpha*np.dot(WmTWm,(x-xr))
ztilde = iter.cg(AA, b2, M=M)
h = (ztilde[0].real)
# TODO move nnls outside of this loop...use as starting point maybe?
#print("nnls", flush=True)
#xg = nnls(AA, b2)
#print("nnls done", flush=True)
#print("calling cg", flush=True)
#print("out of cg", flush=True)
#print(xg[0], ztilde[0] , flush=True)
#print(np.linalg.norm(xg[0] - ztilde[0]), flush=True )
# Solve system (direct solution) (Calvetti)
#b2 = np.dot(A.conj().transpose(), np.dot(WdTWd, b)) + 2.*mu1*np.diag(X1) - alpha*np.dot(WmTWm,(x-xr))
#ztilde = iter.cg(AA, b2, M=M, x0=xg[0])
#h = (ztilde[0].real - x)
# step size
d = np.min( (1, 0.95 * np.min(x/np.abs(h+1e-120))) )
##########################################################
# Update and fix any over/under stepping
x += d*h
# Determine mu steps to take
s1 = mu1 * (np.dot(X2, ztilde[0].real) - 2.*np.diag(X1))
#s2 = mu2 * (np.dot(Y2, ztilde[0].real) - 2.*np.diag(Y1))
# determine mu for next step
mu1 = SIGMA/N * np.abs(np.dot(s1, x))
#mu2 = SIGMA/N * np.abs(np.dot(s2, x))
b_pre = np.dot(A, x)
phid = np.linalg.norm( np.dot(Wd, (b-b_pre)))**2
phim = np.linalg.norm( np.dot(Phim_base, (x-xr)) )**2
phib = PhiB(mu1, minVal, x)
inner += 1
PHIM.append(phim)
PHID.append(phid)
MOD.append(np.copy(x))
# determine alpha
scale = 1.5*(len(b)/phid)
#alpha *= np.sqrt(scale)
alpha *= min(scale, .95) # was .85...
#print("alpha", min(scale, 0.99))
#alpha *= .99 # was .85...
ALPHA.append(alpha)
#alpha = ALPHA[i+1]
print("inversion progress", i, alpha, np.sqrt(phid/len(b)), phim, flush=True)
# if np.sqrt(phid/len(b)) < 0.97:
# ibreak = -1
# print ("------------overshot--------------------", alpha, np.sqrt(phid/len(b)), ibreak)
# alpha *= 2. #0
# x -= d*h
# b_pre = np.dot(A, x)
# phid = np.linalg.norm( np.dot(Wd, (b-b_pre)))**2
# phim = np.linalg.norm( np.dot(Phim_base, (x-xr)) )#**2
# mu1 = ((phid + alpha*phim) / phib)
if lambdastar == "discrepency":
if np.sqrt(phid/len(b)) < 1.00 or alpha < 1e-5:
ibreak = 1
print ("optimal solution found", alpha, np.sqrt(phid/len(b)), ibreak)
break
# slow convergence, bail and use L-curve
# TI- only use L-curve. Otherwise results for perlin noise are too spurious for paper.
if lambdastar == "lcurve":
if i > 4:
kappa = curvaturefd(np.log(np.array(PHIM)), np.log(np.array(PHID)), ALPHA[0:i+1])#ALPHA[0:-1])
#kappa = curvatureg(np.log(np.array(PHIM)), np.log(np.array(PHID)))
print("max kappa", np.argmax(kappa), "distance from", i-np.argmax(kappa))
if i > 4 and (i-np.argmax(kappa)) > 4: # ((np.sqrt(phid_old/len(b))-np.sqrt(phid/len(b))) < 1e-4) :
#if np.sqrt(phid/len(b)) < 3.0 and ((np.sqrt(phid_old/len(b))-np.sqrt(phid/len(b))) < 1e-3):
ibreak = 1
MOD = np.array(MOD)
print ("###########################") #slow convergence", alpha, "phid_old", np.sqrt(phid_old/len(b)), "phid", np.sqrt(phid/len(b)), ibreak)
print ("Using L-curve criteria")
#kappa = curvaturefd(np.log(np.array(PHIM)), np.log(np.array(PHID)), ALPHA[0:-1])
#kappa2 = curvatureg(np.log(np.array(PHIM)), np.log(np.array(PHID)))
#kappa = curvature( np.array(PHIM), np.array(PHID))
x = MOD[ np.argmax(kappa) ]
b_pre = np.dot(A, x)
phid = np.linalg.norm( np.dot(Wd, (b-b_pre)))**2
phim = np.linalg.norm( np.dot(Phim_base, (x-xr)) )**2
mu1 = ((phid + alpha*phim) / phib)
print ("L-curve selected", alpha, "phid_old", np.sqrt(phid_old/len(b)), "phid", np.sqrt(phid/len(b)), ibreak)
print ("###########################")
if np.sqrt(phid/len(b)) <= 1:
ibreak=0
fig = plt.figure( figsize=(pc2in(20.0),pc2in(22.)) )
ax1 = fig.add_axes( [.2,.15,.6,.7] )
#plt.plot( (np.array(PHIM)), np.log(np.array(PHID)/len(b)), '.-')
#plt.plot( ((np.array(PHIM))[np.argmax(kappa)]) , np.log( (np.array(PHID)/len(b))[np.argmax(kappa)] ), '.', markersize=12)
#plt.axhline()
lns1 = plt.plot( np.log(np.array(PHIM)), np.log(np.sqrt(np.array(PHID)/len(b))), '.-', label="L curve")
lns2 = plt.plot( np.log(np.array(PHIM))[np.argmax(kappa)], np.log(np.sqrt(np.array(PHID)/len(b))[np.argmax(kappa)]), '.', markersize=12, label="$\lambda^*$")
ax2 = plt.twinx()
lns3 = ax2.plot( np.log(np.array(PHIM)), kappa, color='orange', label="curvature" )
# Single legend
lns = lns1+lns3
labs = [l.get_label() for l in lns]
ax2.legend(lns, labs, loc=0)
ax1.set_xlabel("$\phi_m$")
ax1.set_ylabel("$\phi_d$")
ax2.set_ylabel("curvature")
plt.savefig('lcurve.pdf')
break
PHIM = np.array(PHIM)
PHID = np.array(PHID)
if (i == MAXITER-1 ):
ibreak = 2
print("Reached max iterations!!", alpha, np.sqrt(phid/len(b)), ibreak)
kappa = curvaturefd(np.log(np.array(PHIM)), np.log(np.array(PHID)), ALPHA[0:-1])
x = MOD[ np.argmax(kappa) ]
b_pre = np.dot(A, x)
phid = np.linalg.norm( np.dot(Wd, (b-b_pre)))**2
phim = np.linalg.norm( np.dot(Phim_base, (x-xr)) )**2
mu1 = ((phid + alpha*phim) / phib)
if lambdastar == "lcurve":
return x, ibreak, np.sqrt(phid/len(b)), PHIM, PHID/len(b), np.argmax(kappa)
else:
return x, ibreak, np.sqrt(phid/len(b))
if __name__ == "__main__":
print("Test") | Akvo | /Akvo-1.7.2-py3-none-any.whl/akvo/tressel/logbarrier-nnls.py | logbarrier-nnls.py |
#from GJIPlot import *
import numpy as np
import matplotlib.pyplot as plt
#from invertColours import *
from akvo.tressel.decay import *
from scipy import signal
def quadrature(T, vL, wL, dt, xn, DT, t):
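# Sketch of what follows: the raw signal xn is demodulated by multiplying with
# 2*cos(wL*t) and 2*sin(wL*t) and low-pass filtering (zero-phase filtfilt) to
# obtain the two quadrature components Q and I. A simple sample-based
# detection and a Hilbert-transform envelope are also computed for comparison,
# but only Q, I and the correspondingly trimmed time axis are returned.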
# decimate
# blind decimation
# 1 instead of T
irsamp = int(T) * int( (1./vL) / dt) # real
iisamp = int( ((1./vL)/ dt) * ( .5*np.pi / (2.*np.pi) ) ) # imaginary
dsamp = int( DT / dt) # real
iisamp += dsamp
############################################################
# simple quadrature-detection via sampling
xr = xn[dsamp::irsamp]
xi = xn[iisamp::irsamp]
phase = np.angle( xr + 1j*xi )
abse = np.abs( xr + 1j*xi )
# times
#ta = np.arange(0, TT, dt)
#te = np.arange(DT, TT, TT/len(abse))
#############################################################
# hilbert transform
ht = signal.hilbert(xn) #, 100))
he = np.abs(ht) #, 100))
hp = ((np.angle(ht[dsamp::irsamp])))
#############################################################
# Resample ht
#htd = signal.decimate(he, 100, ftype='fir')
#td = signal.decimate(t, 100, ftype='fir')
#[htd, td] = signal.resample(he, 21, t)
#toss first, and use every third
#htd = htd[1::3]
#td = td[1::3]
#############################################################
# Pre-envelope
#gplus = xn + 1j*ht
#############################################################
# Complex envelope
#gc = gplus / np.exp(1j*wL*t)
#############################################################
## Design a low-pass filter
FS = 1./dt # sampling rate
FC = 10.05/(0.5*FS) # normalized cutoff: 10.05 Hz expressed as a fraction of Nyquist
N = 11 # number of filter taps
a = [1] # filter denominator
b = signal.firwin(N, cutoff=FC, window='hamming') # filter numerator
#############################################################
## In-phase
#2*np.cos(wL*t)
dw = 0 # -2.*np.pi*2
Q = signal.filtfilt(b, a, xn*2*np.cos((wL+dw)*t)) # X
I = signal.filtfilt(b, a, xn*2*np.sin((wL+dw)*t)) # Y
###############################################
# Plots
#plt.plot(ht.real)
#plt.plot(ht.imag)
#plt.plot(np.abs(ht))
#plt.plot(gc.real)
#plt.plot(gc.imag)
#plt.plot(xn)
#plt.plot(xn)
#plt.plot(ta, xn)
#plt.plot(te, abse, '-.', linewidth=2, markersize=10)
#plt.plot(ta, he, '.', markersize=10 )
#plt.plot(td, htd, color='green', linewidth=2)
# Phase Plots
#ax2 = plt.twinx()
#ax2.plot(te, hp, '.', markersize=10, color='green' )
#ax2.plot(te, phase, '-.', linewidth=2, markersize=10, color='green')
return Q[N:-N], I[N:-N], t[N:-N]
# #####################################################################
# # regress raw signal
#
# #[peaks, times, ind] = peakPicker(xn, wL, dt)
# #[a0,b0,rt20] = regressCurve(peaks, times) #,sigma2=1,intercept=True):
#
# dsamp = int( DT / dt) # real
# # regress analytic signal
# [a0,b0,rt20] = regressCurve(he[dsamp::], t[dsamp::], intercept=True) #,sigma2=1,intercept=True):
# #[b0,rt20] = regressCurve(he[dsamp::], t[dsamp::], intercept=False) #,sigma2=1,intercept=True):
# #[a0,b0,rt20] = regressCurve(he, t) #,sigma2=1,intercept=True):
#
# # regress downsampled
# [a,b,rt2] = regressCurve(abse, t[dsamp::irsamp], intercept=True) #,sigma2=1,intercept=True):
# #[b,rt2] = regressCurve(htd, td, intercept=False) #,sigma2=1,intercept=True):
#
# return irsamp, iisamp, htd, b0, rt20, ta, b, rt2, phase, td, he, dsamp
# #return irsamp, iisamp, abse, a0, b0, rt20, times, a, b, rt2, phase
def RotateAmplitude(X, Y, zeta, df, t):
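# Rotates the complex signal V = X + iY by the fitted phase zeta and removes
# the residual frequency offset df, so that the decaying NMR envelope is
# aligned with a single quadrature component and the other holds mostly noise.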
V = X + 1j*Y
return np.abs(V) * np.exp( 1j * ( np.angle(V) - zeta - 2.*np.pi*df*t ) )
#return np.abs(V) * np.exp( 1j * ( np.angle(V) - zeta - df*t ) )
def bootstrapWindows(N, nboot, isum, adapt=False):
""" Bootstraps noise as a function of gate width
N = input noise signal
nboot = number of bootstrap windows to perform
isum = length of windows (L_i)
adapt = reduce nboot as window size increases
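Example (illustrative values only):
Means = bootstrapWindows(noise, nboot=20000, isum=np.arange(1,80), adapt=True)
returns an array of window means whose spread can be compared with the
theoretical 1/sqrt(L) behaviour of ideal stacking.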
"""
nc = np.shape(N)[0]
Means = {}
if adapt:
Means = -9999*np.ones((len(isum), nboot//isum[0])) # dummy value
for ii, nwin in enumerate(isum):
for iboot in range(nboot//isum[ii]):
cs = np.random.randint(0,nc-nwin)
Means[ii,iboot] = np.mean( N[cs:cs+nwin] )
Means = np.ma.masked_less(Means, -9995)
else:
Means = np.zeros((len(isum), nboot))
for ii, nwin in enumerate(isum):
for iboot in range(nboot):
cs = np.random.randint(0,nc-nwin)
Means[ii,iboot] = np.mean( N[cs:cs+nwin] )
return Means #, np.array(isum)
def gateIntegrate(T2D, T2T, gpd, sigma, stackEfficiency=2.):
""" Gate integrate the signal to gpd, gates per decade
T2D = the time series to gate integrate, complex
T2T = the abscissa values
gpd = gates per decade
sigma = estimate of standard deviation for theoretical gate noise
stackEfficiency = exponential in theoretical gate noise, 2 represents ideal stacking
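The theoretical noise assigned to a gate holding L samples of equal variance
sigma**2 is
sigma_gate = sqrt( L * sigma**2 * (1/L)**stackEfficiency )
which reduces to sigma/sqrt(L) for the ideal stackEfficiency of 2.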
"""
# use artificial time gates so that early times are fully captured
T2T0 = T2T[0]
T2TD = T2T[0] - (T2T[1]-T2T[0])
T2T -= T2TD
#####################################
# calculate total number of decades #
# window edges are approximate until binning but will be adjusted to reflect data timing;
# this primarily impacts bins with only a few samples
nd = np.log10(T2T[-1]/T2T[0])
tdd = np.logspace( np.log10(T2T[0]), np.log10(T2T[-1]), (int)(gpd*nd)+1, base=10, endpoint=True)
tdl = tdd[0:-1] # approximate window left edges
tdr = tdd[1::] # approximate window right edges
td = (tdl+tdr) / 2. # approximate window centres
Vars = np.zeros( len(td) )
htd = np.zeros( len(td), dtype=complex )
isum = np.zeros( len(td), dtype=int )
ii = 0
for itd in range(len(T2T)):
if ( round(T2T[itd], 4) > round(tdr[ii], 4) ):
ii += 1
# correct window edges to centre about data
tdr[ii-1] = (T2T[itd-1]+T2T[itd])*.5
tdl[ii ] = (T2T[itd-1]+T2T[itd])*.5
isum[ii] += 1
htd[ii] += T2D[ itd ]
Vars[ii] += sigma**2
td = (tdl+tdr) / 2. # actual window centres
sigma2 = np.sqrt( Vars * ((1/(isum))**stackEfficiency) )
# Reset abscissa where isum == 1
# when there is no windowing going on
td[isum==1] = T2T[0:len(td)][isum==1]
tdd = np.append(tdl, tdr[-1])
htd /= isum # average
T2T += T2TD
return td+T2TD, htd, tdd+T2TD, sigma2, isum # centre abscissa, data, window edges, error
if __name__ == "__main__":
dt = 1e-4
TT = 1.5
t = np.arange(0, TT, dt)
vL = 2057.
wL = 2.*np.pi*vL
wL2 = 2.*np.pi*(vL-2.5) #-2) #-2.2) # 3 Hz off
zeta = -np.pi/6. #4.234
t2 = .150
xs = np.exp(-t/t2) * np.cos(wL2*t + zeta)
xe = np.exp(-t/t2)
xn = xs + np.random.normal(0,.1,len(xs))# + (np.sign(xs)
# np.random.random_integers(-1,1,len(xs))*0.6*np.random.lognormal(0, .35, len(xs)) + \
# np.random.random_integers(-1,1,len(xs))*.004*np.random.weibull(.25, len(xs)), 60)))
# quadrature detection downsampling
T = 50 # sampling period, grab every T'th oscilation
DT = .002 #85 # dead time ms
#[irsamp, iisamp, abse, b0, rt20, times, b, rt2, phase, tdec, he, dsamp] = quadDetect(T, vL, wL, dt, xn, DT)
[Q, I, tt] = quadrature(T, vL, wL, dt, xn, DT, t)
#[E0,df,phi,T2] = quadratureDetect(Q, I, tt) # legacy call; quadratureDetect is not defined in this module
[conv, E0, df, phi, T2] = quadratureDetect2(Q, I, tt, 'trf', 'linear') # scipy-based fit (method/loss choices are illustrative)
print("df", df)
D = RotateAmplitude(I, Q, phi, df, tt)
fig = plt.figure(figsize=[pc2in(20), pc2in(14)]) #
ax1 = fig.add_axes([.125,.2,.8,.7])
#ax1.plot(tt*1e3, np.exp(-tt/t2), linewidth=2, color='black', label="actual")
ax1.plot(tt*1e3, D.imag, label="CA", color='red')
ax1.plot(t*1e3, xn, color='blue', alpha=.25)
ax1.plot(tt*1e3, I, label="inphase", color='blue')
ax1.plot(tt*1e3, Q, label="quadrature", color='green')
#ax1.plot(tt*1e3, np.angle( Q + 1j*I), label="angle", color='purple')
# gateIntegrate requires a noise estimate (0.1 matches the synthetic noise above)
# and returns (gate centres, gated data, gate edges, gate sigma, sample counts)
GT, GD, GTe, Gsig, Gn = gateIntegrate( D.imag, tt, 10, 0.1 )
GT, GDR, GTe, Gsig, Gn = gateIntegrate( D.real, tt, 10, 0.1 )
GT, GQ, GTe, Gsig, Gn = gateIntegrate( Q, tt, 10, 0.1 )
GT, GI, GTe, Gsig, Gn = gateIntegrate( I, tt, 10, 0.1 )
#ax1.plot(tt*1e3, np.arctan( Q/I), label="angle", color='purple')
#ax1.plot(GT*1e3, np.real(GD), 'o', label="GATE", color='purple')
#ax1.plot(GT*1e3, np.real(GDR), 'o', label="GATE Real", color='red')
#ax1.plot(GT*1e3, np.arctan( np.real(GQ)/np.real(GI)), 'o',label="GATE ANGLE", color='magenta')
ax1.set_xlabel(r"time [ms]")
ax1.set_ylim( [-1.25,1.65] )
#light_grey = np.array([float(248)/float(255)]*3)
legend = plt.legend( frameon=True, scatterpoints=1, numpoints=1, labelspacing=0.2 )
#rect = legend.get_frame()
fixLeg(legend)
#rect.set_color('None')
#rect.set_facecolor(light_grey)
#rect.set_linewidth(0.0)
#rect.set_alpha(0.5)
# Remove top and right axes lines ("spines")
spines_to_remove = ['top', 'right']
for spine in spines_to_remove:
ax1.spines[spine].set_visible(False)
#ax1.xaxis.set_ticks_position('none')
#ax1.yaxis.set_ticks_position('none')
ax1.get_xaxis().tick_bottom()
ax1.get_yaxis().tick_left()
plt.savefig('rotatetime.pdf',dpi=600)
plt.savefig('rotatetime.eps',dpi=600)
# phase part
plt.figure()
plt.plot( tt*1e3, D.real, label="CA", color='red' )
plt.show()
exit() | Akvo | /Akvo-1.7.2-py3-none-any.whl/akvo/tressel/rotate.py | rotate.py |
from __future__ import division
import numpy as np
from scipy.sparse.linalg import iterative as iter
from scipy.sparse import eye as seye
import pylab
import pprint
from scipy.optimize import nnls
import matplotlib.pyplot as plt
from akvo.tressel.SlidesPlot import *
def PhiB(mux, minVal, x):
phib = mux * np.abs( np.sum(np.log( x-minVal)) )
return phib
def curvaturefd(x, y, t):
x1 = np.gradient(x,t)
x2 = np.gradient(x1,t)
y1 = np.gradient(y,t)
y2 = np.gradient(y1,t)
return np.abs(x1*y2 - y1*x2) / np.power(x1**2 + y1**2, 3./2)
def curvatureg(x, y):
from scipy.ndimage import gaussian_filter1d
#first and second derivative
x1 = gaussian_filter1d(x, sigma=1, order=1)#, mode='constant', cval=x[-1])
x2 = gaussian_filter1d(x1, sigma=1, order=1)#, mode='constant', cval=y[-1])
y1 = gaussian_filter1d(y, sigma=1, order=1)#, mode='constant', cval=x1[-1])
y2 = gaussian_filter1d(y1, sigma=1, order=1)#, mode='constant', cval=y1[-1])
return np.abs(x1*y2 - y1*x2) / np.power(x1**2 + y1**2, 3./2)
def logBarrier(A, b, T2Bins, lambdastar, x_0=0, xr=0, alpha=10, mu1=10, mu2=10, smooth="Smallest", MAXITER=70, fignum=1000, sigma=1, callback=None):
"""Implements a log-barrier Tikhonov solution to a linear system of equations
Ax = b s.t. x_min < x < x_max. A log-barrier term is used for the constraint.
"""
# TODO input
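# Descriptive note: each outer iteration solves, via an inner Newton/CG loop,
#   min_x ||Wd (A x - b)||^2 + alpha ||Wm (x - xr)||^2 - mu1 * sum(log(x - minVal))
# with the barrier weight mu1 relaxed towards zero; alpha is then reduced and,
# when lambdastar == "lcurve", the model at the point of maximum curvature of
# the log(phi_m) vs log(phi_d) trade-off curve is returned as the solution.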
minVal = 0.0
#maxVal = 1e8
Wd = (np.eye(len(b)) / (sigma)) # Wd = eye( sigma )
WdTWd = (np.eye(len(b)) / (sigma**2)) # Wd = eye( sigma )
ATWdTWdA = np.dot(A.conj().transpose(), np.dot( WdTWd, A )) # TODO, implicit calculation instead?
N = np.shape(A)[1] # number of model
M = np.shape(A)[0] # number of data
SIGMA = .25 # .25 # lower is more aggressive relaxation of log barrier
EPSILON = 1e-25 #1e-35
# reference model
if np.size(xr) == 1:
xr = np.zeros(N)
# initial guess
if np.size(x_0) == 1:
x = 1e-10 + np.zeros(N)
else:
x = 1e-10 + x_0
# Construct model constraint base
Phim_base = np.zeros( [N , N] )
a1 = .05 # smallest too
# calculate largest term
D1 = 1./abs(T2Bins[1]-T2Bins[0])
D2 = 1./abs(T2Bins[2]-T2Bins[1])
#a2 = 1. #(1./(2.*D1+D2)) # smooth
if smooth == "Both":
#print ("Both small and smooth model")
for ip in range(N):
D1 = 0.
D2 = 0.
if ip > 0:
#D1 = np.sqrt(1./abs(T2Bins[ip]-T2Bins[ip-1]))**.5
D1 = (1./abs(T2Bins[ip]-T2Bins[ip-1])) #**2
if ip < N-1:
#D2 = np.sqrt(1./abs(T2Bins[ip+1]-T2Bins[ip]))**.5
D2 = (1./abs(T2Bins[ip+1]-T2Bins[ip])) #**2
if ip > 0:
Phim_base[ip,ip-1] = -(D1)
if ip == 0:
Phim_base[ip,ip ] = 2.*(D1+D2)
elif ip == N-1:
Phim_base[ip,ip ] = 2.*(D1+D2)
else:
Phim_base[ip,ip ] = 2.*(D1+D2)
if ip < N-1:
Phim_base[ip,ip+1] = -(D2)
Phim_base /= np.max(Phim_base) # normalize
Phim_base += a1*np.eye(N)
elif smooth == "Smooth":
#print ("Smooth model")
for ip in range(N):
if ip > 0:
Phim_base[ip,ip-1] = -1 # smooth in log space
if ip == 0:
Phim_base[ip,ip ] = 2.05 # Encourage a little low model
elif ip == N-1:
Phim_base[ip,ip ] = 2.5 # Penalize long decays
else:
Phim_base[ip,ip ] = 2.1 # Smooth and small
if ip < N-1:
Phim_base[ip,ip+1] = -1 # smooth in log space
elif smooth == "Smallest":
for ip in range(N):
Phim_base[ip,ip ] = 1.
else:
print("non valid model constraint:", smooth)
exit()
Phi_m = alpha*Phim_base
WmTWm = Phim_base # np.dot(Phim_base, Phim_base.T)
b_pre = np.dot(A, x)
phid = np.linalg.norm( np.dot(Wd, (b-b_pre)) )**2
phim = np.linalg.norm( np.dot(Phim_base, (x-xr)) )**2
mu2 = phim
phib = PhiB(mu1, 0, x)
mu1 = ((phid + alpha*phim) / phib)
PHIM = []
PHID = []
MOD = []
ALPHA = []
ALPHA.append(alpha)
#ALPHA = np.linspace( alpha, 1, MAXITER )
print ("{:^5} {:^15} {:^15} {:^15} {:^15} {:^10} {:^10}".format("iter.", "lambda", "phi_d", "phi_m","phi","kappa","kappa dist."), flush=True)
print ("{:^5} {:>15} {:<15} {:<15} {:<15} {:<10} {:<10}".format("-----", "---------------", "---------------","---------------","---------------","----------","----------"), flush=True)
for i in range(MAXITER):
#alpha = ALPHA[i]
Phi_m = alpha*Phim_base
# reset mu1 at each iteration
# Calvetti -> No ; Li -> Yes
# without this, non monotonic convergence occurs...which is OK if you really trust your noise
mu1 = ((phid + alpha*phim) / phib)
WmTWm = Phim_base # np.dot(Phim_base, Phim_base.T)
phid_old = phid
inner = 0
First = True # guarantee entry
xp = np.copy(x) # prior step x
# quick and dirty solution
#b2a = np.dot(A.conj().transpose(), np.dot(WdTWd, b-b_pre) ) - alpha*np.dot(WmTWm,(x-xr))
#xg = nnls(ATWdTWdA + Phi_m, b2a)
#x = xg[0]
while ( (phib / (phid+alpha*phim)) > EPSILON or First==True ):
#while ( False ): # skip the hard stuff
First = False
# Log barrier, keep each element above minVal
X1 = np.eye(N) * (x-minVal)**-1
X2 = np.eye(N) * (x-minVal)**-2
# Log barrier, keep sum below maxVal TODO normalize by component. Don't want to push all down
#Y1 = np.eye(N) * (maxVal - np.sum(x))**-1
#Y2 = np.eye(N) * (maxVal - np.sum(x))**-2
AA = ATWdTWdA + mu1*X2 + Phi_m
M = np.eye( N ) * (1./np.diag(ATWdTWdA + mu1*X2 + Phi_m))
#M = seye( N ).dot(1./np.diag(ATWdTWdA + mu1*X2 + Phi_m))
# Solve system (newton step) (Li)
b2 = np.dot(A.conj().transpose(), np.dot(WdTWd, b-b_pre) ) + 2.*mu1*np.diag(X1) - alpha*np.dot(WmTWm,(x-xr))
ztilde = iter.cg(AA, b2, M = M)
h = (ztilde[0].real)
# Solve system (direct solution) (Calvetti)
#b2 = np.dot(A.conj().transpose(), np.dot(WdTWd, b)) + 2.*mu1*np.diag(X1) - alpha*np.dot(WmTWm,(x-xr))
#ztilde = iter.cg(AA, b2, M=M, x0=x)
#h = (ztilde[0].real - x)
# step size
d = np.min( (1, 0.95 * np.min(x/np.abs(h+1e-120))) )
##########################################################
# Update and fix any over/under stepping
x += d*h
# Determine mu steps to take
s1 = mu1 * (np.dot(X2, ztilde[0].real) - 2.*np.diag(X1))
#s2 = mu2 * (np.dot(Y2, ztilde[0].real) - 2.*np.diag(Y1))
# determine mu for next step
mu1 = SIGMA/N * np.abs(np.dot(s1, x))
#mu2 = SIGMA/N * np.abs(np.dot(s2, x))
b_pre = np.dot(A, x)
phid = np.linalg.norm(np.dot(Wd, (b-b_pre)))**2
phim = np.linalg.norm( np.dot(Phim_base, (x-xr)) )**2
phib = PhiB(mu1, minVal, x)
inner += 1
PHIM.append(phim)
PHID.append(phid)
MOD.append(np.copy(x))
tphi = phid + alpha*phim
# determine alpha
scale = 1.5*(len(b)/phid)
#alpha *= np.sqrt(scale)
alpha *= min(scale, .95) # was .85...
#print("alpha", min(scale, 0.99))
#alpha *= .99 # was .85...
ALPHA.append(alpha)
#alpha = ALPHA[i+1]
#print("inversion progress", i, alpha, np.sqrt(phid/len(b)), phim, flush=True)
#print ("{:<8} {:<15} {:<10} {:<10}".format(i, alpha, np.sqrt(phid/len(b)), phim), flush=True)
if i <= 4:
print ("{:^5} {:>15.4f} {:>15.4f} {:>15.4f} {:>15.4f}".format(i, alpha, np.sqrt(phid/len(b)), phim, tphi ), flush=True)
# if np.sqrt(phid/len(b)) < 0.97:
# ibreak = -1
# print ("------------overshot--------------------", alpha, np.sqrt(phid/len(b)), ibreak)
# alpha *= 2. #0
# x -= d*h
# b_pre = np.dot(A, x)
# phid = np.linalg.norm( np.dot(Wd, (b-b_pre)))**2
# phim = np.linalg.norm( np.dot(Phim_base, (x-xr)) )#**2
# mu1 = ((phid + alpha*phim) / phib)
if lambdastar == "discrepency":
if np.sqrt(phid/len(b)) < 1.00 or alpha < 1e-5:
ibreak = 1
print ("optimal solution found", alpha, np.sqrt(phid/len(b)), ibreak)
break
# slow convergence, bail and use L-curve
# TI- only use L-curve. Otherwise results for perlin noise are too spurious for paper.
if lambdastar == "lcurve":
if i > 4:
kappa = curvaturefd(np.log(np.array(PHIM)), np.log(np.array(PHID)), ALPHA[0:i+1])#ALPHA[0:-1])
#kappa = curvatureg(np.log(np.array(PHIM)), np.log(np.array(PHID)))
#print("max kappa", np.argmax(kappa), "distance from", i-np.argmax(kappa))
print ("{:^5} {:>15.4f} {:>15.4f} {:>15.4f} {:>15.4f} {:^10} {:^10}".format(i, alpha, np.sqrt(phid/len(b)), phim, tphi, np.argmax(kappa), i-np.argmax(kappa)), flush=True)
if i > 4 and (i-np.argmax(kappa)) > 4: # ((np.sqrt(phid_old/len(b))-np.sqrt(phid/len(b))) < 1e-4) :
#if np.sqrt(phid/len(b)) < 3.0 and ((np.sqrt(phid_old/len(b))-np.sqrt(phid/len(b))) < 1e-3):
ibreak = 1
MOD = np.array(MOD)
print ("################################") #slow convergence", alpha, "phid_old", np.sqrt(phid_old/len(b)), "phid", np.sqrt(phid/len(b)), ibreak)
print ("Using L-curve criteria")
#kappa = curvaturefd(np.log(np.array(PHIM)), np.log(np.array(PHID)), ALPHA[0:-1])
#kappa2 = curvatureg(np.log(np.array(PHIM)), np.log(np.array(PHID)))
#kappa = curvature( np.array(PHIM), np.array(PHID))
x = MOD[ np.argmax(kappa) ]
alphastar = ALPHA[ np.argmax(kappa) ]
b_pre = np.dot(A, x)
phid = np.linalg.norm( np.dot(Wd, (b-b_pre)))**2
phim = np.linalg.norm( np.dot(Phim_base, (x-xr)) )**2
mu1 = ((phid + alpha*phim) / phib)
print ("L-curve selected: iteration=", np.argmax(kappa)) #, " lambda*=", alpha, "phid_old=", np.sqrt(phid_old/len(b)), "phid=", np.sqrt(phid/len(b)), ibreak)
print ("################################")
if np.sqrt(phid/len(b)) <= 1:
ibreak=0
fig = plt.figure( figsize=(pc2in(20.0),pc2in(22.)) )
ax1 = fig.add_axes( [.2,.15,.6,.7] )
#plt.plot( (np.array(PHIM)), np.log(np.array(PHID)/len(b)), '.-')
#plt.plot( ((np.array(PHIM))[np.argmax(kappa)]) , np.log( (np.array(PHID)/len(b))[np.argmax(kappa)] ), '.', markersize=12)
#plt.axhline()
lns1 = plt.plot( np.log(np.array(PHIM)), np.log(np.sqrt(np.array(PHID)/len(b))), '.-', label="L curve")
lns2 = plt.plot( np.log(np.array(PHIM))[np.argmax(kappa)], np.log(np.sqrt(np.array(PHID)/len(b))[np.argmax(kappa)]), '.', markersize=12, label="$\lambda^*$")
ax2 = plt.twinx()
lns3 = ax2.plot( np.log(np.array(PHIM)), kappa, color='orange', label="curvature" )
# Single legend
lns = lns1+lns3
labs = [l.get_label() for l in lns]
ax2.legend(lns, labs, loc=0)
ax1.set_xlabel("$\phi_m$")
ax1.set_ylabel("$\phi_d$")
ax2.set_ylabel("curvature")
plt.savefig('lcurve.pdf')
break
PHIM = np.array(PHIM)
PHID = np.array(PHID)
if (i == MAXITER-1 ):
ibreak = 2
#print("Reached max iterations!!", alpha, np.sqrt(phid/len(b)), ibreak)
#kappa = curvaturefd(np.log(np.array(PHIM)), np.log(np.array(PHID)), ALPHA[0:-1])
x = MOD[-1]
b_pre = np.dot(A, x)
phid = np.linalg.norm( np.dot(Wd, (b-b_pre)))**2
phim = np.linalg.norm( np.dot(Phim_base, (x-xr)) )**2
mu1 = ((phid + alpha*phim) / phib)
if lambdastar == "lcurve":
#print("Returning L curve result")
return x, ibreak, np.sqrt(phid/len(b)), PHIM, PHID/len(b), np.argmax(kappa), Wd, Phim_base, alphastar
else:
print("Returning max iteration result")
return x, ibreak, np.sqrt(phid/len(b))
if __name__ == "__main__":
print("Test") | Akvo | /Akvo-1.7.2-py3-none-any.whl/akvo/tressel/logbarrier.py | logbarrier.py |
import os, sys
import numpy as np
from ruamel import yaml
import pyLemma.LemmaCore as lc
import pyLemma.Merlin as mrln
import pyLemma.FDEM1D as em1d
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="ticks")
from ruamel import yaml
#import cmocean
#from SEGPlot import *
#from matplotlib.ticker import FormatStrFormatter
#import matplotlib.ticker as plticker
# Converts Lemma/Merlin/Akvo serialized Eigen arrays into numpy ones for use by Python
class VectorXr(yaml.YAMLObject):
"""
Converts Lemma/Merlin/Akvo serialized Eigen arrays into numpy ones for use by Python
"""
yaml_tag = u'VectorXr'
def __init__(self, array):
self.size = np.shape(array)[0]
self.data = array.tolist()
def __repr__(self):
# Converts to numpy array on import
return "np.array(%r)" % (self.data)
class AkvoData(yaml.YAMLObject):
"""
Reads an Akvo serialized dataset into a standard python dictionary
"""
yaml_tag = u'AkvoData'
def __init__(self, array):
pass
#self.size = np.shape(array)[0]
#self.Imp = array.tolist()
def __repr__(self):
# Converts to a dictionary with Eigen vectors represented as Numpy arrays
return self
def loadAkvoData(fnamein):
""" Loads data from an Akvo YAML file. The 0.02 is hard coded as the pulse length. This needs to be
corrected in future kernel calculations. The current was reported but not the pulse length.
"""
fname = (os.path.splitext(fnamein)[0])
with open(fnamein, 'r') as stream:
try:
AKVO = (yaml.load(stream, Loader=yaml.Loader))
except yaml.YAMLError as exc:
print(exc)
return AKVO
def main():
if len(sys.argv) < 2:
print ("usage python calcAkvoKernel.py AkvoDataset.yaml Coil1.yaml kparams.yaml SaveString.yaml " )
print ("usage akvoKO AkvoDataset.yaml kparams.yaml SaveString.yaml " )
exit()
AKVO = loadAkvoData(sys.argv[1])
B_inc = AKVO.META["B_0"]["inc"]
B_dec = AKVO.META["B_0"]["dec"]
B0 = AKVO.META["B_0"]["intensity"]
fT = AKVO.transFreq
#gamma = 2.67518e8
#B0 = (fL*2.*np.pi) /gamma * 1e9
# read in kernel params
kparams = loadAkvoData( sys.argv[2] )
Kern = mrln.KernelV0()
TX = []
for tx in kparams['txCoils']:
Coil1 = em1d.PolygonalWireAntenna.DeSerialize( tx )
Coil1.SetNumberOfFrequencies(1)
Coil1.SetFrequency(0, fT)
Coil1.SetCurrent(1.)
Kern.PushCoil( tx.split('.yml')[0], Coil1 )
TX.append( tx.split('.yml')[0] )
RX = []
for rx in kparams['rxCoils']:
if rx not in kparams['txCoils']:
print("new recv")
Coil1 = em1d.PolygonalWireAntenna.DeSerialize( rx )
Coil1.SetNumberOfFrequencies(1)
Coil1.SetFrequency(0, fT)
Coil1.SetCurrent(1.)
Kern.PushCoil( rx.split('.yml')[0], Coil1 )
else:
print("reuse tx coil")
RX.append( rx.split('.yml')[0] )
## TODO
# pass this in...
lmod = em1d.LayeredEarthEM()
nlay = len(kparams["sigs"])
sigs = np.array(kparams["sigs"])
tops = np.array(kparams["tops"])
bots = np.array(kparams["bots"])
if ( (len(tops)-1) != len(bots)):
print("Layer mismatch")
exit()
thicks = bots - tops[0:-1]
lmod.SetNumberOfLayers(nlay + 1)
lmod.SetLayerThickness(thicks)
lmod.SetLayerConductivity( np.concatenate( ( [0.0], sigs ) ))
#lmod.SetNumberOfLayers(4)
#lmod.SetLayerThickness([15.49, 28.18])
#lmod.SetLayerConductivity([0.0, 1./16.91, 1./24.06, 1./33.23])
lmod.SetMagneticFieldIncDecMag( B_inc, B_dec, B0, lc.NANOTESLA )
Kern.SetLayeredEarthEM( lmod );
Kern.SetIntegrationSize( (kparams["size_n"], kparams["size_e"], kparams["size_d"]) )
Kern.SetIntegrationOrigin( (kparams["origin_n"], kparams["origin_e"], kparams["origin_d"]) )
Kern.SetTolerance( 1e-9*kparams["branchTol"] )
Kern.SetMinLevel( kparams["minLevel"] )
Kern.SetMaxLevel( kparams["maxLevel"] )
Kern.SetHankelTransformType( lc.FHTKEY201 )
Kern.AlignWithAkvoDataset( sys.argv[1] )
if str(kparams["Lspacing"]).strip() == "Geometric":
thick = np.geomspace(kparams["thick1"], kparams["thickN"], num=kparams["nLay"])
elif str(kparams["Lspacing"]) == "Log":
thick = np.logspace(kparams["thick1"], kparams["thickN"], num=kparams["nLay"])
elif str(kparams["Lspacing"]) == "Linear":
thick = np.linspace(kparams["thick1"], kparams["thickN"], num=kparams["nLay"])
else:
print("DOOOM!, in calcAkvoKernel layer spacing was not <Geometric>, <Log>, or <Linear>")
print( str(kparams["Lspacing"]) )
exit()
print( np.array(kparams["origin_d"]) )
print( np.cumsum(thick)[0:-1] )
iface = np.concatenate( (np.array( [kparams["origin_d"]] ), kparams["origin_d"]+np.cumsum(thick)[0:-1]) )
Kern.SetDepthLayerInterfaces(iface)
#Kern.SetDepthLayerInterfaces(np.geomspace(1, 110, num=40))
#Kern.SetDepthLayerInterfaces(np.linspace(1, 110, num=50))
#Kern.SetDepthLayerInterfaces(np.geomspace(1, 110, num=40))
# autAkvoDataNode = YAML::LoadFile(argv[4]);
# Kern->AlignWithAkvoDataset( AkvoDataNode );
#Kern.CalculateK0( ["Coil 1"], ["Coil 1"], False )
Kern.CalculateK0( TX, RX, False )
#yml = open( 'test' + str(Kern.GetTolerance()) + '.yaml', 'w')
yml = open( sys.argv[3], 'w' )
print(Kern, file=yml)
#
K0 = Kern.GetKernel()
plt.matshow(np.abs(K0))
plt.show()
if __name__ == "__main__":
main() | Akvo | /Akvo-1.7.2-py3-none-any.whl/akvo/tressel/calcAkvoKernel.py | calcAkvoKernel.py |
import numpy, array #,rpy2
from matplotlib import pyplot as plt
import numpy as np
from scipy.optimize import least_squares
#from rpy2.robjects.packages import importr
#import rpy2.robjects as robjects
#import rpy2.robjects.numpy2ri
#import notch
from numpy.fft import fft, fftfreq
# We know/can calculate frequency peak, use this to guess where picks will be.
# maybe have a sliding window that reports peak values.
def peakPicker(data, omega, dt):
# compute window based on omega and dt
# make sure you are not aliased, grab every other peak
window = (2*numpy.pi) / (omega*dt)
data = numpy.array(data)
peaks = []
troughs = []
times = []
times2 = []
indices = []
ws = 0
we = window
ii = 0
for i in range((int)(len(data)/window)):
# initially was just returning this I think avg is better
#times.append( (ws + numpy.abs(data[ws:we]).argmax()) * dt )
peaks.append(numpy.max(data[ws:we]))
times.append( (ws + data[ws:we].argmax()) * dt )
indices.append( ii + data[ws:we].argmax() )
troughs.append(numpy.min(data[ws:we]))
times2.append( (ws + (data[ws:we]).argmin()) * dt )
indices.append( ii + data[ws:we].argmin() )
ws += window
we += window
ii += (int)(we-ws)
#return numpy.array(peaks), numpy.array(times)
# Averaging peaks does a good job of removing bias in noise
return (numpy.array(peaks)-numpy.array(troughs))/2., \
(numpy.array(times)+numpy.array(times2))/2., \
indices
def fun(x, t, y):
""" Cost function for regression, single exponential, no DC term
x[0] = A0
x[1] = zeta
x[2] = df
x[3] = T2
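The fitted model (real then imaginary parts, concatenated) is
X(t) = -A0 * sin(2*pi*df*t + zeta) * exp(-t/T2)
Y(t) = +A0 * cos(2*pi*df*t + zeta) * exp(-t/T2)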
"""
# concatenated real and imaginary parts
return y - np.concatenate((-x[0]*np.sin(2.*np.pi*x[2]*t + x[1])*np.exp(-t/x[3]), \
+x[0]*np.cos(2.*np.pi*x[2]*t + x[1])*np.exp(-t/x[3])))
def fun2(x, t, y):
""" Cost function for regression, single exponential, no DC term
x[0] = A0
x[1] = zeta
x[2] = T2
"""
# concatenated real and imaginary parts
pre = np.concatenate((x[0]*np.cos(x[1])*np.exp(-t/x[2]), \
-x[0]*np.sin(x[1])*np.exp(-t/x[2])))
return y-pre
def quadratureDetect2(X, Y, tt, method, loss, x0="None"):
""" Pure python quadrature detection using Scipy.
X = real part of NMR signal
Y = imaginary component of NMR signal
tt = time
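Returns (success, A0, df, zeta, T2). An illustrative call (argument values
are only examples) is
conv, E0, df, phi, T2 = quadratureDetect2(X, Y, tt, method='trf', loss='soft_l1')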
"""
#method = ['trf','dogbox','lm'][method_int]
#loss = ['linear','soft_l1','cauchy','huber'][loss_int]
#print ("method", method, 'loss', loss)
if x0=="None":
if method == 'lm':
x0 = np.array( [50., 0., 0., .200] ) # A0, zeta, df, T2
res_lsq = least_squares(fun, x0, args=(tt, np.concatenate((X, Y))), loss=loss, f_scale=1.0,\
method=method
)
else:
x0 = np.array( [50., 0., 0., .200] ) # A0, zeta, df, T2
res_lsq = least_squares(fun, x0, args=(tt, np.concatenate((X, Y))), loss=loss, f_scale=1.0,\
bounds=( [1, -np.pi, -10, .01] , [500., np.pi, 10, .800] ),
method=method
)
x = res_lsq.x
#print ("A0={} zeta={} df={} T2={}".format(x[0],x[1],x[2],x[3]))
else:
res_lsq = least_squares(fun, x0, args=(tt, np.concatenate((X, Y))), loss=loss, f_scale=1.0,\
#bounds=( [1., -np.pi, -5, .005] , [1000., np.pi, 5, .800] ),
method=method
)
#bounds=( [0., 0, -20, .0] , [1., np.pi, 20, .6] ))
x = res_lsq.x
return res_lsq.success, x[0], x[2], x[1], x[3]
# no df
#x = np.array( [1., 0., 0.2] )
#res_lsq = least_squares(fun2, x, args=(tt, np.concatenate((X, Y))), loss='soft_l1', f_scale=0.1)
#x = res_lsq.x
#return conv, E0,df,phi,T2
#return res_lsq.success, x[0], 0, x[1], x[2]
###################################################################
###################################################################
###################################################################
if __name__ == "__main__":
dt = .0001
T2 = .1
omega = 2000.*2*numpy.pi
phi = .0
T = 8.*T2
t = numpy.arange(0, T, dt)
# Synthetic data, simple single decaying sinusoid
# with a single decay parameter and gaussian noise added
data = numpy.exp(-t/T2) * numpy.sin(omega * t + phi) + numpy.random.normal(0,.05,len(t)) \
+ numpy.random.randint(-1,2,len(t))*numpy.random.exponential(.2,len(t))
cdata = numpy.exp(-t/T2) * numpy.sin(omega * t + phi) #+ numpy.random.normal(0,.25,len(t))
#data = numpy.random.normal(0,.25,len(t))
sigma2 = numpy.std(data[::-len(data)//4]) # integer slice step required in Python 3
#sigma2 = numpy.var(data[::-len(data)/4])
print("sigma2", sigma2)
[peaks,times,indices] = peakPicker(data, omega, dt)
[b1,b2,rT2] = regressCurve(peaks,times)
print("rT2 nonweighted", rT2)
[b1,b2,rT2] = regressCurve(peaks,times,sigma2)
print("rT2 weighted", rT2)
envelope = numpy.exp(-t/T2)
renvelope = numpy.exp(-t/rT2)
#outf = file('regress.txt','w')
#for i in range(len(times)):
# outf.write(str(times[i]) + " " + str(peaks[i]) + "\n")
#outf.close()
plt.plot(t,data, 'b')
plt.plot(t,cdata, 'g', linewidth=1)
plt.plot(t,envelope, color='violet', linewidth=4)
plt.plot(t,renvelope, 'r', linewidth=4)
plt.plot(times, numpy.array(peaks), 'bo', markersize=8, alpha=.25)
plt.legend(['noisy data','clean data','real envelope','regressed env','picks'])
plt.savefig("regression.pdf")
# FFT check
fourier = fft(data)
plt.figure()
freq = fftfreq(len(data), d=dt)
plt.plot(freq, (fourier.real))
plt.show()
# TODO do a bunch in batch mode to see if T2 estimate is better with or without
# weighting and which model is best.
# TODO try with real data
# TODO test filters (median, FFT, notch)
# It looks like weighting is good for relatively low sigma, but for noisy data
# it hurts us. Check | Akvo | /Akvo-1.7.2-py3-none-any.whl/akvo/tressel/decay.py | decay.py |
import numpy as np
from numpy.linalg import lstsq
from numpy.linalg import norm
from numpy import fft
import pylab
from scipy.signal import correlate
def autocorr(x):
#result = np.correlate(x, x, mode='full')
result = correlate(x, x, mode='full')
return result[result.size//2:] # integer index required in Python 3
class AdaptiveFilter:
def __init__(self, mu):
self.mu = mu
def adapt_filt_Ref(self, x, R, M, mu, PCA, lambda2=0.95, H0=0):
""" Taken from .m file
This function is written to allow the user to filter an input signal
with an adaptive filter that utilizes 2 reference signals instead of
the standard method which allows for only 1 reference signal.
Author: Rob Clemens Date: 3/16/06
Modified and ported to Python; now takes an arbitrary number of reference channels
Original public domain source
https://www.mathworks.com/matlabcentral/fileexchange/10447-noise-canceling-adaptive-filter
x = data array
R = reference array
M = number of taps
mu = scaling for the initial inverse correlation matrix, R(0) = I/mu (lambda2 is the forgetting factor)
PCA = Perform PCA
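The update applied in the sample loop below is a recursive least-squares
(RLS) noise canceller, with r(n) the stacked reference taps:
K(n) = R(n-1) r(n) / ( lambda2 + r(n)^T R(n-1) r(n) )
e(n) = x(n) - r(n)^T H(n-1) (the cleaned output)
H(n) = H(n-1) + K(n) e(n)
R(n) = lambda2^-1 (1 - K(n).r(n)) R(n-1) (element-wise; R is stored as a vector)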
"""
#from akvo.tressel import pca
import akvo.tressel.pca as pca
if np.shape(x) != np.shape(R[0]): # or np.shape(x) != np.shape(rx1):
print ("Error, non aligned")
exit(1)
if PCA == "Yes":
#print("Performing PCA calculation in noise cancellation")
# PCA decomposition on ref channels so signals are less related
R, K, means = pca.pca( R )
# test for in loop reference
#print("Cull nearly zero terms?", np.shape(x), np.shape(R))
#R = R[0:3,:]
#R = R[2:4,:]
#print(" removed zero terms?", np.shape(x), np.shape(R))
#H0 = H0[0:3*np.shape(x)[0]]
#H0 = H0[0:2*np.shape(x)[0]]
if all(H0) == 0:
# corrects for dimensionality issues if a simple 0 is passed
H = np.zeros( (len(R)*M))
else:
H = H0
Rn = np.ones(len(R)*M) / mu
r_ = np.zeros( (len(R), M) )
e = np.zeros(len(x)) # error, in our case the desired output
ilambda = lambda2**-1
for ix in range(0, len(x)):
# Only look forwards, to avoid distorting the late times
# (run backwards instead if you don't care about distorting the very late times.)
for ir in range(len(R)): # number of reference channels
if ix < M:
r_[ir,0:ix] = R[ir][0:ix]
r_[ir,ix:M] = 0
else:
r_[ir,:] = R[ir][ix-M:ix]
# reshape
r_n = np.reshape(r_, -1) # concatenate the ref channels in to a 1D array
K = (Rn* r_n) / (lambda2 + np.dot(r_n*Rn, r_n)) # Create/update K
e[ix] = x[ix] - np.dot(r_n.T, H) # e is the filtered signal, input - r(n) * Filter Coefs
H += K*e[ix]; # Update Filter Coefficients
Rn = ilambda*Rn - ilambda*np.dot(np.dot(K, r_n.T), Rn) # Update R(n)
return e, H
def transferFunctionFFT(self, D, R, reg=1e-2):
from akvo.tressel import pca
"""
Computes the transfer function (H) between a Data channel and
a number of Reference channels. The Matrices D and R are
expected to be in the frequency domain on input.
| R1'R1 R1'R2 R1'R3| |h1| |R1'D|
| R2'R1 R2'R2 R2'R3| * |h2| = |R2'D|
| R3'R1 R3'R2 R3'R3| |h3| |R3'D|
Returns the corrected array
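After H(w) is solved at every frequency, the predicted noise in the data
channel is sum_i H_i(w) R_i(w), and the spectrum returned is
D(w) - sum_i H_i(w) R_i(w)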
"""
# PCA decomposition on ref channels so signals are less related
#transMatrix, K, means = pca.pca( np.array([rx0, rx1]))
#RR = np.zeros(( np.shape(R[0])[0]*np.shape(R[0])[1], len(R)))
# RR = np.zeros(( len(R), np.shape(R[0])[0]*np.shape(R[0])[1] ))
# for ir in range(len(R)):
# RR[ir,:] = np.reshape(R[ir], -1)
# transMatrix, K, means = pca.pca(RR)
# #R rx0 = transMatrix[0,:]
# # rx1 = transMatrix[1,:]
# for ir in range(len(R)):
# R[ir] = transMatrix[ir,0]
import scipy.linalg
import akvo.tressel.pca as pca
# Compute as many transfer functions as len(R)
# A*H = B
nref = len(R)
H = np.zeros( (np.shape(D)[1], len(R)), dtype=complex )
for iw in range(np.shape(D)[1]):
A = np.zeros( (nref, nref), dtype=complex )
B = np.zeros( (nref) , dtype=complex)
for ii in range(nref):
for jj in range(nref):
# build A
A[ii,jj] = np.dot(R[ii][:,iw], R[jj][:,iw])
# build B
B[ii] = np.dot( R[ii][:,iw], D[:,iw] )
# compute H(iw)
#linalg.solve(a,b) if a is square
#print "A", A
#print "B", B
# TODO, regularise this solve step? So as to not fit the spurious noise
#print np.shape(B), np.shape(A)
#H[iw, :] = scipy.linalg.solve(A,B)
H[iw, :] = scipy.linalg.lstsq(A,B,cond=reg)[0]
#print "lstqt", np.shape(scipy.linalg.lstsq(A,B))
#print "solve", scipy.linalg.solve(A,B)
#H[iw,:] = scipy.linalg.lstsq(A,B) # otherwise
#H = np.zeros( (np.shape(D)[1], ) )
#print H #A, B
Error = np.zeros(np.shape(D), dtype=complex)
for ir in range(nref):
for q in range( np.shape(D)[0] ):
#print "dimcheck", np.shape(H[:,ir]), np.shape(R[ir][q,:] )
Error[q,:] += H[:,ir]*R[ir][q,:]
return D - Error | Akvo | /Akvo-1.7.2-py3-none-any.whl/akvo/tressel/adapt.py | adapt.py |
__all__ = ['magma', 'magma_r', 'inferno', 'inferno_r', 'plasma', 'plasma_r', 'viridis', 'viridis_r']
_magma_data = [[0.001462, 0.000466, 0.013866],
[0.002258, 0.001295, 0.018331],
[0.003279, 0.002305, 0.023708],
[0.004512, 0.003490, 0.029965],
[0.005950, 0.004843, 0.037130],
[0.007588, 0.006356, 0.044973],
[0.009426, 0.008022, 0.052844],
[0.011465, 0.009828, 0.060750],
[0.013708, 0.011771, 0.068667],
[0.016156, 0.013840, 0.076603],
[0.018815, 0.016026, 0.084584],
[0.021692, 0.018320, 0.092610],
[0.024792, 0.020715, 0.100676],
[0.028123, 0.023201, 0.108787],
[0.031696, 0.025765, 0.116965],
[0.035520, 0.028397, 0.125209],
[0.039608, 0.031090, 0.133515],
[0.043830, 0.033830, 0.141886],
[0.048062, 0.036607, 0.150327],
[0.052320, 0.039407, 0.158841],
[0.056615, 0.042160, 0.167446],
[0.060949, 0.044794, 0.176129],
[0.065330, 0.047318, 0.184892],
[0.069764, 0.049726, 0.193735],
[0.074257, 0.052017, 0.202660],
[0.078815, 0.054184, 0.211667],
[0.083446, 0.056225, 0.220755],
[0.088155, 0.058133, 0.229922],
[0.092949, 0.059904, 0.239164],
[0.097833, 0.061531, 0.248477],
[0.102815, 0.063010, 0.257854],
[0.107899, 0.064335, 0.267289],
[0.113094, 0.065492, 0.276784],
[0.118405, 0.066479, 0.286321],
[0.123833, 0.067295, 0.295879],
[0.129380, 0.067935, 0.305443],
[0.135053, 0.068391, 0.315000],
[0.140858, 0.068654, 0.324538],
[0.146785, 0.068738, 0.334011],
[0.152839, 0.068637, 0.343404],
[0.159018, 0.068354, 0.352688],
[0.165308, 0.067911, 0.361816],
[0.171713, 0.067305, 0.370771],
[0.178212, 0.066576, 0.379497],
[0.184801, 0.065732, 0.387973],
[0.191460, 0.064818, 0.396152],
[0.198177, 0.063862, 0.404009],
[0.204935, 0.062907, 0.411514],
[0.211718, 0.061992, 0.418647],
[0.218512, 0.061158, 0.425392],
[0.225302, 0.060445, 0.431742],
[0.232077, 0.059889, 0.437695],
[0.238826, 0.059517, 0.443256],
[0.245543, 0.059352, 0.448436],
[0.252220, 0.059415, 0.453248],
[0.258857, 0.059706, 0.457710],
[0.265447, 0.060237, 0.461840],
[0.271994, 0.060994, 0.465660],
[0.278493, 0.061978, 0.469190],
[0.284951, 0.063168, 0.472451],
[0.291366, 0.064553, 0.475462],
[0.297740, 0.066117, 0.478243],
[0.304081, 0.067835, 0.480812],
[0.310382, 0.069702, 0.483186],
[0.316654, 0.071690, 0.485380],
[0.322899, 0.073782, 0.487408],
[0.329114, 0.075972, 0.489287],
[0.335308, 0.078236, 0.491024],
[0.341482, 0.080564, 0.492631],
[0.347636, 0.082946, 0.494121],
[0.353773, 0.085373, 0.495501],
[0.359898, 0.087831, 0.496778],
[0.366012, 0.090314, 0.497960],
[0.372116, 0.092816, 0.499053],
[0.378211, 0.095332, 0.500067],
[0.384299, 0.097855, 0.501002],
[0.390384, 0.100379, 0.501864],
[0.396467, 0.102902, 0.502658],
[0.402548, 0.105420, 0.503386],
[0.408629, 0.107930, 0.504052],
[0.414709, 0.110431, 0.504662],
[0.420791, 0.112920, 0.505215],
[0.426877, 0.115395, 0.505714],
[0.432967, 0.117855, 0.506160],
[0.439062, 0.120298, 0.506555],
[0.445163, 0.122724, 0.506901],
[0.451271, 0.125132, 0.507198],
[0.457386, 0.127522, 0.507448],
[0.463508, 0.129893, 0.507652],
[0.469640, 0.132245, 0.507809],
[0.475780, 0.134577, 0.507921],
[0.481929, 0.136891, 0.507989],
[0.488088, 0.139186, 0.508011],
[0.494258, 0.141462, 0.507988],
[0.500438, 0.143719, 0.507920],
[0.506629, 0.145958, 0.507806],
[0.512831, 0.148179, 0.507648],
[0.519045, 0.150383, 0.507443],
[0.525270, 0.152569, 0.507192],
[0.531507, 0.154739, 0.506895],
[0.537755, 0.156894, 0.506551],
[0.544015, 0.159033, 0.506159],
[0.550287, 0.161158, 0.505719],
[0.556571, 0.163269, 0.505230],
[0.562866, 0.165368, 0.504692],
[0.569172, 0.167454, 0.504105],
[0.575490, 0.169530, 0.503466],
[0.581819, 0.171596, 0.502777],
[0.588158, 0.173652, 0.502035],
[0.594508, 0.175701, 0.501241],
[0.600868, 0.177743, 0.500394],
[0.607238, 0.179779, 0.499492],
[0.613617, 0.181811, 0.498536],
[0.620005, 0.183840, 0.497524],
[0.626401, 0.185867, 0.496456],
[0.632805, 0.187893, 0.495332],
[0.639216, 0.189921, 0.494150],
[0.645633, 0.191952, 0.492910],
[0.652056, 0.193986, 0.491611],
[0.658483, 0.196027, 0.490253],
[0.664915, 0.198075, 0.488836],
[0.671349, 0.200133, 0.487358],
[0.677786, 0.202203, 0.485819],
[0.684224, 0.204286, 0.484219],
[0.690661, 0.206384, 0.482558],
[0.697098, 0.208501, 0.480835],
[0.703532, 0.210638, 0.479049],
[0.709962, 0.212797, 0.477201],
[0.716387, 0.214982, 0.475290],
[0.722805, 0.217194, 0.473316],
[0.729216, 0.219437, 0.471279],
[0.735616, 0.221713, 0.469180],
[0.742004, 0.224025, 0.467018],
[0.748378, 0.226377, 0.464794],
[0.754737, 0.228772, 0.462509],
[0.761077, 0.231214, 0.460162],
[0.767398, 0.233705, 0.457755],
[0.773695, 0.236249, 0.455289],
[0.779968, 0.238851, 0.452765],
[0.786212, 0.241514, 0.450184],
[0.792427, 0.244242, 0.447543],
[0.798608, 0.247040, 0.444848],
[0.804752, 0.249911, 0.442102],
[0.810855, 0.252861, 0.439305],
[0.816914, 0.255895, 0.436461],
[0.822926, 0.259016, 0.433573],
[0.828886, 0.262229, 0.430644],
[0.834791, 0.265540, 0.427671],
[0.840636, 0.268953, 0.424666],
[0.846416, 0.272473, 0.421631],
[0.852126, 0.276106, 0.418573],
[0.857763, 0.279857, 0.415496],
[0.863320, 0.283729, 0.412403],
[0.868793, 0.287728, 0.409303],
[0.874176, 0.291859, 0.406205],
[0.879464, 0.296125, 0.403118],
[0.884651, 0.300530, 0.400047],
[0.889731, 0.305079, 0.397002],
[0.894700, 0.309773, 0.393995],
[0.899552, 0.314616, 0.391037],
[0.904281, 0.319610, 0.388137],
[0.908884, 0.324755, 0.385308],
[0.913354, 0.330052, 0.382563],
[0.917689, 0.335500, 0.379915],
[0.921884, 0.341098, 0.377376],
[0.925937, 0.346844, 0.374959],
[0.929845, 0.352734, 0.372677],
[0.933606, 0.358764, 0.370541],
[0.937221, 0.364929, 0.368567],
[0.940687, 0.371224, 0.366762],
[0.944006, 0.377643, 0.365136],
[0.947180, 0.384178, 0.363701],
[0.950210, 0.390820, 0.362468],
[0.953099, 0.397563, 0.361438],
[0.955849, 0.404400, 0.360619],
[0.958464, 0.411324, 0.360014],
[0.960949, 0.418323, 0.359630],
[0.963310, 0.425390, 0.359469],
[0.965549, 0.432519, 0.359529],
[0.967671, 0.439703, 0.359810],
[0.969680, 0.446936, 0.360311],
[0.971582, 0.454210, 0.361030],
[0.973381, 0.461520, 0.361965],
[0.975082, 0.468861, 0.363111],
[0.976690, 0.476226, 0.364466],
[0.978210, 0.483612, 0.366025],
[0.979645, 0.491014, 0.367783],
[0.981000, 0.498428, 0.369734],
[0.982279, 0.505851, 0.371874],
[0.983485, 0.513280, 0.374198],
[0.984622, 0.520713, 0.376698],
[0.985693, 0.528148, 0.379371],
[0.986700, 0.535582, 0.382210],
[0.987646, 0.543015, 0.385210],
[0.988533, 0.550446, 0.388365],
[0.989363, 0.557873, 0.391671],
[0.990138, 0.565296, 0.395122],
[0.990871, 0.572706, 0.398714],
[0.991558, 0.580107, 0.402441],
[0.992196, 0.587502, 0.406299],
[0.992785, 0.594891, 0.410283],
[0.993326, 0.602275, 0.414390],
[0.993834, 0.609644, 0.418613],
[0.994309, 0.616999, 0.422950],
[0.994738, 0.624350, 0.427397],
[0.995122, 0.631696, 0.431951],
[0.995480, 0.639027, 0.436607],
[0.995810, 0.646344, 0.441361],
[0.996096, 0.653659, 0.446213],
[0.996341, 0.660969, 0.451160],
[0.996580, 0.668256, 0.456192],
[0.996775, 0.675541, 0.461314],
[0.996925, 0.682828, 0.466526],
[0.997077, 0.690088, 0.471811],
[0.997186, 0.697349, 0.477182],
[0.997254, 0.704611, 0.482635],
[0.997325, 0.711848, 0.488154],
[0.997351, 0.719089, 0.493755],
[0.997351, 0.726324, 0.499428],
[0.997341, 0.733545, 0.505167],
[0.997285, 0.740772, 0.510983],
[0.997228, 0.747981, 0.516859],
[0.997138, 0.755190, 0.522806],
[0.997019, 0.762398, 0.528821],
[0.996898, 0.769591, 0.534892],
[0.996727, 0.776795, 0.541039],
[0.996571, 0.783977, 0.547233],
[0.996369, 0.791167, 0.553499],
[0.996162, 0.798348, 0.559820],
[0.995932, 0.805527, 0.566202],
[0.995680, 0.812706, 0.572645],
[0.995424, 0.819875, 0.579140],
[0.995131, 0.827052, 0.585701],
[0.994851, 0.834213, 0.592307],
[0.994524, 0.841387, 0.598983],
[0.994222, 0.848540, 0.605696],
[0.993866, 0.855711, 0.612482],
[0.993545, 0.862859, 0.619299],
[0.993170, 0.870024, 0.626189],
[0.992831, 0.877168, 0.633109],
[0.992440, 0.884330, 0.640099],
[0.992089, 0.891470, 0.647116],
[0.991688, 0.898627, 0.654202],
[0.991332, 0.905763, 0.661309],
[0.990930, 0.912915, 0.668481],
[0.990570, 0.920049, 0.675675],
[0.990175, 0.927196, 0.682926],
[0.989815, 0.934329, 0.690198],
[0.989434, 0.941470, 0.697519],
[0.989077, 0.948604, 0.704863],
[0.988717, 0.955742, 0.712242],
[0.988367, 0.962878, 0.719649],
[0.988033, 0.970012, 0.727077],
[0.987691, 0.977154, 0.734536],
[0.987387, 0.984288, 0.742002],
[0.987053, 0.991438, 0.749504]]
_inferno_data = [[0.001462, 0.000466, 0.013866],
[0.002267, 0.001270, 0.018570],
[0.003299, 0.002249, 0.024239],
[0.004547, 0.003392, 0.030909],
[0.006006, 0.004692, 0.038558],
[0.007676, 0.006136, 0.046836],
[0.009561, 0.007713, 0.055143],
[0.011663, 0.009417, 0.063460],
[0.013995, 0.011225, 0.071862],
[0.016561, 0.013136, 0.080282],
[0.019373, 0.015133, 0.088767],
[0.022447, 0.017199, 0.097327],
[0.025793, 0.019331, 0.105930],
[0.029432, 0.021503, 0.114621],
[0.033385, 0.023702, 0.123397],
[0.037668, 0.025921, 0.132232],
[0.042253, 0.028139, 0.141141],
[0.046915, 0.030324, 0.150164],
[0.051644, 0.032474, 0.159254],
[0.056449, 0.034569, 0.168414],
[0.061340, 0.036590, 0.177642],
[0.066331, 0.038504, 0.186962],
[0.071429, 0.040294, 0.196354],
[0.076637, 0.041905, 0.205799],
[0.081962, 0.043328, 0.215289],
[0.087411, 0.044556, 0.224813],
[0.092990, 0.045583, 0.234358],
[0.098702, 0.046402, 0.243904],
[0.104551, 0.047008, 0.253430],
[0.110536, 0.047399, 0.262912],
[0.116656, 0.047574, 0.272321],
[0.122908, 0.047536, 0.281624],
[0.129285, 0.047293, 0.290788],
[0.135778, 0.046856, 0.299776],
[0.142378, 0.046242, 0.308553],
[0.149073, 0.045468, 0.317085],
[0.155850, 0.044559, 0.325338],
[0.162689, 0.043554, 0.333277],
[0.169575, 0.042489, 0.340874],
[0.176493, 0.041402, 0.348111],
[0.183429, 0.040329, 0.354971],
[0.190367, 0.039309, 0.361447],
[0.197297, 0.038400, 0.367535],
[0.204209, 0.037632, 0.373238],
[0.211095, 0.037030, 0.378563],
[0.217949, 0.036615, 0.383522],
[0.224763, 0.036405, 0.388129],
[0.231538, 0.036405, 0.392400],
[0.238273, 0.036621, 0.396353],
[0.244967, 0.037055, 0.400007],
[0.251620, 0.037705, 0.403378],
[0.258234, 0.038571, 0.406485],
[0.264810, 0.039647, 0.409345],
[0.271347, 0.040922, 0.411976],
[0.277850, 0.042353, 0.414392],
[0.284321, 0.043933, 0.416608],
[0.290763, 0.045644, 0.418637],
[0.297178, 0.047470, 0.420491],
[0.303568, 0.049396, 0.422182],
[0.309935, 0.051407, 0.423721],
[0.316282, 0.053490, 0.425116],
[0.322610, 0.055634, 0.426377],
[0.328921, 0.057827, 0.427511],
[0.335217, 0.060060, 0.428524],
[0.341500, 0.062325, 0.429425],
[0.347771, 0.064616, 0.430217],
[0.354032, 0.066925, 0.430906],
[0.360284, 0.069247, 0.431497],
[0.366529, 0.071579, 0.431994],
[0.372768, 0.073915, 0.432400],
[0.379001, 0.076253, 0.432719],
[0.385228, 0.078591, 0.432955],
[0.391453, 0.080927, 0.433109],
[0.397674, 0.083257, 0.433183],
[0.403894, 0.085580, 0.433179],
[0.410113, 0.087896, 0.433098],
[0.416331, 0.090203, 0.432943],
[0.422549, 0.092501, 0.432714],
[0.428768, 0.094790, 0.432412],
[0.434987, 0.097069, 0.432039],
[0.441207, 0.099338, 0.431594],
[0.447428, 0.101597, 0.431080],
[0.453651, 0.103848, 0.430498],
[0.459875, 0.106089, 0.429846],
[0.466100, 0.108322, 0.429125],
[0.472328, 0.110547, 0.428334],
[0.478558, 0.112764, 0.427475],
[0.484789, 0.114974, 0.426548],
[0.491022, 0.117179, 0.425552],
[0.497257, 0.119379, 0.424488],
[0.503493, 0.121575, 0.423356],
[0.509730, 0.123769, 0.422156],
[0.515967, 0.125960, 0.420887],
[0.522206, 0.128150, 0.419549],
[0.528444, 0.130341, 0.418142],
[0.534683, 0.132534, 0.416667],
[0.540920, 0.134729, 0.415123],
[0.547157, 0.136929, 0.413511],
[0.553392, 0.139134, 0.411829],
[0.559624, 0.141346, 0.410078],
[0.565854, 0.143567, 0.408258],
[0.572081, 0.145797, 0.406369],
[0.578304, 0.148039, 0.404411],
[0.584521, 0.150294, 0.402385],
[0.590734, 0.152563, 0.400290],
[0.596940, 0.154848, 0.398125],
[0.603139, 0.157151, 0.395891],
[0.609330, 0.159474, 0.393589],
[0.615513, 0.161817, 0.391219],
[0.621685, 0.164184, 0.388781],
[0.627847, 0.166575, 0.386276],
[0.633998, 0.168992, 0.383704],
[0.640135, 0.171438, 0.381065],
[0.646260, 0.173914, 0.378359],
[0.652369, 0.176421, 0.375586],
[0.658463, 0.178962, 0.372748],
[0.664540, 0.181539, 0.369846],
[0.670599, 0.184153, 0.366879],
[0.676638, 0.186807, 0.363849],
[0.682656, 0.189501, 0.360757],
[0.688653, 0.192239, 0.357603],
[0.694627, 0.195021, 0.354388],
[0.700576, 0.197851, 0.351113],
[0.706500, 0.200728, 0.347777],
[0.712396, 0.203656, 0.344383],
[0.718264, 0.206636, 0.340931],
[0.724103, 0.209670, 0.337424],
[0.729909, 0.212759, 0.333861],
[0.735683, 0.215906, 0.330245],
[0.741423, 0.219112, 0.326576],
[0.747127, 0.222378, 0.322856],
[0.752794, 0.225706, 0.319085],
[0.758422, 0.229097, 0.315266],
[0.764010, 0.232554, 0.311399],
[0.769556, 0.236077, 0.307485],
[0.775059, 0.239667, 0.303526],
[0.780517, 0.243327, 0.299523],
[0.785929, 0.247056, 0.295477],
[0.791293, 0.250856, 0.291390],
[0.796607, 0.254728, 0.287264],
[0.801871, 0.258674, 0.283099],
[0.807082, 0.262692, 0.278898],
[0.812239, 0.266786, 0.274661],
[0.817341, 0.270954, 0.270390],
[0.822386, 0.275197, 0.266085],
[0.827372, 0.279517, 0.261750],
[0.832299, 0.283913, 0.257383],
[0.837165, 0.288385, 0.252988],
[0.841969, 0.292933, 0.248564],
[0.846709, 0.297559, 0.244113],
[0.851384, 0.302260, 0.239636],
[0.855992, 0.307038, 0.235133],
[0.860533, 0.311892, 0.230606],
[0.865006, 0.316822, 0.226055],
[0.869409, 0.321827, 0.221482],
[0.873741, 0.326906, 0.216886],
[0.878001, 0.332060, 0.212268],
[0.882188, 0.337287, 0.207628],
[0.886302, 0.342586, 0.202968],
[0.890341, 0.347957, 0.198286],
[0.894305, 0.353399, 0.193584],
[0.898192, 0.358911, 0.188860],
[0.902003, 0.364492, 0.184116],
[0.905735, 0.370140, 0.179350],
[0.909390, 0.375856, 0.174563],
[0.912966, 0.381636, 0.169755],
[0.916462, 0.387481, 0.164924],
[0.919879, 0.393389, 0.160070],
[0.923215, 0.399359, 0.155193],
[0.926470, 0.405389, 0.150292],
[0.929644, 0.411479, 0.145367],
[0.932737, 0.417627, 0.140417],
[0.935747, 0.423831, 0.135440],
[0.938675, 0.430091, 0.130438],
[0.941521, 0.436405, 0.125409],
[0.944285, 0.442772, 0.120354],
[0.946965, 0.449191, 0.115272],
[0.949562, 0.455660, 0.110164],
[0.952075, 0.462178, 0.105031],
[0.954506, 0.468744, 0.099874],
[0.956852, 0.475356, 0.094695],
[0.959114, 0.482014, 0.089499],
[0.961293, 0.488716, 0.084289],
[0.963387, 0.495462, 0.079073],
[0.965397, 0.502249, 0.073859],
[0.967322, 0.509078, 0.068659],
[0.969163, 0.515946, 0.063488],
[0.970919, 0.522853, 0.058367],
[0.972590, 0.529798, 0.053324],
[0.974176, 0.536780, 0.048392],
[0.975677, 0.543798, 0.043618],
[0.977092, 0.550850, 0.039050],
[0.978422, 0.557937, 0.034931],
[0.979666, 0.565057, 0.031409],
[0.980824, 0.572209, 0.028508],
[0.981895, 0.579392, 0.026250],
[0.982881, 0.586606, 0.024661],
[0.983779, 0.593849, 0.023770],
[0.984591, 0.601122, 0.023606],
[0.985315, 0.608422, 0.024202],
[0.985952, 0.615750, 0.025592],
[0.986502, 0.623105, 0.027814],
[0.986964, 0.630485, 0.030908],
[0.987337, 0.637890, 0.034916],
[0.987622, 0.645320, 0.039886],
[0.987819, 0.652773, 0.045581],
[0.987926, 0.660250, 0.051750],
[0.987945, 0.667748, 0.058329],
[0.987874, 0.675267, 0.065257],
[0.987714, 0.682807, 0.072489],
[0.987464, 0.690366, 0.079990],
[0.987124, 0.697944, 0.087731],
[0.986694, 0.705540, 0.095694],
[0.986175, 0.713153, 0.103863],
[0.985566, 0.720782, 0.112229],
[0.984865, 0.728427, 0.120785],
[0.984075, 0.736087, 0.129527],
[0.983196, 0.743758, 0.138453],
[0.982228, 0.751442, 0.147565],
[0.981173, 0.759135, 0.156863],
[0.980032, 0.766837, 0.166353],
[0.978806, 0.774545, 0.176037],
[0.977497, 0.782258, 0.185923],
[0.976108, 0.789974, 0.196018],
[0.974638, 0.797692, 0.206332],
[0.973088, 0.805409, 0.216877],
[0.971468, 0.813122, 0.227658],
[0.969783, 0.820825, 0.238686],
[0.968041, 0.828515, 0.249972],
[0.966243, 0.836191, 0.261534],
[0.964394, 0.843848, 0.273391],
[0.962517, 0.851476, 0.285546],
[0.960626, 0.859069, 0.298010],
[0.958720, 0.866624, 0.310820],
[0.956834, 0.874129, 0.323974],
[0.954997, 0.881569, 0.337475],
[0.953215, 0.888942, 0.351369],
[0.951546, 0.896226, 0.365627],
[0.950018, 0.903409, 0.380271],
[0.948683, 0.910473, 0.395289],
[0.947594, 0.917399, 0.410665],
[0.946809, 0.924168, 0.426373],
[0.946392, 0.930761, 0.442367],
[0.946403, 0.937159, 0.458592],
[0.946903, 0.943348, 0.474970],
[0.947937, 0.949318, 0.491426],
[0.949545, 0.955063, 0.507860],
[0.951740, 0.960587, 0.524203],
[0.954529, 0.965896, 0.540361],
[0.957896, 0.971003, 0.556275],
[0.961812, 0.975924, 0.571925],
[0.966249, 0.980678, 0.587206],
[0.971162, 0.985282, 0.602154],
[0.976511, 0.989753, 0.616760],
[0.982257, 0.994109, 0.631017],
[0.988362, 0.998364, 0.644924]]
_plasma_data = [[0.050383, 0.029803, 0.527975],
[0.063536, 0.028426, 0.533124],
[0.075353, 0.027206, 0.538007],
[0.086222, 0.026125, 0.542658],
[0.096379, 0.025165, 0.547103],
[0.105980, 0.024309, 0.551368],
[0.115124, 0.023556, 0.555468],
[0.123903, 0.022878, 0.559423],
[0.132381, 0.022258, 0.563250],
[0.140603, 0.021687, 0.566959],
[0.148607, 0.021154, 0.570562],
[0.156421, 0.020651, 0.574065],
[0.164070, 0.020171, 0.577478],
[0.171574, 0.019706, 0.580806],
[0.178950, 0.019252, 0.584054],
[0.186213, 0.018803, 0.587228],
[0.193374, 0.018354, 0.590330],
[0.200445, 0.017902, 0.593364],
[0.207435, 0.017442, 0.596333],
[0.214350, 0.016973, 0.599239],
[0.221197, 0.016497, 0.602083],
[0.227983, 0.016007, 0.604867],
[0.234715, 0.015502, 0.607592],
[0.241396, 0.014979, 0.610259],
[0.248032, 0.014439, 0.612868],
[0.254627, 0.013882, 0.615419],
[0.261183, 0.013308, 0.617911],
[0.267703, 0.012716, 0.620346],
[0.274191, 0.012109, 0.622722],
[0.280648, 0.011488, 0.625038],
[0.287076, 0.010855, 0.627295],
[0.293478, 0.010213, 0.629490],
[0.299855, 0.009561, 0.631624],
[0.306210, 0.008902, 0.633694],
[0.312543, 0.008239, 0.635700],
[0.318856, 0.007576, 0.637640],
[0.325150, 0.006915, 0.639512],
[0.331426, 0.006261, 0.641316],
[0.337683, 0.005618, 0.643049],
[0.343925, 0.004991, 0.644710],
[0.350150, 0.004382, 0.646298],
[0.356359, 0.003798, 0.647810],
[0.362553, 0.003243, 0.649245],
[0.368733, 0.002724, 0.650601],
[0.374897, 0.002245, 0.651876],
[0.381047, 0.001814, 0.653068],
[0.387183, 0.001434, 0.654177],
[0.393304, 0.001114, 0.655199],
[0.399411, 0.000859, 0.656133],
[0.405503, 0.000678, 0.656977],
[0.411580, 0.000577, 0.657730],
[0.417642, 0.000564, 0.658390],
[0.423689, 0.000646, 0.658956],
[0.429719, 0.000831, 0.659425],
[0.435734, 0.001127, 0.659797],
[0.441732, 0.001540, 0.660069],
[0.447714, 0.002080, 0.660240],
[0.453677, 0.002755, 0.660310],
[0.459623, 0.003574, 0.660277],
[0.465550, 0.004545, 0.660139],
[0.471457, 0.005678, 0.659897],
[0.477344, 0.006980, 0.659549],
[0.483210, 0.008460, 0.659095],
[0.489055, 0.010127, 0.658534],
[0.494877, 0.011990, 0.657865],
[0.500678, 0.014055, 0.657088],
[0.506454, 0.016333, 0.656202],
[0.512206, 0.018833, 0.655209],
[0.517933, 0.021563, 0.654109],
[0.523633, 0.024532, 0.652901],
[0.529306, 0.027747, 0.651586],
[0.534952, 0.031217, 0.650165],
[0.540570, 0.034950, 0.648640],
[0.546157, 0.038954, 0.647010],
[0.551715, 0.043136, 0.645277],
[0.557243, 0.047331, 0.643443],
[0.562738, 0.051545, 0.641509],
[0.568201, 0.055778, 0.639477],
[0.573632, 0.060028, 0.637349],
[0.579029, 0.064296, 0.635126],
[0.584391, 0.068579, 0.632812],
[0.589719, 0.072878, 0.630408],
[0.595011, 0.077190, 0.627917],
[0.600266, 0.081516, 0.625342],
[0.605485, 0.085854, 0.622686],
[0.610667, 0.090204, 0.619951],
[0.615812, 0.094564, 0.617140],
[0.620919, 0.098934, 0.614257],
[0.625987, 0.103312, 0.611305],
[0.631017, 0.107699, 0.608287],
[0.636008, 0.112092, 0.605205],
[0.640959, 0.116492, 0.602065],
[0.645872, 0.120898, 0.598867],
[0.650746, 0.125309, 0.595617],
[0.655580, 0.129725, 0.592317],
[0.660374, 0.134144, 0.588971],
[0.665129, 0.138566, 0.585582],
[0.669845, 0.142992, 0.582154],
[0.674522, 0.147419, 0.578688],
[0.679160, 0.151848, 0.575189],
[0.683758, 0.156278, 0.571660],
[0.688318, 0.160709, 0.568103],
[0.692840, 0.165141, 0.564522],
[0.697324, 0.169573, 0.560919],
[0.701769, 0.174005, 0.557296],
[0.706178, 0.178437, 0.553657],
[0.710549, 0.182868, 0.550004],
[0.714883, 0.187299, 0.546338],
[0.719181, 0.191729, 0.542663],
[0.723444, 0.196158, 0.538981],
[0.727670, 0.200586, 0.535293],
[0.731862, 0.205013, 0.531601],
[0.736019, 0.209439, 0.527908],
[0.740143, 0.213864, 0.524216],
[0.744232, 0.218288, 0.520524],
[0.748289, 0.222711, 0.516834],
[0.752312, 0.227133, 0.513149],
[0.756304, 0.231555, 0.509468],
[0.760264, 0.235976, 0.505794],
[0.764193, 0.240396, 0.502126],
[0.768090, 0.244817, 0.498465],
[0.771958, 0.249237, 0.494813],
[0.775796, 0.253658, 0.491171],
[0.779604, 0.258078, 0.487539],
[0.783383, 0.262500, 0.483918],
[0.787133, 0.266922, 0.480307],
[0.790855, 0.271345, 0.476706],
[0.794549, 0.275770, 0.473117],
[0.798216, 0.280197, 0.469538],
[0.801855, 0.284626, 0.465971],
[0.805467, 0.289057, 0.462415],
[0.809052, 0.293491, 0.458870],
[0.812612, 0.297928, 0.455338],
[0.816144, 0.302368, 0.451816],
[0.819651, 0.306812, 0.448306],
[0.823132, 0.311261, 0.444806],
[0.826588, 0.315714, 0.441316],
[0.830018, 0.320172, 0.437836],
[0.833422, 0.324635, 0.434366],
[0.836801, 0.329105, 0.430905],
[0.840155, 0.333580, 0.427455],
[0.843484, 0.338062, 0.424013],
[0.846788, 0.342551, 0.420579],
[0.850066, 0.347048, 0.417153],
[0.853319, 0.351553, 0.413734],
[0.856547, 0.356066, 0.410322],
[0.859750, 0.360588, 0.406917],
[0.862927, 0.365119, 0.403519],
[0.866078, 0.369660, 0.400126],
[0.869203, 0.374212, 0.396738],
[0.872303, 0.378774, 0.393355],
[0.875376, 0.383347, 0.389976],
[0.878423, 0.387932, 0.386600],
[0.881443, 0.392529, 0.383229],
[0.884436, 0.397139, 0.379860],
[0.887402, 0.401762, 0.376494],
[0.890340, 0.406398, 0.373130],
[0.893250, 0.411048, 0.369768],
[0.896131, 0.415712, 0.366407],
[0.898984, 0.420392, 0.363047],
[0.901807, 0.425087, 0.359688],
[0.904601, 0.429797, 0.356329],
[0.907365, 0.434524, 0.352970],
[0.910098, 0.439268, 0.349610],
[0.912800, 0.444029, 0.346251],
[0.915471, 0.448807, 0.342890],
[0.918109, 0.453603, 0.339529],
[0.920714, 0.458417, 0.336166],
[0.923287, 0.463251, 0.332801],
[0.925825, 0.468103, 0.329435],
[0.928329, 0.472975, 0.326067],
[0.930798, 0.477867, 0.322697],
[0.933232, 0.482780, 0.319325],
[0.935630, 0.487712, 0.315952],
[0.937990, 0.492667, 0.312575],
[0.940313, 0.497642, 0.309197],
[0.942598, 0.502639, 0.305816],
[0.944844, 0.507658, 0.302433],
[0.947051, 0.512699, 0.299049],
[0.949217, 0.517763, 0.295662],
[0.951344, 0.522850, 0.292275],
[0.953428, 0.527960, 0.288883],
[0.955470, 0.533093, 0.285490],
[0.957469, 0.538250, 0.282096],
[0.959424, 0.543431, 0.278701],
[0.961336, 0.548636, 0.275305],
[0.963203, 0.553865, 0.271909],
[0.965024, 0.559118, 0.268513],
[0.966798, 0.564396, 0.265118],
[0.968526, 0.569700, 0.261721],
[0.970205, 0.575028, 0.258325],
[0.971835, 0.580382, 0.254931],
[0.973416, 0.585761, 0.251540],
[0.974947, 0.591165, 0.248151],
[0.976428, 0.596595, 0.244767],
[0.977856, 0.602051, 0.241387],
[0.979233, 0.607532, 0.238013],
[0.980556, 0.613039, 0.234646],
[0.981826, 0.618572, 0.231287],
[0.983041, 0.624131, 0.227937],
[0.984199, 0.629718, 0.224595],
[0.985301, 0.635330, 0.221265],
[0.986345, 0.640969, 0.217948],
[0.987332, 0.646633, 0.214648],
[0.988260, 0.652325, 0.211364],
[0.989128, 0.658043, 0.208100],
[0.989935, 0.663787, 0.204859],
[0.990681, 0.669558, 0.201642],
[0.991365, 0.675355, 0.198453],
[0.991985, 0.681179, 0.195295],
[0.992541, 0.687030, 0.192170],
[0.993032, 0.692907, 0.189084],
[0.993456, 0.698810, 0.186041],
[0.993814, 0.704741, 0.183043],
[0.994103, 0.710698, 0.180097],
[0.994324, 0.716681, 0.177208],
[0.994474, 0.722691, 0.174381],
[0.994553, 0.728728, 0.171622],
[0.994561, 0.734791, 0.168938],
[0.994495, 0.740880, 0.166335],
[0.994355, 0.746995, 0.163821],
[0.994141, 0.753137, 0.161404],
[0.993851, 0.759304, 0.159092],
[0.993482, 0.765499, 0.156891],
[0.993033, 0.771720, 0.154808],
[0.992505, 0.777967, 0.152855],
[0.991897, 0.784239, 0.151042],
[0.991209, 0.790537, 0.149377],
[0.990439, 0.796859, 0.147870],
[0.989587, 0.803205, 0.146529],
[0.988648, 0.809579, 0.145357],
[0.987621, 0.815978, 0.144363],
[0.986509, 0.822401, 0.143557],
[0.985314, 0.828846, 0.142945],
[0.984031, 0.835315, 0.142528],
[0.982653, 0.841812, 0.142303],
[0.981190, 0.848329, 0.142279],
[0.979644, 0.854866, 0.142453],
[0.977995, 0.861432, 0.142808],
[0.976265, 0.868016, 0.143351],
[0.974443, 0.874622, 0.144061],
[0.972530, 0.881250, 0.144923],
[0.970533, 0.887896, 0.145919],
[0.968443, 0.894564, 0.147014],
[0.966271, 0.901249, 0.148180],
[0.964021, 0.907950, 0.149370],
[0.961681, 0.914672, 0.150520],
[0.959276, 0.921407, 0.151566],
[0.956808, 0.928152, 0.152409],
[0.954287, 0.934908, 0.152921],
[0.951726, 0.941671, 0.152925],
[0.949151, 0.948435, 0.152178],
[0.946602, 0.955190, 0.150328],
[0.944152, 0.961916, 0.146861],
[0.941896, 0.968590, 0.140956],
[0.940015, 0.975158, 0.131326]]
_viridis_data = [[0.267004, 0.004874, 0.329415],
[0.268510, 0.009605, 0.335427],
[0.269944, 0.014625, 0.341379],
[0.271305, 0.019942, 0.347269],
[0.272594, 0.025563, 0.353093],
[0.273809, 0.031497, 0.358853],
[0.274952, 0.037752, 0.364543],
[0.276022, 0.044167, 0.370164],
[0.277018, 0.050344, 0.375715],
[0.277941, 0.056324, 0.381191],
[0.278791, 0.062145, 0.386592],
[0.279566, 0.067836, 0.391917],
[0.280267, 0.073417, 0.397163],
[0.280894, 0.078907, 0.402329],
[0.281446, 0.084320, 0.407414],
[0.281924, 0.089666, 0.412415],
[0.282327, 0.094955, 0.417331],
[0.282656, 0.100196, 0.422160],
[0.282910, 0.105393, 0.426902],
[0.283091, 0.110553, 0.431554],
[0.283197, 0.115680, 0.436115],
[0.283229, 0.120777, 0.440584],
[0.283187, 0.125848, 0.444960],
[0.283072, 0.130895, 0.449241],
[0.282884, 0.135920, 0.453427],
[0.282623, 0.140926, 0.457517],
[0.282290, 0.145912, 0.461510],
[0.281887, 0.150881, 0.465405],
[0.281412, 0.155834, 0.469201],
[0.280868, 0.160771, 0.472899],
[0.280255, 0.165693, 0.476498],
[0.279574, 0.170599, 0.479997],
[0.278826, 0.175490, 0.483397],
[0.278012, 0.180367, 0.486697],
[0.277134, 0.185228, 0.489898],
[0.276194, 0.190074, 0.493001],
[0.275191, 0.194905, 0.496005],
[0.274128, 0.199721, 0.498911],
[0.273006, 0.204520, 0.501721],
[0.271828, 0.209303, 0.504434],
[0.270595, 0.214069, 0.507052],
[0.269308, 0.218818, 0.509577],
[0.267968, 0.223549, 0.512008],
[0.266580, 0.228262, 0.514349],
[0.265145, 0.232956, 0.516599],
[0.263663, 0.237631, 0.518762],
[0.262138, 0.242286, 0.520837],
[0.260571, 0.246922, 0.522828],
[0.258965, 0.251537, 0.524736],
[0.257322, 0.256130, 0.526563],
[0.255645, 0.260703, 0.528312],
[0.253935, 0.265254, 0.529983],
[0.252194, 0.269783, 0.531579],
[0.250425, 0.274290, 0.533103],
[0.248629, 0.278775, 0.534556],
[0.246811, 0.283237, 0.535941],
[0.244972, 0.287675, 0.537260],
[0.243113, 0.292092, 0.538516],
[0.241237, 0.296485, 0.539709],
[0.239346, 0.300855, 0.540844],
[0.237441, 0.305202, 0.541921],
[0.235526, 0.309527, 0.542944],
[0.233603, 0.313828, 0.543914],
[0.231674, 0.318106, 0.544834],
[0.229739, 0.322361, 0.545706],
[0.227802, 0.326594, 0.546532],
[0.225863, 0.330805, 0.547314],
[0.223925, 0.334994, 0.548053],
[0.221989, 0.339161, 0.548752],
[0.220057, 0.343307, 0.549413],
[0.218130, 0.347432, 0.550038],
[0.216210, 0.351535, 0.550627],
[0.214298, 0.355619, 0.551184],
[0.212395, 0.359683, 0.551710],
[0.210503, 0.363727, 0.552206],
[0.208623, 0.367752, 0.552675],
[0.206756, 0.371758, 0.553117],
[0.204903, 0.375746, 0.553533],
[0.203063, 0.379716, 0.553925],
[0.201239, 0.383670, 0.554294],
[0.199430, 0.387607, 0.554642],
[0.197636, 0.391528, 0.554969],
[0.195860, 0.395433, 0.555276],
[0.194100, 0.399323, 0.555565],
[0.192357, 0.403199, 0.555836],
[0.190631, 0.407061, 0.556089],
[0.188923, 0.410910, 0.556326],
[0.187231, 0.414746, 0.556547],
[0.185556, 0.418570, 0.556753],
[0.183898, 0.422383, 0.556944],
[0.182256, 0.426184, 0.557120],
[0.180629, 0.429975, 0.557282],
[0.179019, 0.433756, 0.557430],
[0.177423, 0.437527, 0.557565],
[0.175841, 0.441290, 0.557685],
[0.174274, 0.445044, 0.557792],
[0.172719, 0.448791, 0.557885],
[0.171176, 0.452530, 0.557965],
[0.169646, 0.456262, 0.558030],
[0.168126, 0.459988, 0.558082],
[0.166617, 0.463708, 0.558119],
[0.165117, 0.467423, 0.558141],
[0.163625, 0.471133, 0.558148],
[0.162142, 0.474838, 0.558140],
[0.160665, 0.478540, 0.558115],
[0.159194, 0.482237, 0.558073],
[0.157729, 0.485932, 0.558013],
[0.156270, 0.489624, 0.557936],
[0.154815, 0.493313, 0.557840],
[0.153364, 0.497000, 0.557724],
[0.151918, 0.500685, 0.557587],
[0.150476, 0.504369, 0.557430],
[0.149039, 0.508051, 0.557250],
[0.147607, 0.511733, 0.557049],
[0.146180, 0.515413, 0.556823],
[0.144759, 0.519093, 0.556572],
[0.143343, 0.522773, 0.556295],
[0.141935, 0.526453, 0.555991],
[0.140536, 0.530132, 0.555659],
[0.139147, 0.533812, 0.555298],
[0.137770, 0.537492, 0.554906],
[0.136408, 0.541173, 0.554483],
[0.135066, 0.544853, 0.554029],
[0.133743, 0.548535, 0.553541],
[0.132444, 0.552216, 0.553018],
[0.131172, 0.555899, 0.552459],
[0.129933, 0.559582, 0.551864],
[0.128729, 0.563265, 0.551229],
[0.127568, 0.566949, 0.550556],
[0.126453, 0.570633, 0.549841],
[0.125394, 0.574318, 0.549086],
[0.124395, 0.578002, 0.548287],
[0.123463, 0.581687, 0.547445],
[0.122606, 0.585371, 0.546557],
[0.121831, 0.589055, 0.545623],
[0.121148, 0.592739, 0.544641],
[0.120565, 0.596422, 0.543611],
[0.120092, 0.600104, 0.542530],
[0.119738, 0.603785, 0.541400],
[0.119512, 0.607464, 0.540218],
[0.119423, 0.611141, 0.538982],
[0.119483, 0.614817, 0.537692],
[0.119699, 0.618490, 0.536347],
[0.120081, 0.622161, 0.534946],
[0.120638, 0.625828, 0.533488],
[0.121380, 0.629492, 0.531973],
[0.122312, 0.633153, 0.530398],
[0.123444, 0.636809, 0.528763],
[0.124780, 0.640461, 0.527068],
[0.126326, 0.644107, 0.525311],
[0.128087, 0.647749, 0.523491],
[0.130067, 0.651384, 0.521608],
[0.132268, 0.655014, 0.519661],
[0.134692, 0.658636, 0.517649],
[0.137339, 0.662252, 0.515571],
[0.140210, 0.665859, 0.513427],
[0.143303, 0.669459, 0.511215],
[0.146616, 0.673050, 0.508936],
[0.150148, 0.676631, 0.506589],
[0.153894, 0.680203, 0.504172],
[0.157851, 0.683765, 0.501686],
[0.162016, 0.687316, 0.499129],
[0.166383, 0.690856, 0.496502],
[0.170948, 0.694384, 0.493803],
[0.175707, 0.697900, 0.491033],
[0.180653, 0.701402, 0.488189],
[0.185783, 0.704891, 0.485273],
[0.191090, 0.708366, 0.482284],
[0.196571, 0.711827, 0.479221],
[0.202219, 0.715272, 0.476084],
[0.208030, 0.718701, 0.472873],
[0.214000, 0.722114, 0.469588],
[0.220124, 0.725509, 0.466226],
[0.226397, 0.728888, 0.462789],
[0.232815, 0.732247, 0.459277],
[0.239374, 0.735588, 0.455688],
[0.246070, 0.738910, 0.452024],
[0.252899, 0.742211, 0.448284],
[0.259857, 0.745492, 0.444467],
[0.266941, 0.748751, 0.440573],
[0.274149, 0.751988, 0.436601],
[0.281477, 0.755203, 0.432552],
[0.288921, 0.758394, 0.428426],
[0.296479, 0.761561, 0.424223],
[0.304148, 0.764704, 0.419943],
[0.311925, 0.767822, 0.415586],
[0.319809, 0.770914, 0.411152],
[0.327796, 0.773980, 0.406640],
[0.335885, 0.777018, 0.402049],
[0.344074, 0.780029, 0.397381],
[0.352360, 0.783011, 0.392636],
[0.360741, 0.785964, 0.387814],
[0.369214, 0.788888, 0.382914],
[0.377779, 0.791781, 0.377939],
[0.386433, 0.794644, 0.372886],
[0.395174, 0.797475, 0.367757],
[0.404001, 0.800275, 0.362552],
[0.412913, 0.803041, 0.357269],
[0.421908, 0.805774, 0.351910],
[0.430983, 0.808473, 0.346476],
[0.440137, 0.811138, 0.340967],
[0.449368, 0.813768, 0.335384],
[0.458674, 0.816363, 0.329727],
[0.468053, 0.818921, 0.323998],
[0.477504, 0.821444, 0.318195],
[0.487026, 0.823929, 0.312321],
[0.496615, 0.826376, 0.306377],
[0.506271, 0.828786, 0.300362],
[0.515992, 0.831158, 0.294279],
[0.525776, 0.833491, 0.288127],
[0.535621, 0.835785, 0.281908],
[0.545524, 0.838039, 0.275626],
[0.555484, 0.840254, 0.269281],
[0.565498, 0.842430, 0.262877],
[0.575563, 0.844566, 0.256415],
[0.585678, 0.846661, 0.249897],
[0.595839, 0.848717, 0.243329],
[0.606045, 0.850733, 0.236712],
[0.616293, 0.852709, 0.230052],
[0.626579, 0.854645, 0.223353],
[0.636902, 0.856542, 0.216620],
[0.647257, 0.858400, 0.209861],
[0.657642, 0.860219, 0.203082],
[0.668054, 0.861999, 0.196293],
[0.678489, 0.863742, 0.189503],
[0.688944, 0.865448, 0.182725],
[0.699415, 0.867117, 0.175971],
[0.709898, 0.868751, 0.169257],
[0.720391, 0.870350, 0.162603],
[0.730889, 0.871916, 0.156029],
[0.741388, 0.873449, 0.149561],
[0.751884, 0.874951, 0.143228],
[0.762373, 0.876424, 0.137064],
[0.772852, 0.877868, 0.131109],
[0.783315, 0.879285, 0.125405],
[0.793760, 0.880678, 0.120005],
[0.804182, 0.882046, 0.114965],
[0.814576, 0.883393, 0.110347],
[0.824940, 0.884720, 0.106217],
[0.835270, 0.886029, 0.102646],
[0.845561, 0.887322, 0.099702],
[0.855810, 0.888601, 0.097452],
[0.866013, 0.889868, 0.095953],
[0.876168, 0.891125, 0.095250],
[0.886271, 0.892374, 0.095374],
[0.896320, 0.893616, 0.096335],
[0.906311, 0.894855, 0.098125],
[0.916242, 0.896091, 0.100717],
[0.926106, 0.897330, 0.104071],
[0.935904, 0.898570, 0.108131],
[0.945636, 0.899815, 0.112838],
[0.955300, 0.901065, 0.118128],
[0.964894, 0.902323, 0.123941],
[0.974417, 0.903590, 0.130215],
[0.983868, 0.904867, 0.136897],
[0.993248, 0.906157, 0.143936]]
from matplotlib.colors import ListedColormap
cmaps = {}
for (name, data) in (('magma', _magma_data),
('magma_r', _magma_data[::-1]),
('inferno', _inferno_data),
('inferno_r', _inferno_data[::-1]),
('plasma', _plasma_data),
('plasma_r', _plasma_data[::-1]),
('viridis', _viridis_data),
('viridis_r', _viridis_data[::-1])):
cmaps[name] = ListedColormap(data, name=name)
magma = cmaps['magma']
magma_r = cmaps['magma_r']
inferno = cmaps['inferno']
inferno_r = cmaps['inferno_r']
plasma = cmaps['plasma']
plasma_r = cmaps['plasma_r']
viridis = cmaps['viridis']
viridis_r = cmaps['viridis_r'] | Akvo | /Akvo-1.7.2-py3-none-any.whl/akvo/tressel/cmaps.py | cmaps.py |
import yaml
import numpy as np   # used by the Matrix/Vector wrapper classes below
def Complex_ctor(loader, node):
re = eval(node.value[0][1].value)
im = eval(node.value[1][1].value)
return re + 1j*im
yaml.add_constructor(r'Complex', Complex_ctor)
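# The classes below mirror the Eigen-style YAML types written by Lemma
# (MatrixXr, VectorXr, Vector3r, ...). Each one simply stores the parsed values
# and, through __repr__, emits an "np.array(...)" string so the data can be
# rebuilt as numpy arrays on import.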
class MatrixXr(yaml.YAMLObject):
yaml_tag = u'MatrixXr'
def __init__(self, rows, cols, data):
self.rows = rows
self.cols = cols
self.data = np.zeros((rows,cols))
def __repr__(self):
return "%s(rows=%r, cols=%r, data=%r)" % (self.__class__.__name__, self.rows, self.cols, self.data)
class VectorXr(yaml.YAMLObject):
yaml_tag = r'VectorXr'
def __init__(self, array):
self.size = np.shape(array)[0]
self.data = array.tolist()
def __repr__(self):
# Converts to numpy array on import
return "np.array(%r)" % (self.data)
class VectorXcr(yaml.YAMLObject):
yaml_tag = r'VectorXcr'
def __init__(self, array):
self.size = np.shape(array)[0]
self.datar = array.tolist()
def __repr__(self):
# Converts to numpy array on import
#return "np.array(%r)" % (self.data)
return "np.array(%r)" % (3)
class Vector3r(yaml.YAMLObject):
yaml_tag = r'Vector3r'
def __init__(self, array):
self.size = 3 #np.shape(array)[0]
self.data = array.tolist()
def __repr__(self):
# Converts to numpy array on import
return "np.array(%r)" % (self.data)
class Vector3Xcr(yaml.YAMLObject):
yaml_tag = r'Vector3Xcr'
def __init__(self, array):
self.size = 3 #np.shape(array)[0]
self.data = array.tolist()
def __repr__(self):
# Converts to numpy array on import
return "np.array(%r)" % (self.data)
class Vector3Xr(yaml.YAMLObject):
yaml_tag = r'Vector3Xr'
def __init__(self, array):
self.size = 3 #np.shape(array)[0]
self.data = array.tolist()
def __repr__(self):
# Converts to numpy array on import
return "np.array(%r)" % (self.data)
#class KernelV0( ):
#yaml_tag = r'KernelV0'
# def __init__(self):
# self.name = "hello"
#def KernelV0_constructor(loader, node):
#... value = loader.construct_scalar(node)
#... a, b = map(int, value.split('d'))
# return KernelV0( )
# class KervnelV0(yaml.YAMLObject):
# yaml_loader = yaml.Loader
# yaml_dumper = yaml.Dumper
#
# yaml_tag = u'!KernelV0'
# #yaml_flow_style = ...
#
# def __init__(self):
# self.val = 7
#
# @classmethod
# def from_yaml(cls, loader, node):
# # ...
# data = 0
# return data
#
# @classmethod
# def to_yaml(cls, dumper, data):
# # ...
# return node
class KervnelV0(yaml.YAMLObject):
yaml_tag = u'KernelV0'
def __init__(self, val):
self.val = val
class LayeredEarthEM(yaml.YAMLObject):
yaml_tag = u'LayeredEarthEM'
def __init__(self, val):
self.val = val
class PolygonalWireAntenna(yaml.YAMLObject):
yaml_tag = u'PolygonalWireAntenna'
def __init__(self, val):
self.val = val
class AkvoData(yaml.YAMLObject):
yaml_tag = u'AkvoData'
def __init__(self, obj): #nPulseMoments, pulseLength):
#def __init__(self, rows, cols, data):
#self.nPulseMoments = nPulseMoments
#self.pulseLength = pulseLength
#for key in obj.keys:
# self[key] = obj.key
pass | Akvo | /Akvo-1.7.2-py3-none-any.whl/akvo/tressel/lemma_yaml.py | lemma_yaml.py |
import numpy as np
from scipy.optimize import least_squares
from scipy.optimize import minimize
from scipy.linalg import lstsq as sclstsq
import scipy.linalg as lin
def harmonicEuler ( sN, fs, t, f0, k1, kN, ks ):
"""
Performs inverse calculation of harmonics contaminating a signal.
Args:
        sN = signal containing noise
        fs = sampling frequency
        t  = time samples
        f0 = base frequency of the sinusoidal noise
        k1 = first harmonic to fit
        kN = last harmonic to fit
        ks = harmonic step factor; harmonics are evaluated every 1/ks between k1 and kN
"""
KK = np.arange(k1, kN+1, 1/ks )
nK = len(KK)
A = np.exp(1j* np.tile(KK,(len(t), 1)) * 2*np.pi* (f0/fs) * np.tile(np.arange(1, len(t)+1, 1),(nK,1)).T)
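    # Design matrix: one complex exponential per (sub)harmonic k in KK, evaluated
    # at every sample; the complex least-squares fit below recovers the amplitude
    # and phase of each harmonic in a single solve.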
v = np.linalg.lstsq(A, sN, rcond=None)
alpha = np.real(v[0])
beta = np.imag(v[0])
amp = np.abs(v[0])
phase = np.angle(v[0])
h = np.zeros(len(t))
for ik, k in enumerate(KK):
h += 2*amp[ik] * np.cos( 2.*np.pi*(k) * (f0/fs) * np.arange(1, len(t)+1, 1 ) + phase[ik] )
return sN-h
def harmonicNorm (f0, sN, fs, t, k1, kN, ks):
#return np.linalg.norm( harmonicEuler(sN, fs, t, f0, k1, kN, ks))
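    # Crude spike rejection: only samples below 3*std(sN) contribute to the misfit norm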
ii = sN < (3.* np.std(sN))
return np.linalg.norm( harmonicEuler(sN, fs, t, f0, k1, kN, ks)[ii] )
def minHarmonic(sN, fs, t, f0, k1, kN, ks, Bounds, Nsearch):
kNs = kN
if Nsearch != False:
kNs = k1+Nsearch
if Bounds == 0:
# CG, BFGS, Newton-CG, L-BFGS-B, TNC, SLSQP, dogleg, trust-ncg, trust-krylov, trust-exact and trust-constr
res = minimize(harmonicNorm, np.array((f0)), args=(sN, fs, t, k1, kNs, ks), jac='2-point', method='BFGS') # hess=None, bounds=None )
print("UNbounded search from ", k1, " to ", kNs, res.x[0]) # for f0 with fN=10 in search", f0)
else:
bnds = ( (f0-Bounds, f0+Bounds), )
res = minimize(harmonicNorm, (f0,), args=(sN, fs, t, k1, kNs, ks), jac='2-point', method='L-BFGS-B', bounds=bnds ) # hess=None, bounds=None )
print("bounded ( +-", Bounds, ") search from ", k1, "to", kNs, res.x[0]) # for f0 with fN=10 in search", f0)
return harmonicEuler(sN, fs, t, res.x[0], k1, kN, ks), res.x[0]#[0]
def harmonicEuler2 ( sN, fs, t, f0, f0k1, f0kN, f0ks, f1, f1k1, f1kN, f1ks ):
"""
Performs inverse calculation of harmonics contaminating a signal.
Args:
sN = signal containing noise
fs = sampling frequency
t = time samples
f0 = first base frequency of the sinusoidal noise
        f0k1 = First harmonic to calculate for f0
        f0kN = Last harmonic to calculate for f0
        f0ks = subharmonics to calculate
        f1 = second base frequency of the sinusoidal noise
        f1k1 = First harmonic to calculate for f1
        f1kN = Last harmonic to calculate for f1
f1ks = subharmonics to calculate at f1 base frequency
"""
KK0 = np.arange(f0k1, f0kN+1, 1/f0ks)
nK0 = len(KK0)
A0 = np.exp(1j* np.tile(KK0,(len(t), 1)) * 2*np.pi* (f0/fs) * np.tile( np.arange(1, len(t)+1, 1), (nK0,1)).T)
KK1 = np.arange(f1k1, f1kN+1, 1/f1ks)
nK1 = len(KK1)
A1 = np.exp(1j* np.tile(KK1,(len(t), 1)) * 2*np.pi* (f1/fs) * np.tile( np.arange(1, len(t)+1, 1),(nK1,1)).T)
A = np.concatenate((A0, A1), axis=1)
v = np.linalg.lstsq(A, sN, rcond=None) # rcond=None) #, rcond=1e-8)
amp0 = np.abs(v[0][0:nK0])
phase0 = np.angle(v[0][0:nK0])
amp1 = np.abs(v[0][nK0::])
phase1 = np.angle(v[0][nK0::])
h = np.zeros(len(t))
for ik, k in enumerate(KK0):
h += 2*amp0[ik] * np.cos( 2.*np.pi*(k) * (f0/fs) * np.arange(1, len(t)+1, 1 ) + phase0[ik] )
for ik, k in enumerate(KK1):
        h += 2*amp1[ik] * np.cos( 2.*np.pi*(k) * (f1/fs) * np.arange(1, len(t)+1, 1 ) + phase1[ik] )  # f1 here, matching the A1 design matrix above
return sN-h
def harmonic2Norm (f0, sN, fs, t, f0k1, f0kN, f0ks, f1k1, f1kN, f1ks):
#return np.linalg.norm(harmonicEuler2(f0[0], f0[1], sN, fs, nK, t))
ii = sN < (3.* np.std(sN))
return np.linalg.norm( harmonicEuler2(sN, fs, t, f0[0], f0k1, f0kN, f0ks, f0[1], f1k1, f1kN, f1ks)[ii] )
def minHarmonic2(sN, fs, t, f0, f0k1, f0kN, f0ks, f1, f1k1, f1kN, f1ks, Bounds, Nsearch):
kNs0 = f0kN
kNs1 = f1kN
if Nsearch != False:
kNs0 = f0k1+Nsearch
kNs1 = f1k1+Nsearch
if Bounds == 0:
# CG, BFGS, Newton-CG, L-BFGS-B, TNC, SLSQP, dogleg, trust-ncg, trust-krylov, trust-exact and trust-constr
print("2 UNbounded ( +-", Bounds,") search length ", kNs0, kNs1 ,"for f0", f0, f1)
res = minimize(harmonic2Norm, np.array((f0, f1)), args=(sN, fs, t, f0k1, kNs0, f0ks, f1k1, kNs1, f1ks), jac='2-point', method='BFGS') # hess=None, bounds=None )
else:
# Bounded
bnds = ( (f0-Bounds, f0+Bounds),(f1-Bounds, f1+Bounds) )
print("2 bounded ( +-", Bounds,") search length ", kNs0, kNs1 ,"for f0", f0, f1)
# L-BFGS-B hess=None, bounds=None )
res = minimize(harmonic2Norm, ((f0,f1)), args=(sN, fs, t, f0k1, kNs0, f0ks, f1k1, kNs1, f1ks), jac='2-point', method='L-BFGS-B', bounds=bnds )
return harmonicEuler2(sN, fs, t, res.x[0], f0k1, f0kN, f0ks, res.x[1], f1k1, f1kN, f1ks), res.x[0], res.x[1]#[0]
def guessf0( sN, fs ):
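    """ Estimate the base noise frequency as the FFT bin with the largest magnitude. """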
S = np.fft.fft(sN)
w = np.fft.fftfreq( len(sN), 1/fs )
imax = np.argmax( np.abs(S) )
#np.save( "sN.npy", S )
#np.save( "w.npy", w )
#exit()
#plt.plot( w, np.abs(S) )
#plt.show()
#print(w)
    #print( w[imax], w[imax+1] )
return abs(w[imax])
if __name__ == "__main__":
import matplotlib.pyplot as plt
f0 = 60 # Hz
f1 = 60 # Hz
delta = np.random.rand() - .5
delta2 = np.random.rand() - .5
print("delta", delta)
print("delta2", delta2)
fs = 10000 # GMR
t = np.arange(0, 1, 1/fs)
phi = 2.*np.pi*np.random.rand() - np.pi
phi2 = 2.*np.pi*np.random.rand() - np.pi
print("phi", phi, phi2)
A = 1.0
A2 = 0.0
A3 = 1.0
nK = 10
T2 = .200
sN = A *np.sin( ( 1*(delta +f0))*2*np.pi*t + phi ) + \
A2*np.sin( ( 1*(delta2 +f1))*2*np.pi*t + phi2 ) + \
np.random.normal(0,.1,len(t)) + \
+ A3*np.exp( -t/T2 )
sNc = A *np.sin( (1*(delta +f0))*2*np.pi*t + phi ) + \
A2*np.sin( (1*(delta2+f1))*2*np.pi*t + phi2 ) + \
+ A3*np.exp( -t/T2 )
guessf0(sN, fs)
    # single base frequency; minHarmonic returns (signal with harmonics removed, fitted f0)
    h, f0_fit = minHarmonic(sN, fs, t, f0, k1=1, kN=nK, ks=1, Bounds=0, Nsearch=False)
    # two base frequencies
    #h, f0_fit, f1_fit = minHarmonic2(sN, fs, t, f0+1e-2, 1, nK, 1, f1-1e-2, 1, nK, 1, Bounds=0, Nsearch=False)
plt.figure()
plt.plot(t, sN, label="sN")
#plt.plot(t, sN-h, label="sN-h")
plt.plot(t, h, label='h')
plt.title("harmonic")
plt.legend()
plt.figure()
plt.plot(t, sN-sNc, label='true noise')
plt.plot(t, h, label='harmonic removal')
plt.plot(t, np.exp(-t/T2), label="nmr")
plt.legend()
plt.title("true noise")
plt.show() | Akvo | /Akvo-1.7.2-py3-none-any.whl/akvo/tressel/harmonic.py | harmonic.py |
# Akvo-DjangoFormGateway
[](https://github.com/akvo/Akvo-DjangoFormGateway/actions) [](https://img.shields.io/github/repo-size/akvo/Akvo-DjangoFormGateway) [](https://coveralls.io/github/akvo/Akvo-DjangoFormGateway?branch=main) [](https://img.shields.io/github/languages/count/akvo/Akvo-DjangoFormGateway) [](https://img.shields.io/github/issues/akvo/Akvo-DjangoFormGateway) [](https://img.shields.io/github/last-commit/akvo/Akvo-DjangoFormGateway/main) [](https://github.com/akvo/Akvo-DjangoFormGateway/blob/main/LICENSE)
**Akvo-DjangoFormGateway** is a Django library that enables seamless integration of messenger services such as WhatsApp, SMS, and SSID for collecting form data. It provides an easy-to-use gateway to receive and process form submissions from various messaging platforms, empowering developers to build interactive and conversational form experiences within their Django applications.
| AkvoDjangoFormGateway | /AkvoDjangoFormGateway-0.0.9.tar.gz/AkvoDjangoFormGateway-0.0.9/README.md | README.md |
# Akvo-ResponseGrouper

[](https://github.com/akvo/Akvo-ResponseGrouper/actions) [](https://img.shields.io/github/repo-size/akvo/Akvo-ResponseGrouper) [](https://coveralls.io/github/akvo/Akvo-ResponseGrouper?branch=main) [](https://img.shields.io/github/languages/count/akvo/Akvo-ResponseGrouper) [](https://img.shields.io/github/issues/akvo/Akvo-ResponseGrouper) [](https://img.shields.io/github/last-commit/akvo/Akvo-ResponseGrouper/main) [](https://Akvo-ResponseGrouper.readthedocs.io/en/latest/?badge=latest) [](https://github.com/akvo/Akvo-ResponseGrouper/blob/main/LICENSE)
FastAPI response catalog for pre-computed queries
## Install
```
$ pip install AkvoResponseGrouper
```
## Schema Requirements
Please follow [the required schema](https://github.com/akvo/Akvo-ResponseGrouper/blob/main/docs/database.org) before using AkvoResponseGrouper.
## Usage
1. Database Migration
Database migration is the first required step for AkvoResponseGrouper to work. Use the akvo-responsegrouper CLI to migrate all data sources with JSON configuration files to generate Materialized Views that AkvoResponseGrouper can then use.
```bash
$ akvo-responsegrouper
usage: akvo-responsegrouper [-h] [-c CONFIG] [-d]
optional arguments:
-h, --help show this help message and exit
-c CONFIG, --config CONFIG
akvo-responsegrouper -c <json_file_config>
-d, --drop Drop the ar_category view table
```
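For example, to build the views from a JSON configuration (the path below is only illustrative; point it to your own config file):

```bash
akvo-responsegrouper -c ./sources/category.json
```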
2. Router
To get the new endpoints provided by AkvoResponseGrouper, include the collection route in the app:
```python
from fastapi import FastAPI
from AkvoResponseGrouper.routes import collection_route
app = FastAPI(
root_path="/",
title="Akvo Response Grouper Demo",
)
app.include_router(collection_route)
@app.get("/", tags=["Dev"])
def read_main():
return "OK"
```
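Once the router is included, start the app with any ASGI server as usual; for example, assuming the snippet above is saved as `main.py`:

```bash
uvicorn main:app --reload --port 5000
```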
3. Query

After the migration, grouped responses can be queried through the collection endpoints added by `collection_route`; they are listed under "AkvoResponseGrouper - Collection" in the interactive API docs.
## Development
### Run Dev Containers
The dev environment contains two containers: the FastAPI backend and a Postgres database. To run them:
```bash
docker compose up -d
```
Before going to the next step, wait until the service has started at [http://localhost:5000](http://localhost:5000).
### Seed Necessary Data
In order to debug the data itself, we need to seed the example form and fake datapoints:
```bash
docker compose exec backend python -m script.seeder_form
docker compose exec backend python -m script.seeder_datapoint <number_of_datapoint>
```
### Migration
The dev environment uses the contents available in `Akvo-ResponseGrouper/src/AkvoResponseGrouper`. To create the Category Materialized View via the CLI in the dev environment:
Upgrade:
```bash
python -m AkvoResponseGrouper.cli.migrate -c './sources/category.json'
```
After the upgrade, you can see that "AkvoResponseGrouper - Collection" is available in the API docs, usually at [http://localhost:5000/docs](http://localhost:5000/docs) (depending on the API root path).
Downgrade:
```bash
python -m AkvoResponseGrouper.cli.migrate -c './sources/category.json' -d
```
### Teardown
```bash
docker compose down -v
```
| AkvoResponseGrouper | /AkvoResponseGrouper-1.3.3a0.tar.gz/AkvoResponseGrouper-1.3.3a0/README.md | README.md |
from colorama import Fore as f, init
from colorama import Fore
print(f'{f.WHITE}Please Wait ... {f.CYAN} -> {f.LIGHTYELLOW_EX}BackTrack\n\n{f.WHITE}Preparing to {f.RED}Setup{f.WHITE} Library\n\n')
import os
import socket
import pyfiglet
import time
import subprocess
import requests
from fuzzywuzzy import fuzz
from googlesearch import search
from bs4 import BeautifulSoup
class BackTrack:
global key
key = 'FzgnTlMaMgRMWz4ANExbPmBwGQ4='
def __init__(self, PassWord : str):
self.password = str(PassWord)
def createBanner(self, text : str):
if self.password == key:
return str(pyfiglet.figlet_format(text=text))
else:return 'password error'
def createBannerWithFont(self, text : str = None , font : str = None):
if self.password == key:
if text == None or font == None:
return "text or font is empty"
else:
return str(pyfiglet.figlet_format(text=text, font=font))
else:return "password error"
def getHostIP(self, domain : str = None):
if self.password == key:
if domain == None:
return "domain is empty"
else:
return socket.gethostbyname(domain)
else:return "password error"
def Hour(self):
if self.password == key:
return time.strftime("%H")
else:return "password error"
def Min(self):
if self.password == key:
return time.strftime("%M")
else:return "password error"
def Sec(self):
if self.password == key:
return time.strftime("%S")
else:return "password error"
def sendRequestTelegram(self, token : str = None, chatID : str = None, message : str = None):
if self.password == key:
if token == None or chatID == None or message == None:
return "token or chatID or message is empty"
else:
url = f"https://api.telegram.org/bot{token}/sendMessage?chat_id={chatID}&text={message}"
req = requests.post(url)
return req
else:return "password error"
def sendRequestTelegramWithPayload(self, token : str = None, chatID : str = None, message : str = None):
if self.password == key:
if token == None or chatID == None or message == None:
return "token or chatID or message is empty"
else:
url = f"https://api.telegram.org/bot{token}/sendMessage?chat_id={chatID}&text={message}"
mypay = {
"UrlBox" : url,
"AgentList" : "Google Chrome",
"VersionList" : "HTTP/1.1",
"MethodList" : "POST"
}
req = requests.post("https://www.httpdebugger.com/tools/ViewHttpHeaders.aspx", data=mypay)
return req
else:return "password error"
def sendRequest(self, url: str = None):
if self.password == key:
if url == None:
return "url is empty"
else:
req = requests.post(url)
return req
else:return "password error"
def sendRequestWithPayload(self, url : str = None):
if self.password == key:
if url == None:
return "url is empty"
else:
mypay = {
"UrlBox" : url,
"AgentList" : "Google Chrome",
"VersionList" : "HTTP/1.1",
"MethodList" : "POST"
}
req = requests.post("https://www.httpdebugger.com/tools/ViewHttpHeaders.aspx", data=mypay)
return req
else:return "password error"
def getTextUrl(self, url : str, Type : str):
if self.password == key:
if Type == 'post':
return requests.post(url).text
elif Type == 'get':
return requests.get(url=url).text
else:return 'Type error'
else:return 'password error'
def changeDir(self, path : str):
if self.password == key:
os.chdir(path=path)
else:return 'password error'
def sysName(self):
if self.password == key:
import os
for name in ('LOGNAME', 'USER', 'LNAME', 'USERNAME'):
user = os.environ.get(name)
if user:
return user
import pwd
return pwd.getpwuid(os.getuid())[0]
else:return 'password error'
def unzipLinux(self, fileName : str, downloadPackage = False):
if self.password == key:
if downloadPackage == False:
os.system(f"unzip {fileName}")
elif downloadPackage == True:
os.system('pkg install zip')
os.system(f'unzip {fileName}')
else:return 'downloadPackage must be True or False'
else:return 'password error'
def readFile(self, fileName : str, path : str = None):
if self.password == key:
if path == None:
try:
File = open(fileName, 'r').read()
return File
except:return 'error in openning file'
else:
try:
os.chdir(path=path)
File = open(fileName, 'r').read()
return File
except:return 'error in openning file'
else:return 'password error'
def writeFile(self, fileName : str, message : str):
if self.password == key:
try:
with open(fileName, 'a') as myFile:
myFile.write(str(message))
myFile.close()
except:return 'error in openning file'
else:return 'password error'
def osint(self, query : str):
if self.password == key:
# colorama
init(autoreset=True)
for url in search(query):
print('\n' + Fore.CYAN + '[+] Url detected: ' + url)
try:
text = requests.get(url, timeout = 1).text
except:
continue
soup = BeautifulSoup(text, "html.parser")
links_detected = []
try:
print(Fore.MAGENTA + '[?] Title: ' + soup.title.text.replace('\n', ''))
except:
print(Fore.RED + '[?] Title: null')
# Find by <a> tags
try:
for link in soup.findAll('a'):
href = link['href']
if not href in links_detected:
if href.startswith('http'):
# Filter
if url.split('/')[2] in href:
links_detected.append(href)
# If requested data found in url
elif query.lower() in href.lower():
print(Fore.GREEN + '--- Requested data found at link : ' + href)
links_detected.append(href)
# If text in link and link location is similar
elif fuzz.ratio(link.text, href) >= 60:
print(Fore.GREEN + '--- Text and link are similar : ' + href)
links_detected.append(href)
except:
continue
if links_detected == []:
print(Fore.RED + '--- No data found')
else:return 'password error'
def showDirs(self, path : str = None):
if self.password == key:
if path == None:
return subprocess.getoutput('ls || dir')
else:
os.chdir(path=path)
return subprocess.getoutput('ls || dir')
else:return 'password error'
def uploadProjectPypi(self, path : str = None, downloadLibs = True):
if self.password == key:
if path == None:
if downloadLibs == True:
os.system('py -m pip install --upgrade build')
os.system('py -m pip install --upgrade twine')
os.system('py -m build')
os.system('py -m twine upload --repository pypi dist/*')
elif downloadLibs == False:
os.system('py -m build')
os.system('py -m twine upload --repository pypi dist/*')
else:return 'downloadLibs must be True / False'
else:
if downloadLibs == True:
os.chdir(path=path)
os.system('py -m pip install --upgrade build')
os.system('py -m pip install --upgrade twine')
os.system('py -m build')
os.system('py -m twine upload --repository pypi dist/*')
elif downloadLibs == False:
os.chdir(path=path)
os.system('py -m build')
os.system('py -m twine upload --repository pypi dist/*')
else:return 'downloadLibs must be True / False'
else:return 'password error'
def uploadProjectTestPypi(self, path : str = None, downloadLibs = True):
if self.password == key:
if path == None:
if downloadLibs == True:
os.system('py -m pip install --upgrade build')
os.system('py -m pip install --upgrade twine')
os.system('py -m build')
os.system('py -m twine upload --repository testpypi dist/*')
elif downloadLibs == False:
os.system('py -m build')
os.system('py -m twine upload --repository testpypi dist/*')
else:return 'downloadLibs must be True / False'
else:
if downloadLibs == True:
os.chdir(path=path)
os.system('py -m pip install --upgrade build')
os.system('py -m pip install --upgrade twine')
os.system('py -m build')
os.system('py -m twine upload --repository testpypi dist/*')
elif downloadLibs == False:
os.chdir(path=path)
os.system('py -m build')
os.system('py -m twine upload --repository testpypi dist/*')
else:return 'downloadLibs must be True / False'
else:return 'password error' | Al4X | /Al4X-0.0.0-py3-none-any.whl/Al4X.py | Al4X.py |
# AlComplex
A Complex Number library. It uses its own complex number implementation. Its main goal is to provide a more complete API compared to the default complex number implementation in Python.
It's as simple as:
```
>>> from AlComplex import I, AlComplex, pi
>>> 2 + 3*I
2.0 + 3.0i
>>> AlComplex(2,3)
2.0 + 3.0i
>>> AlComplex(1)
1.0 + 0.0i
>>> # You can also use Polar Coordinates
>>> AlComplex.polar(2, pi)
0 - 2i
```
## Installation
Just run
``` pip install AlComplex ```
It has no external dependencies.
## Details
Basic operations with complex and real numbers are supported
```
>>> -I + 4 + 3*I
4 + 2i
>>> (25+35*I)/5
5.0 + 7.0i
>>> (-8 + 14*I)/(2+3*I)
2.0000000000000018 + 4.000000000000001i
>>> I**I
.20787957635076193 + 0.0i
```
Note that since floats behave weirdly, we use relative equality: two complex numbers are considered equal if their real and imaginary parts are within ```1e-14``` of each other. You can change this by setting AlComplex.precission to 0.
```
>>> (-8 + 14*I)/(2+3*I)
2.0000000000000018 + 4.000000000000001i
>>> (-8 + 14*I)/(2+3*I) == 2 + 4*I
True
```
AlComplex objects have a basic but complete API:
```
>>> z = 1 + I
>>> z.real
1.0
>>> z.imag
1.0
>>> z.abs()
1.4142135623730951
>>> z.phase()
0.7853981633974483
>>> z.to_polar()
(1.4142135623730951, 0.7853981633974483)
>>> z.to_rect_coord()
(1,1)
>>> z.conjugate()
1 - i
```
Note that there are many aliases and ways to get the same value:
```python
from AlComplex import phase, module, conjugate, real, imaginary
z.phase() == z.arg() == z.angle() == phase(z)
z.abs() == z.magnitude() == z.module() == abs(z) == module(z)
z.real == real(z)
z.imag == imaginary(z)
z.conjugate() == conjugate(z)
```
There are also basic math functions, optimized for Complex objects.
```
>>> from AlComplex import sin, exp, Ln
>>> from math import pi
>>> exp(2*pi*I)
1.0 + 0.0i
>>> sin(2*pi)
0.0 + 0.0i
>>> sin(2 + I)
1.4031192506220411 - 0.48905625904129324i
>>> Ln(exp(I))
0.0 + 1.0*I
```
Note that these functions work differently from ```cmath``` functions, since very small numbers are rounded to zero automatically.
```
>>> import cmath
>>> import AlComplex
>>> from math import pi
>>> cmath.sin(2*pi)
(-2.4492935982947064e-16+0j)
>>> AlComplex.sin(2*pi)
0.0 + 0.0i
>>> cmath.sin(2*pi) == 0
False
>>> AlComplex.sin(2*pi) == 0
True
```
The functions available are ```sin```, ```cos```, ```tan```, ```sec```, ```csc```, ```cot```, ```asin```, ```acos```, ```atan```, ```sinh```, ```cosh```, ```tanh```, ```sech```, ```csch```, ```coth```, ```asinh```, ```acosh```, ```atanh```, ```exp```, ```Ln```, ```sqrt``` and ```inverse```.
You can set the representation of complex numbers to use j, if you prefer.
```
>>> from AlComplex import J, use_j
>>> J
0.0 + 1.0i
>>> use_j(True)
>>> 2 + J
2.0 + 1.0j
>>> use_j(False)
>>> 2 + J
2.0 + 1.0i
```
There's also partial support for multi-valued functions. They all create generators.
```
>>> from AlComplex import int_roots, ln_values
>>> from math import pi
>>> list(int_roots(I, 3))
[0.866025403784439 + 0.5i, -0.866025403784438 + 0.5i, 0.0 - 1.0i]
>>> # Gives log(z.abs()) + (z.phase() + 2*pi*n)*I, where n takes the values from 0 to 2
>>> list(ln_values(I, 0, 3))
[0.0 + 1.5707963267948966i, 0.0 + 7.853981633974483i, 0.0 + 14.137166941154069i]
```
Currently only integer roots (int_roots) and the complex logarithm are supported. More to come.
You can also get the n-th value of the log directly
```
>>> from AlComplex import ln_n_branch
>>> ln_n_branch(I, 2)
0.0 + 14.137166941154069i
``` | AlComplex | /AlComplex-1.0.4.tar.gz/AlComplex-1.0.4/README.md | README.md |
# ****AlLoRa:**** modular, mesh, multi-device ****LoRa Content Transfer Protocol****
<p align="center">
<img src="readme_assets/logo.png" width="50%">
</p>
The code in this repository contains a toolbox that allows transferring content over a LoRa channel. It’s based on the original [LoRaCTP](https://github.com/pmanzoni/loractp), adding a more modular design with mesh capabilities and larger packet sizes for faster transfers.
~~Details of the protocol can be found in this paper: (soon)~~
## Readme on Notion!
> For a better experience, you can check our awesome **Notion** description of the code [here...](https://www.notion.so/barratia/m3LoRaCTP-ec6d1adaabcb44b39bb59d41bdf75b9b)
## Content
- [Folders](#folders)
- [AlLoRa](#AlLoRa)
- [Nodes](#nodes)
- [Node.py](#base_nodepy)
- [Sender.py](#sender_nodepy)
- [Receiver.py](#receiver_nodepy)
- [Gateway.py](#gateway_nodepy)
- [Connectors](#connectors)
- [→ Datasource.py](#datasourcepy)
- [→ Digital_Endpoint.py](#digital_endpointpy)
- [→ File.py](#-AlLoRa_filepy)
- [→ Packet.py](#-AlLoRa_packetpy)
- [Adapters](#adapters)
- [AlLoRa-WiFi_adapter](#AlLoRa-wifi_adapter)
- [Examples](#examples)
- [How does it work?](#how-does-it-work)
- [→ Communication logic](#-communication-logic)
- [→ Packet Structure](#-packet-structure)
- [Flag composition](#flag-composition)
- [→ Mesh mode](#-mesh-mode)
- [→ Debug Hops](#-debug-hops)
- [Running an example](#running-an-example)
- [Hardware Requirements](#hardware-requirements)
- [Setup](#setup)
# Folders
## **AlLoRa**
<details>
<summary>It contains all the code necessary to setup a communication network between devices, from a point-to-point using two LoPy4’s, to a mesh with a gateway and multiple edge-nodes.</summary>
### [Nodes](https://github.com/SMARTLAGOON/AlLoRa/tree/main/AlLoRa/Nodes)
<details>
<summary>A node is the element in charge of managing the communication logic for the Content Transfer Protocol.</summary>
### [Node.py](https://github.com/SMARTLAGOON/AlLoRa/blob/main/AlLoRa/Nodes/Node.py)
It is the parent class from which the other nodes inherit their base and common attributes and methods.
It receives a boolean indicating whether the system is working in **mesh mode** and a **[Connector](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b)**.
The base Node is not supposed to be instantiated; it acts as an abstract class for the other Nodes (MicroPython doesn't support abstract classes, so we use a parent class instead...)
The main methods in this class are send_request and send_response.
### [Sender.py](https://github.com/SMARTLAGOON/AlLoRa/blob/main/AlLoRa/Nodes/Sender.py)
It is a structure whose purpose, as its name implies, is to send one or more [Files](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b) . It waits and listens for requests from a [Receiver](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b) or [Gateway](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b) Node and syncs with them to send blocks (we call them chunks) of bytes of a [File,](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b) until it finishes and is ready to send another one.
<details>
<summary><b><u>Usage</u></b></summary>
### [Sender](https://github.com/SMARTLAGOON/AlLoRa/blob/ModuLoRa/AlLoRa/Nodes/Sender.py) usage:
1. Instantiation:
For the user, the Sender must be instantiated with the same parameters explained in [Node](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b), plus:
- name: A nickname for the Node. It shouldn’t be too long; we recommend a maximum of 3 characters (for testing we used single letters: Nodes “A”, “B”, “C”…).
- chunk_size (optional): The size of the payload of actual content sent in each [Packet](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b). The maximum and default chunk_size is 235 for p2p mode and 233 for mesh mode, but if for some reason the user prefers to make it smaller, this is the parameter to change.
2. Establish Connection:
The first thing to do with the Sender is to use the establish_connection method. It will wait until a message for itself arrives, in order to sync with the Receiver/Gateway Node.
3. Set a File:
Now, we can start using the Node to send [Files](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b). For this, we use the set_file method, which receives a previously instantiated object of the class [File](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b) (more about it above…). Another way to set a file to be sent is with the restore_file method, but this is only supposed to be used when the code has had some type of interruption and we need to continue sending a File “mid-chunk”.
4. Send the File:
After this, we call the send_file method, and it will manage the transfer of all the chunks of the File to be sent.
### Example:
```python
from AlLoRa.Nodes.Sender import Sender
lora_node = Sender(name = "A", connector = connector,
                   chunk_size = 235, mesh_mode = True, debug = False)
lora_node.establish_connection()
lora_node.set_file(file_to_send)
lora_node.send_file()
```
</details>
### [Receiver.py](https://github.com/SMARTLAGOON/AlLoRa/blob/main/AlLoRa/Nodes/Receiver.py)
It is a structure whose purpose, as its name implies, is to receive [Files](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b). It requests information from a [Sender](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b) and listens for the responses. In order to communicate with a specific Node, the Receiver must have that endpoint's information; for this we use the [Digital_Endpoint](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b) class, which contains the MAC address of the endpoint, manages the states of the communication, and generates the complete [File](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b) when the receiver finishes collecting all the chunks.
<details>
<summary><b><u>Usage</u></b></summary>
### [Receiver](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b) usage:
1. Instantiation:
For the user, the Receiver must be instantiated with the same parameters explained in [Node](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b), plus:
- debug_hops (optional): If True, the Senders will override the message to be sent and register the message path (or hops between Nodes); more information about this [here](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b).
- NEXT_ACTION_TIME_SLEEP (optional): The time (in seconds) between actions for the receiver when listening to the sender. The default is 0.1 seconds, but you can experiment with this value if you want.
2. Listen to endpoint:
Once instantiated, we can use the listen_to_endpoint method, which needs a [Digital_Endpoint](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b) to operate and a listening_time. We can use a loop to ensure that the [File](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b) to be received arrives completely, but we can also use this listening_time to avoid getting stuck for too long while waiting for it to arrive.
### Example:
```python
from AlLoRa.Nodes.Receiver import Receiver
lora_node = Receiver(connector = connector, mesh_mode = True, debug = False)
lora_node.listen_to_endpoint(digital_endpoint, 300)
#We can access the file like this:
ctp_file = digital_endpoint.get_current_file()
content = ctp_file.get_content()
```
</details>
### [Gateway.py](https://github.com/SMARTLAGOON/AlLoRa/blob/main/AlLoRa/Nodes/Gateway.py)
It is practically a [Receiver Node](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b) (in fact, it inherits from it), but it has the capability to manage multiple [Sender Nodes](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b), receiving a list of [Digital_Endpoints](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b) to check.
<details>
<summary><b><u>Usage</u></b></summary>
### [Gateway](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b) usage:
1. Instantiation:
For the user, the Gateway must be instantiated with the same parameters explained in [Receiver](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b) plus:
- TIME_PER_ENDPOINT: Time in seconds to spend listening to each Node; the default is 10 seconds.
2. Set list of [Digital_Endpoints](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b):
Create the necessary [Digital_Endpoints](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b) to listen to, add them to a list and pass it to the Node with the set_digital_endpoints method.
3. Check them:
Finally, execute the check_digital_endpoints method in order to listen to all the nodes, one at a time, for the time that you indicated. This function contains a while True loop, because it is supposed to keep listening to the Nodes periodically, so be careful when using it!
### Example:
```python
from AlLoRa.Nodes.Gateway import Gateway
lora_gateway = Gateway(mesh_mode = True, debug_hops = False, connector = connector)
lora_gateway.set_digital_endpoints(list_of_digital_endpoints)
lora_gateway.check_digital_endpoints() # Listening for ever...
```
</details>
</details>
### [Connectors](https://github.com/SMARTLAGOON/AlLoRa/tree/main/AlLoRa/Connectors)
<details>
<summary>A connector is the element that provides and manages a Node's access to LoRa. The main objective of the connector is to make AlLoRa available to as many types of devices as possible. Many devices have embedded LoRa capabilities, while others may not, so the connector is a class that acts as a bridge to LoRa.</summary>
### [Connector.py](https://github.com/SMARTLAGOON/AlLoRa/blob/main/AlLoRa/Connectors/Connector.py)
It is the parent class from which the connectors inherit their base attributes and methods.
It provides the methods to send and receive data using raw LoRa, and gives access to the RSSI of the last received packet and the MAC address of the device. It also contains the method send_and_wait_response, whose function is to send a packet (usually with a request) and wait for a predefined period of time (WAIT_MAX_TIMEOUT).
### [LoPy4_connector.py](https://github.com/SMARTLAGOON/AlLoRa/blob/main/AlLoRa/Connectors/LoPy4_connector.py)
This type of connector is very straightforward: it uses the LoPy4's native LoRa library (only tested on the LoPy4).
### [SX127x_connector](https://github.com/SMARTLAGOON/AlLoRa/blob/main/AlLoRa/Connectors/SX127.py)
This connector was developed for use on a Raspberry Pi connected to a Dragino LoRa/GPS HAT for RPi v1.4. It uses the SX127x library to manage the Raspberry Pi's GPIOs in order to control the Dragino and send packets over a LoRa channel. It also works with ESP32 boards that use the SX127x.
### [Wifi_connector.py](https://github.com/SMARTLAGOON/AlLoRa/blob/main/AlLoRa/Connectors/Wifi_connector.py)
It is the counterpart of the [AlLoRa-WiFi_adapter](https://github.com/SMARTLAGOON/AlLoRa/blob/main/AlLoRa-Adapters), developed for use on a Raspberry Pi but also tested on computers running macOS and Windows.
</details>
### → [Datasource.py](https://github.com/SMARTLAGOON/AlLoRa/blob/main/AlLoRa/DataSource.py)
A Datasource is a handy class that can be used to manage the files to be sent. It is meant to feed the Files to be sent to a Sender Node.
### → [Digital_Endpoint.py](https://github.com/SMARTLAGOON/AlLoRa/blob/main/AlLoRa/Digital_Endpoint.py)
Contains the MAC Address of the endpoint to communicate with and manages the states of the communication. It also manages the generation of the complete [File](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b) when the receiver finishes collecting all the chunks.
It also manages the “state” or phase in which the transfer is.
### → [AlLoRa_File.py](https://github.com/SMARTLAGOON/AlLoRa/blob/main/AlLoRa/AlLoRa_File.py)
It is the class that handles the actual File to be sent or received. It can be used to obtain the chunks of content that a Sender Node transfers, and also to assemble all the received blocks into the complete File on the Receiver/Gateway side.
It can be instantiated with content (a byte array) for the Sender to transmit, or as a “container” that receives the chunks and finally assembles them to obtain the whole content on the Receiver side.
### → [AlLoRa_Packet.py](https://github.com/SMARTLAGOON/AlLoRa/blob/main/AlLoRa/AlLoRa_Packet.py)
This class structures the actual packet to be sent through LoRa. It manages the creation of the message to be sent and is also capable of loading the data received over LoRa in order to check that the message was correctly received (via checksum).
It is composed of a header and the actual payload.
More details about the structure of the packages [here](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b).
</details>
## Adapters
<details>
<summary>Sometimes another device is needed in order to bridge to LoRa, depending on the technology used for the connection. In these cases, the code for the adapters lives in this folder; for now we have a Wi-Fi to LoRa adapter.</summary>
### [AlLoRa-WiFi_adapter](https://github.com/SMARTLAGOON/AlLoRa/tree/main/AlLoRa-Adapters/AlLoRa-WiFi_adapter)
It contains the code for a LoPy4. It activates a hotspot for the bridged Node to connect to, and runs a “light version” that combines the code of a Node and a Connector.
It operates in this way:
🍓 Raspberry Pi/Computer Node (Wifi Connector) **←Wi-Fi→** LoPy4 with AlLoRa-WiFi_adapter **←LoRa→** Node
<details>
<summary><b><u>Setup Lopy4 Adapter</u></b></summary>
1. Setup the LoPy4 (following this [instructions](#setup)).
2. Uploading and running the code
1. Open the [AlLoRa-WiFi_adapter](https://github.com/SMARTLAGOON/AlLoRa/tree/main/AlLoRa-Adapters/AlLoRa-WiFi_adapter) folder of the repo in your IDE
2. Connect your LoPy4 + expansion board to your computer. PyMakr should recognise it and show you something like this:

- If it doesn’t do it automatically, you can open the “Connect Device” option and manually select your Port:
<p align="center">
<img width="400" src="readme_assets/Hardware_Setup/Untitled%204.png">
</p>
3. Open the config.txt file and set up an SSID and password for the LoPy4's Wi-Fi hotspot; you will use these to connect the device that you are bridging to LoRa to the adapter.
4. Press Ctrl+Alt/Opt + s or the “Upload Project to Device” button to upload the code to the LoPy4

With this, the code will boot automatically each time the LoPy4 is on.
5. If everything is ok, you should see something like this on the terminal:
<p align="center">
<img width="400" src="readme_assets/Hardware_Setup/Untitled%207.png">
</p>
6. Import and setup a [Wifi_connector.py](https://github.com/SMARTLAGOON/AlLoRa/blob/main/AlLoRa/Connectors/Wifi_connector.py) in the device that you want to communicate using AlLoRa, and then use the rest of the library as explained in this repo. Your device should be connected to the Adapter's hotspot and everything should work as normal.
</details>
</details>
## Examples
<details>
<summary>Contain examples of uses of the AlLoRa code.</summary>
### [LoPy Sender](https://github.com/SMARTLAGOON/AlLoRa/tree/main/examples/LoPySender)
A simple implementation of a Sender Node use case; it sends increasingly larger files of numbers.
### [LoPy Receiver](https://github.com/SMARTLAGOON/AlLoRa/tree/main/examples/LoPyReceiver)
A simple implementation of a Receiver Node use case; it asks a Sender Node for information and listens to the responses.
### [Raspberry Gateway](https://github.com/SMARTLAGOON/AlLoRa/tree/main/examples/RaspberryGateway)
An implementation of a Gateway Node using a Raspberry Pi (or a desktop computer); it accesses LoRa using the [AlLoRa-WiFi_adapter](https://github.com/SMARTLAGOON/AlLoRa/tree/main/AlLoRa/Adapters/AlLoRa-WiFi_adapter), listens to two Sender Nodes and prints the content when a whole File is received.
</details>
# **How does it work?**
<p align="center">
<img width="700" src="readme_assets/figures/Untitled.png">
</p>
As we can see in the image above, the protocol is structured in a symmetrical way. On the left we have the Sender side, with a [Sender Node](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b) that receives an [AlLoRa File](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b) to be sent from a [Data Source](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b), and uses a [Connector](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b) to access LoRa and send [AlLoRa Packets](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b).
On the right we have the Receiver side, with a [Receiver Node](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b) that receives a [Digital Endpoint](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b) providing the Sender's information, so that it can listen to it and receive the [AlLoRa File](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b). It also uses a [Connector](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b) to access LoRa and receive the [AlLoRa Packets](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b), which contain the chunks (blocks of bytes) of the transmitted content.
## → Communication logic
The system follows a logic of requests from the Receiver to the Sender. Depending on the state of the [Digital Endpoint](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b), the Receiver will send requests to the specific Sender and wait some time for an answer or reply. If the answer does not arrive, or arrives corrupted, the Receiver Node will repeat the request until the message arrives correctly (with a timeout when necessary).
The [Digital Endpoints](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b) operate with the following states or phases of the communication:
<img align="right" width="400" src="readme_assets/figures/Untitled%201.png">
1. **Establish connection**
Every Digital Endpoint starts in this state. It sends a simple packet with the command “ok” and waits until an “ok” from the sender is received; then it continues to the next state.
2. **Ask metadata**:
This is the first step for receiving an [AlLoRa File](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b): it asks the Sender for the metadata of the content to be received and waits until a Packet arrives with the name and the number of chunks of the content. In this stage, the [Digital Endpoint](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b) creates an empty [AlLoRa File](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b) object that will act as a container for the incoming chunks. If successful, it continues to the next state.
3. **Ask for data**
In this state, the Receiver will sequentially ask for the chunks necessary to obtain the whole content. When a chunk arrives, it feeds the [AlLoRa File](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b) object until all chunks are collected. When the [AlLoRa File](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b) is complete, it is assembled and the content is ready to be accessed or saved.
4. **Final acknowledge**
In order to maintain the synchronization between the Nodes, a final acknowledge will be sent, and the system will wait until the Sender replies with an “ok” command.
*More information about how the [commands](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b) work can be found in the Packet Structure section.*
## → Packet Structure
The [Packet](https://github.com/SMARTLAGOON/AlLoRa/blob/ModuLoRa/AlLoRa/Packet.py) is the element that is sent and received through LoRa. It is designed to maximize the amount of actual content (or chunk size) sent each time, but also to ensure the correct reception of the packet by the Node that is supposed to receive it.
For compatibility's sake, it is designed to have a maximum of 255 bytes, which is the maximum size of a LoRa message on a LoPy4.
The header size varies depending on the enabled mode (mesh or point-2-point), but both modes share a common 20-byte header: the first 16 bytes contain the first 8 characters of the MAC addresses of the source and destination Nodes, 1 byte holds the message's command and flags (explained in more detail [below](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b)), and another 3 bytes hold the checksum of the content, which is used to check whether the content arrived correctly or was corrupted.
Finally, if the system is working in [Mesh mode](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b) (detailed below), an additional 2 bytes are used to store a message ID. The ID is a random number between 0 and 65,535 (the range of values that can be represented with 2 bytes) and it is used to manage retransmissions in mesh mode and to avoid chunk duplication in the receiver.
<div align="center">
<table>
<tr>
<th>Point-2-point Packet</th>
<th>Mesh Packet</th>
</tr>
<tr>
<td>
<pre>
<img align="center"
src="readme_assets/figures/Untitled%202.png"
title = "hola"
width="300"
/>
</pre>
</td>
<td>
<img align="center"
src="readme_assets/figures/Untitled%202.png"
title = "hola"
width="300"
/>
</td>
</tr>
</table>
</div>
With this, the point-2-point Packet has 235 Bytes maximum for its payload, while the mesh Packet has 233 available Bytes. It seems like a small difference, but with 255 Bytes maximum per Packet, every Byte counts when sending Kilobytes of data.
### Flag composition
The Flag Byte is structured as follows:
<p align="center">
<img src="readme_assets/figures/Untitled%204.png"
alt="Picture"
width="500"
style="display: block; margin: 0 auto" />
</p>
- **Command bits**: 2 bits that combined represent one of four types of commands:
    - **00 → DATA:** The command activated when the payload contains a requested chunk.
    - **01 → OK:** The acknowledgement command. It is used to establish the connection between nodes or to notify the correct reception of the final chunk of the content being received. It usually implies that the payload is empty.
    - **10 → CHUNK:** This command is used by the Receiver/Gateway to ask for a chunk of the content being received. The chunk number is stored in the payload, so the Sender knows which block is being requested.
    - **11 → METADATA:** This command is used by the Receiver/Gateway to ask for the metadata of the file to be received. If this is the case, the payload of the request will be empty. It is also used by the Sender to answer the metadata request; in this case the payload contains the name and size of the File to be sent.
- **Retransmission bit**: Not used for the moment.
- **Mesh bit**: 1 bit that indicates if the message is supposed to be forwarded or not (more about this in the [Mesh mode](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b) segment).
- **Hop bit**: 1 bit that is True if the message was forwarded at some point (more about this in the [Mesh mode](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b) segment).
- **Debug hop bit**: 1 bit that indicates that the message in question is in “debug hop mode” (more about this in the [Debug hops](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b) segment).
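As an illustration of how such a flag byte can be packed and unpacked, here is a small sketch. The bit positions below are assumptions made for the example; the authoritative layout is the one implemented in AlLoRa_Packet.py.
```python
COMMANDS = {"DATA": 0b00, "OK": 0b01, "CHUNK": 0b10, "METADATA": 0b11}

def pack_flags(command, retransmission=False, mesh=False, hop=False, debug_hop=False):
    flags = COMMANDS[command]            # bits 0-1: command
    flags |= int(retransmission) << 2    # bit 2: retransmission (unused for now)
    flags |= int(mesh) << 3              # bit 3: mesh
    flags |= int(hop) << 4               # bit 4: hop
    flags |= int(debug_hop) << 5         # bit 5: debug hop
    return flags

def unpack_flags(byte):
    command = next(name for name, bits in COMMANDS.items() if bits == byte & 0b11)
    return {"command": command,
            "retransmission": bool(byte >> 2 & 1),
            "mesh": bool(byte >> 3 & 1),
            "hop": bool(byte >> 4 & 1),
            "debug_hop": bool(byte >> 5 & 1)}

print(unpack_flags(pack_flags("CHUNK", mesh=True)))
```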
## → Mesh mode
If the communication protocol has mesh mode activated, the communication works exactly as described [before](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b), but if a request is not answered by a Sender a specific number of times (set by the user), the [Digital Endpoint](https://www.notion.so/AlLoRa-ec6d1adaabcb44b39bb59d41bdf75b9b) jumps into “retransmission mode”, activating the mesh bit in the Packet in order to tell the other Nodes in the system to retransmit the message if they receive it. This extends the reach of the system and tries to establish the communication with the missing Node.
If a Sender Node receives a Packet that is not for itself, it usually discards it and keeps listening for requests directed to it. But with the mesh bit activated, it will forward the Packet to help it reach its real destination. For this forwarding, the Node sleeps for a random time between 0.1 and 1 second before sending it; this reduces the possibility of collisions between Packets when multiple Nodes are active and within reach of each other. Each time a Packet is forwarded, its Hop bit is activated in order to announce that it actually went through other devices along its path. When the destination Node receives its message, it notices that the message arrived using the “retransmission mode” and creates a response Packet with the mesh bit activated, because it assumes that if the request arrived this way, the response will probably reach the Gateway by jumping through the same path. In this case, the Node doesn't sleep before sending the response, always prioritizing the sender Node that is being asked for something.
If the response Packet arrives at the Gateway with the Hop bit off, it means that it didn't go through any other Node to answer the request, indicating that retransmissions may not be needed. In this case the Gateway will deactivate the “retransmission mode” for this specific Digital Endpoint.
In order to avoid duplication and excessive retransmission of messages that could collapse the system, each new Packet is assigned a random ID by the Node, which is saved in a fixed-size list that is checked whenever a new message with the mesh bit activated arrives. The Nodes also keep another fixed-size list with the IDs of all forwarded messages, which is checked to avoid forwarding the same Packet multiple times.
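The forwarding rule can be summarised with a small sketch. The packet fields, list size and send callback below are hypothetical and only illustrate the behaviour described above; they are not the actual AlLoRa implementation.
```python
import random, time

MAX_IDS = 100                 # fixed-size list of already forwarded message IDs
forwarded_ids = []

def maybe_forward(packet, my_mac, send):
    if packet.destination == my_mac:
        return False                        # the packet is for us: handle it normally
    if not packet.mesh or packet.id in forwarded_ids:
        return False                        # not a mesh packet, or already relayed
    time.sleep(random.uniform(0.1, 1.0))    # random back-off to reduce collisions
    packet.hop = True                       # mark that the packet was relayed
    send(packet)
    forwarded_ids.append(packet.id)
    if len(forwarded_ids) > MAX_IDS:
        forwarded_ids.pop(0)
    return True
```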
## → Debug Hops
Debug hops is an option that can be activated when instantiating a Receiver or Gateway Node, and it is a useful tool to check the path of a Packet when using mesh mode. It overrides the messages and instead records in the payload each Node the Packet passes through. This information can later be retrieved from the Receiver/Gateway device's memory and used to make decisions about the distribution of the Nodes in the area to cover.
The output of this process generates a log_rssi.txt file that looks like this:
```
2022-06-17_17:11:40: ID=24768 -> [['B', -112, 0.5], ['A', -107, 0], ['B', -106, 0.3], ['C', -88, 0.2], ['G', -100, 0]]
2022-06-17_17:11:50: ID=2065 -> [['C', -99, 0.4], ['B', -93, 0], ['C', -93, 0.2], ['G', -105, 0]]
2022-06-17_17:11:53: ID=63728 -> [['C', -100, 0.4], ['B', -95, 0], ['C', -95, 0.5], ['G', -103, 0]]
2022-06-17_17:11:54: ID=32508 -> [['B', -114, 0], ['C', -95, 0.4], ['G', -103, 0]]
2022-06-17_17:11:56: ID=10063 -> [['C', -99, 0.1], ['B', -95, 0], ['C', -94, 0.1], ['G', -103, 0]]
```
Each line shows the time of reception, the ID of the message, and the list of hops the Packet made. Each hop records the name of the Node, the RSSI of the last packet received over LoRa when registering the hop, and the random time the Node had to wait before forwarding the message. As we can see, in some cases this random sleep is 0. This is not by chance: those Nodes were the destination of the Gateway's requests and, as mentioned before, they have priority.
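Since each line of log_rssi.txt embeds a Python-style list, the hop paths can be inspected with a few lines of standard-library code (a convenience sketch, not part of AlLoRa):
```python
import ast

def parse_log_line(line):
    # "2022-06-17_17:11:50: ID=2065 -> [['C', -99, 0.4], ...]"
    timestamp, rest = line.split(": ID=", 1)
    msg_id, hops = rest.split(" -> ", 1)
    return timestamp, int(msg_id), ast.literal_eval(hops)

with open("log_rssi.txt") as log:
    for line in log:
        when, msg_id, hops = parse_log_line(line.strip())
        path = " -> ".join(node for node, rssi, sleep in hops)
        print(when, msg_id, path)
```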
# Running an example
### Hardware Requirements
- [Raspberry Pi 4](https://www.raspberrypi.com/products/raspberry-pi-4-model-b/)
- [Lopy4](https://pycom.io/product/lopy4/) with an expansion board like:
- [Pysense 2.0 X](https://pycom.io/product/pysense-2-0-x/)
- [Pygate 868](https://pycom.io/product/pygate/)
### Setup:
In order to run an example, if you are using a Python compatible device, you should install the latest version of the AlLoRa library. If your device has MicroPython support (like the LoPy4), we recommend copying the AlLoRa folder of this repo directly onto your device.
<details>
<summary><b><u>Setup a LoPy4</u></b></summary>
* <details>
<summary><b>1. Updating the expansion boards (Pysense 2.0 X or Pygate)</b></summary>
Follow this: [Updating Expansion Board Firmware](https://docs.pycom.io/chapter/pytrackpysense/installation/firmware.html)
* <details>
<summary><b><i>TL;DR ⚡ </i></b></summary>
>
> ⚠️ You should remove the LoPy4 from the board for this step, we are only working with the Pysense 2 or the Pygate
>
1. Download this:
• **[Pysense 2 DFU](https://software.pycom.io/findupgrade?key=pysense2.dfu&type=all&redirect=true)**
• **[Pygate](https://software.pycom.io/findupgrade?key=pygate.dfu&type=all&redirect=true)**
2. Install dfu-util:
- MacOs
```bash
brew install dfu-util
```
- Linux
```bash
sudo apt-get install dfu-util
```
- Windows
Harder, follow the [official explanation](https://docs.pycom.io/chapter/pytrackpysense/installation/firmware.html) or check-out this video:
[https://www.youtube.com/watch?v=FkycTZvj-ss](https://www.youtube.com/watch?v=FkycTZvj-ss)
3. Use dfu-util to update each expansion board
Write this in the terminal
- MacOs and Linux
- Update Pysense 2:
```bash
sudo dfu-util -D pysense2_v16.dfu #This name will change with new versions, match it...
```
- Update Pygate:
```bash
sudo dfu-util -D pygate_v13_1049665.dfu #This name will change with new versions, match it...
```
- Windows
- Update Pysense 2:
```bash
dfu-util-static.exe -D #This name will change with new versions, match it...
```
- Update Pygate:
```bash
dfu-util-static.exe -D #This name will change with new versions, match it...
```
Connect the expansion board to your computer while pressing the DFU button (toggle to check where it is, depending on the board...)
- Pysense 2

- Pygate

Wait 1 second, release the DFU button and press enter in the terminal to run the code.
As a result, you should expect something like this:

4. Check it with:
```bash
lsusb
```
You should expect something like this:
```bash
Bus 000 Device 001: ID 04d8:f012 Microchip Technology Inc. Pysense Serial: Py8d245e
```
</details>
* <details>
<summary><b>2. Update the Lopy4 </b></summary>
1. Download the Pycom Firmware Tool from: [Updating Device Firmware](https://docs.pycom.io/updatefirmware/device/)
2. Download this legacy firmware: [LoPy4-1.19.0.b4.tar.gz](https://software.pycom.io/downloads/LoPy4-1.19.0.b4.tar.gz)
- (You can find it here) [Firmware Downgrade](https://docs.pycom.io/advance/downgrade/)
3. Connect each LoPy4 to its respective Expansion Board (the LED side of the LoPy should be facing the USB port of the expansion board) and then plug it into your computer
4. Open Pycom Firmware Tool and press continue 2 times to get to the “Communication” section
5. Select the port and the speed (for me 115200 worked ok), select the “Show Advanced Settings” checkbox and select “Flash from local file” and locate the firmware that we downloaded a few steps before (LoPy4-1.19.0.b4.tar.gz).
6. Select the Erase flash file system and Force update LoRa region and press continue
7. In the LoRa region selection select your country or region to establish your LoRa frequency.
8. Press “Done” and it should start updating
9. Repeat this step with the other LoPy4 with its respective expansion board...
</details>
* <details>
<summary><b>3. Setting the environment</b></summary>
Here is the official [documentation](https://docs.pycom.io/gettingstarted/software/) for this step.
We’ll need to upload the programs using PyMakr, a library that can be installed into [VS Code](https://code.visualstudio.com/) and [Atom](https://atom.io/) (I will refer to them as [IDE](https://en.wikipedia.org/wiki/Integrated_development_environment))
> ⚠️ I personally used an M1 Pro Macbook Pro and Atom with PyMakr and it worked fine for me.
- Here is the official Pycom guide to using Atom + PyMakr: [Atom](https://docs.pycom.io/gettingstarted/software/atom/)
- If you want to use VS Code, here are the official Pycom instructions: [Visual Studio Code](https://docs.pycom.io/gettingstarted/software/vscode/)
Once you have everything installed and working, you should be able to connect your LoPy4 + expansion board (Pygate and Pysense 2.0 X for the sender and the receiver respectively) to your computer using an USB cable and PyMakr should recognise it.
</details>
* <details>
<summary><b>4. Uploading and running code</b></summary>
1. Open the folder of the example you want to run in the LoPy4 in your IDE
2. Connect your LoPy4 + expansion board to your computer. PyMakr should recognise it and show you something like this:
<p align="center">
<img width="500" src="readme_assets/Hardware_Setup/Untitled%203.png">
</p>
- If it doesn’t do it automatically, you can open the “Connect Device” option and manually select your Port:
<p align="center">
<img width="400" src="readme_assets/Hardware_Setup/Untitled%204.png">
</p>
3. Press Ctrl+Alt/Opt + s or the “Upload Project to Device” button to upload the code to the LoPy4

With this, the code will boot automatically each time the LoPy4 is on.
4. If everything is ok, you should see something like this on the terminal:
<p align="center">
<img width="400" src="readme_assets/Hardware_Setup/Untitled%207.png">
</p>
</details>
</details>
<details>
<summary><b><u>Raspberry Pi 4 as a Gateway</u></b></summary>
1. Setup your Raspberry Pi 4 with [Raspberry Pi OS 32bit](https://www.raspberrypi.com/software/) and [install Python 3.8.](https://itheo.tech/install-python-38-on-a-raspberry-pi)
2. Setup an [adapter](#adapters) to give access to the Raspberry Pi to LoRa.
3. Download the [RaspberryGateway](https://github.com/SMARTLAGOON/AlLoRa/examples/RaspberryGateway) example and execute it.
</details>
| AlLoRa | /AlLoRa-1.1.0.tar.gz/AlLoRa-1.1.0/README.md | README.md |
# Alphac Registration Code
This is a small package for generating incremental registration codes.
The format is: CustomTextYYYYMMIncrementCode
### How To Use :
##### 1. Import Module Name
#
```Python
import alregcode
```
##### 2. Use Syntax
#
>regGenerate("Your Custom String", Start Number, CodeLength)
#
```Python
regGenerate("TEST", 0, 4)
```
##### To get the increment number, just write
#
```Python
regGenerate.inc
```
### Example :
I want to generate and print a registration code with the following conditions:
1. I want the word "ALPHAC" to come first
2. I want the increment to start from 00001
So I write:
```Python
from alregcode import regGenerate
test = regGenerate("ALPHAC", 0, 5)
print(test)
```
#### Output :
> ALPHAC20190100001
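For clarity, the generated code is just the custom text, the current year and month, and a zero-padded counter. The sketch below reproduces that format for illustration only; it is not the package's actual implementation and it ignores the persistent increment exposed via regGenerate.inc.
```Python
from datetime import date

def reg_generate(custom_text, start, code_length):
    today = date.today()
    increment = start + 1   # first code generated after `start`
    return f"{custom_text}{today.year}{today.month:02d}{str(increment).zfill(code_length)}"

print(reg_generate("ALPHAC", 0, 5))   # e.g. ALPHAC20190100001 if run in January 2019
```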
| AlRegCode | /AlRegCode-0.0.3.tar.gz/AlRegCode-0.0.3/README.md | README.md |
import time,os,random
class tool:
__my_headers__ = [
# Various desktop user agents
# Opera
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 OPR/26.0.1656.60",
"Opera/8.0 (Windows NT 5.1; U; en)",
"Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.50",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 9.50",
# Firefox
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0",
"Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10",
# Safari
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2",
# chrome
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.133 Safari/534.16",
# 360
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko",
# Taobao Browser
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11",
# Liebao (Cheetah) Browser
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)",
# QQ Browser
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; QQBrowser/7.0.3698.400)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
# Sogou Browser
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 SE 2.X MetaSr 1.0",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; SE 2.X MetaSr 1.0)",
# Maxthon Browser
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Maxthon/4.4.3.4000 Chrome/30.0.1599.101 Safari/537.36",
# UC Browser
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122 UBrowser/4.0.3214.0 Safari/537.36",
# Various mobile user agents
# IPhone
"Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
# IPod
"Mozilla/5.0 (iPod; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
# IPAD
"Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; zh-cn) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
"Mozilla/5.0 (iPad; U; CPU OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
# Android
"Mozilla/5.0 (Linux; U; Android 2.2.1; zh-cn; HTC_Wildfire_A3333 Build/FRG83D) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.3.7; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
# QQ Browser, Android version
"MQQBrowser/26 Mozilla/5.0 (Linux; U; Android 2.3.7; zh-cn; MB200 Build/GRJ22; CyanogenMod-7) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
# Android Opera Mobile
"Opera/9.80 (Android 2.3.4; Linux; Opera Mobi/build-1107180945; U; en-GB) Presto/2.8.149 Version/11.10",
# Android Pad Moto Xoom
"Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
# BlackBerry
"Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1+ (KHTML, like Gecko) Version/6.0.0.337 Mobile Safari/534.1+",
# WebOS HP Touchpad
"Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.0; U; en-US) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/233.70 Safari/534.6 TouchPad/1.0",
# Nokia N97
"Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/20.0.019; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) BrowserNG/7.1.18124",
# Windows Phone Mango
"Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; HTC; Titan)",
# UC Browser
"UCWEB7.0.2.37/28/999",
"NOKIA5700/ UCWEB7.0.2.37/28/999",
# UCOpenwave
"Openwave/ UCWEB7.0.2.37/28/999",
# UC Opera
"Mozilla/4.0 (compatible; MSIE 6.0; ) Opera/UCWEB7.0.2.37/28/999",
# Some additional desktop user agents
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
"Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:30.0) Gecko/20100101 Firefox/30.0",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/537.75.14",
"Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Win64; x64; Trident/6.0)",
'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
'Opera/9.25 (Windows NT 5.1; U; en)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9',
"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7",
"Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0 ",
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36'
]
def get_headers(self):
return random.choice(self.__my_headers__)
def get_now_date(self):
'''
===========================
Return the current date as a YYYYMMDD string
===========================
'''
YY = time.strftime("%Y",time.localtime())
MM = time.strftime("%m",time.localtime())
DD = time.strftime("%d",time.localtime())
return YY+MM+DD
class StuManage:
__file_name__ = 'student.txt'
def usr(self):
while True:
self.menu()
choose = int(input('请选择:'))
if choose in range(8):
if choose == 0:
answer = input('您确定要退出系统吗?(Y/N):')
if answer == 'y' or answer == 'Y':
print('谢谢您的使用!')
break
else:
continue
elif choose == 1:
self.insert()
elif choose == 2:
self.search()
elif choose == 3:
self.delete()
elif choose == 4:
self.modify()
elif choose == 5:
self.sort()
elif choose == 6:
self.total()
elif choose == 7:
self.show()
def menu(self):
print('='*20,'学生信息管理系','='*20)
print('-'*20,'功能菜单','-'*20)
print('\t\t1.录入学生成绩')
print('\t\t2.查找学生成绩')
print('\t\t3.删除学生成绩')
print('\t\t4.修改学生成绩')
print('\t\t5.排序')
print('\t\t6.统计学生总人数')
print('\t\t7.显示所有学生信息')
print('\t\t0.退出')
def show_student(self,lst):
if len(lst) == 0:
print('没有查询到学生,无数据显示!')
return
format_title = '{:^6}\t{:^12}\t{:^8}\t{:^10}\t{:^10}\t{:^8}'
print(format_title.format('ID','姓名','英语成绩','Python成绩','Java成绩','总成绩'))
format_data = '{:^6}\t{:^12}\t{:^8}\t{:^8}\t{:^8}\t{:^8}'
for item in lst:
print(format_data.format(item.get('id'),
item.get('name'),
item.get('english'),
item.get('python'),
item.get('java'),
int(item.get('english'))+int(item.get('python'))+int(item.get('java'))
))
def insert(self):
student_list = []
while True:
id = input('请输入ID:')
if not id:
break
name = input('请输入姓名')
if not name:
break
try:
english = int(input('请输入英语成绩:'))
python = int(input('请输入python成绩:'))
java = int(input('请输入java成绩:'))
except:
print('输入成绩无效,不是整数类型,请重新输入')
student = {'id':id,'name':name,'english':english,'python':python,'java':java}
student_list.append(student)
anawer = input('是否继续添加信息(Y/N):')
if anawer == 'y' or anawer == 'Y':
continue
else:
break
self.save(student_list)
print('学生信息录入完毕!!!')
def save(self,lst):
try:
stu_txt = open(self.__file_name__,'a',encoding='utf-8')
except:
stu_txt = open(self.__file_name__,'w',encoding='utf-8')
for item in lst:
stu_txt.write(str(item)+'\n')
stu_txt.close()
def search(self):
pass
def delete(self):
while True:
student_id = input('请输入要删除的学生都ID:')
if student_id != '':
if os.path.exists(self.__file_name__):
with open(self.__file_name__,'r',encoding='utf-8') as file:
student_old = file.readlines()
else:
student_old = []
flag = False
if student_old:
with open(self.__file_name__,'w',encoding='utf-8') as wfile:
d={}
for item in student_old:
d = dict(eval(item))
if d['id'] != student_id:
wfile.write(str(d)+'\n')
else:
flag = True
if flag:
print(f'id为{student_id}的学生信息已被删除')
else:
print(f'没有找到{student_id}的学生信息')
else:
print('无学生信息')
break
self.show()
answer = input('是否继续修改其他学生信息?(Y/N):')
if answer == 'y' or answer == 'Y':
continue
else:
break
def modify(self):
self.show()
if os.path.exists(self.__file_name__):
with open(self.__file_name__,'r',encoding='utf-8') as rfile:
student_old = rfile.readlines()
else:
return
student_id = input('请输入要修改的学员ID:')
with open(self.__file_name__,'w',encoding='utf-8') as wfile:
for item in student_old:
d = dict(eval(item))
if d['id'] == student_id:
print('找到学生信息,可以修改他的相关信息了!')
while True:
try:
d['name'] = input('请输入姓名')
d['english'] = input('请输入英语成绩')
d['python'] = input('请输入python成绩')
d['java'] = input('请输入java成绩')
except:
print('您的输入有误,请重新输入!!!')
else:
break
wfile.write(str(d)+'\n')
print('修改成功')
else:
wfile.write(str(d)+'\n')
answer = input('是否继续修改其他学生信息?(Y/N):')
if answer == 'y' or answer == 'Y':
self.modify()
def sort(self):
self.show()
if os.path.exists(self.__file_name__):
with open(self.__file_name__,'r',encoding='utf-8') as rfile:
student_list = rfile.readlines()
student_new = []
for item in student_list:
d = dict(eval(item))
student_new.append(d)
else:
return
asc_or_desc = input('请选择(0.升,1.降)')
if asc_or_desc == '0':
asc_or_desc = False
elif asc_or_desc == '1':
asc_or_desc =True
else:
print('您到输入有误,请重新输入')
self.sort()
mode = input('请选择排序方式(1.按英语成绩排序,2.按python成绩排序,3.按java成绩排序,0.按总成绩排序)')
if mode == '1':
student_new.sort(key = lambda x :int(x['english']),reverse = asc_or_desc )
elif mode == '2':
student_new.sort(key = lambda x :int(x['python']),reverse = asc_or_desc )
elif mode == '3':
student_new.sort(key = lambda x :int(x['java']),reverse = asc_or_desc )
elif mode == '0':
student_new.sort(key = lambda x :int(x['english'])+int(x['python'])+int(x['java']),reverse = asc_or_desc )
else:
print('您到输入有误,请重新输入')
self.sort()
self.show_student(student_new)
def total(self):
if os.path.exists(self.__file_name__):
with open(self.__file_name__,'r',encoding='utf-8') as rfile:
students = rfile.readlines()
if students:
print(f'一共有{len(students)}名学生')
else:
print('还没有录入学生信息')
def show(self):
student_lst = []
if os.path.exists(self.__file_name__):
with open(self.__file_name__,'r',encoding='utf-8') as rfile:
students = rfile.readlines()
for item in students:
student_lst.append(eval(item))
if student_lst:
self.show_student(student_lst) | Alanos | /Alanos-0.0.0.11.tar.gz/Alanos-0.0.0.11/alanos/alanos.py | alanos.py |
# Alapchari
Alapchari is a Python class that provides a simple interface for interacting with the Chatrik API. The Chatrik API is an AI-based natural language processing API that generates text based on a given prompt.
## Installation
You can install Alapchari using pip:
```
pip install Alapchari
```
## Usage
```python
from Alapchari import Alapchari
alapchari = Alapchari()
prompt = "বঙ্গবন্ধু শেখ মুজিবুর রহমান কে ছিলেন ?"
result = alapchari.ask(prompt)
print(result)
```
The `Alapchari` class has three attributes:
- `count`: This is an integer attribute that keeps track of the number of times the `ask` method has been called.
- `url`: This is the URL of the Chatrik API.
- `method`: This is the HTTP method used to send the request to the Chatrik API.
The `Alapchari` class has one method:
- `ask(prompt:str) -> str`: This method takes a string `prompt` as input and returns a string generated by the Chatrik API based on the given `prompt`.
Calling `ask` sends a request to the Chatrik API with the given prompt and returns the generated text; the example above prints the answer to the Bengali question.
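For reference, a wrapper with these attributes could look roughly like the sketch below. The endpoint URL, request payload and response handling are placeholders, not the real Chatrik API contract used inside the package.
```python
import requests

class AlapchariSketch:
    def __init__(self):
        self.count = 0
        self.url = "https://example.invalid/chatrik"   # placeholder URL
        self.method = "POST"

    def ask(self, prompt: str) -> str:
        self.count += 1
        # The payload shape is hypothetical; the real API may expect a different body.
        response = requests.request(self.method, self.url, json={"prompt": prompt})
        return response.text
```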
## License
This project is licensed under the terms of the MIT license. See the LICENSE file for details. | Alapchari | /Alapchari-1.0.0.tar.gz/Alapchari-1.0.0/README.md | README.md |
# **AlarmConvergence**
### @Mihuier
AlarmConvergence is an internship project on alarm governance and convergence. It uses AIOps techniques to centrally handle the alarm storms caused by business changes and to find similar and correlated alarms, helping operations engineers quickly narrow down the rough direction of a fault when alarms arrive and thus speeding up troubleshooting. The method clusters and converges the alarms raised within a period of time, identifies generalized alarms from the key attributes of the operational alerts, and finally produces an alarm summary containing only a few generalized alarms. The project includes two schemes: a static (historical) data convergence scheme and a dynamic (streaming) data convergence scheme.
## Installation
Option 1:
```
$ python setup.py install --user
```
Option 2:
```
$ sudo python setup.py install
```
Option 3:
```
$ pip install AlarmConvergence --user
```
Option 4:
```
$ sudo pip install AlarmConvergence
```
Under Python 3, replace python with python3 and pip with pip3 in the commands above.
## Uninstall
```plain
$ pip uninstall AlarmConvergence
```
## Dependencies
Python 2.8.1
Pandas 1.2.4
Jieba >= 0.35
Numpy >= 1.7.1
Networkx >= 1.9.1
## Compatibility
Tested under Python 3.8.8.
## Static data convergence scheme
The static (historical) alarm convergence process consists of 4 steps:
- Step 1: extract keywords from the resource object and alarm content attributes;
- Step 2: define a time window;
- Step 3: compute the similarity between adjacent alarms within the time window;
- Step 4: converge the alarms within the time window that meet a similarity threshold and output a summary.
### 1.1 Extract\_keywards
This step uses TextRank to extract keywords. For details of the TextRank algorithm, see:
> Mihalcea R, Tarau P. TextRank: Bringing order into texts[C]. Association for Computational Linguistics, 2004.
For an introduction to the principles and usage of TextRank: [Generating keywords and summaries for text with TextRank](https://www.letiantian.me/2014-12-01-text-rank/)
- Keyword extraction:
The alarm content and resource object are merged and split into sentences. In each sentence, stop words are filtered out (optional) and only words with specified parts of speech are kept (optional). This yields a set of sentences and a set of words.
Each word becomes a node in PageRank. With a window size of k, assume a sentence consists of the following words in order:
```
w1, w2, w3, w4, w5, ..., wn
```
`w1, w2, ..., wk`, `w2, w3, ...,wk+1`, `w3, w4, ...,wk+2` and so on are all windows. Between the nodes of any two words that appear in the same window there is an undirected, unweighted edge.
Based on the graph built this way, the importance of each word node can be computed, and the most important words can be used as keywords (a code sketch of this step is shown after this list).
* Key-phrase extraction:
Following the keyword extraction above, extract a number of keywords. If several keywords are adjacent in the original text, they can form a key phrase.
For example, in an article about `支持向量机` (support vector machines), the keywords `支持`, `向量` and `机` may be found; key-phrase extraction then yields `支持向量机`.
* Summary generation:
Each sentence is treated as a node in the graph. If two sentences are similar, an undirected weighted edge is assumed between the corresponding nodes, with the similarity as the weight.
The sentences ranked most important by PageRank can be used as the summary.
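As a rough illustration of the keyword-extraction step, the snippet below uses jieba's built-in TextRank implementation (jieba is listed in the dependencies). The stop-word list, window size and part-of-speech filter used by the actual Extract_keywards module may differ.
```python
import jieba.analyse

# Resource object and alarm content are concatenated before extraction.
text = ("自定义业务:【CRM五项考核】统一积分平台交易及时率—积分自有业务订单下发应答未反馈"
        "—T5000040-请立即手工反馈 实时检查,数据库表统计结果大于0,当前值为1")

# Top 10 keywords, keeping only nouns, verbal nouns and verbs (illustrative POS filter).
keywords = jieba.analyse.textrank(text, topK=10, withWeight=False,
                                  allowPOS=('n', 'vn', 'v'))
print(keywords)
```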
### 1.2. Time\_transform
Convert the first-occurrence time of each alarm to a timestamp and compute the time difference between adjacent alarms.
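A minimal sketch of this conversion with pandas (the column names are illustrative, not the project's actual schema):
```python
import pandas as pd

alarms = pd.DataFrame({"first_time": ["2022-06-17 17:11:40",
                                      "2022-06-17 17:11:50",
                                      "2022-06-17 17:13:53"]})
# First-occurrence time as a Unix timestamp in seconds.
alarms["timestamp"] = (pd.to_datetime(alarms["first_time"])
                       - pd.Timestamp("1970-01-01")) // pd.Timedelta("1s")
# Time gap (seconds) between each alarm and the previous one.
alarms["gap"] = alarms["timestamp"].diff()
print(alarms)
```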
### 1.3. Similarity\_calculation
The Jaccard similarity coefficient is used to measure text similarity. Given two sets A and B, the Jaccard coefficient is defined as the size of the intersection of A and B divided by the size of their union. The related metric, the Jaccard distance, describes the dissimilarity between sets: the larger the Jaccard distance, the lower the similarity of the samples.
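A worked example of the Jaccard similarity on two keyword sets (the keywords are illustrative):
```python
def jaccard_similarity(a, b):
    """Size of the intersection divided by the size of the union."""
    a, b = set(a), set(b)
    if not a and not b:
        return 1.0
    return len(a & b) / len(a | b)

k1 = {"反馈", "业务", "订单", "数据库", "平台"}
k2 = {"反馈", "业务", "订单", "数据库", "检查"}
print(jaccard_similarity(k1, k2))   # 4 shared / 6 total ≈ 0.667
```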
### 1.4. Highlight\_2min\_0.8sim
Converge the alarms within a 2-minute time window whose similarity is at least 80% and output a summary.
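A simplified sketch of the windowed convergence, reusing the jaccard_similarity helper from the previous snippet; the real module also produces the summary text for each group, and the 2-minute window and 0.8 threshold are the values described above:
```python
WINDOW = 120        # seconds
THRESHOLD = 0.8

def converge(alarms):
    """Group adjacent alarms (sorted by timestamp) that fall within the
    window and whose keyword sets are at least 80% similar."""
    groups = []
    for alarm in alarms:
        last = groups[-1][-1] if groups else None
        if (last is not None
                and alarm["timestamp"] - last["timestamp"] <= WINDOW
                and jaccard_similarity(alarm["keywords"], last["keywords"]) >= THRESHOLD):
            groups[-1].append(alarm)
        else:
            groups.append([alarm])
    return groups
```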
## Dynamic data convergence scheme
- Alarms raised within a period of time are clustered and converged according to the root cause in the production deployment topology, generalized alarms are identified from the alarm content, and an alarm summary with only a few generalized alarms is finally produced. A pretrained BERT model combined with a hierarchical clustering model was chosen as a concrete implementation of alarm clustering, and the effectiveness of the algorithm was validated on a system of distributed business services.
- The full dynamic (streaming) data convergence process consists of collecting alarm information, extracting key features from it, clustering, and presenting the alarm summary.
### 2.1. Classify_Model
- These alarms can be abstracted as "自定义业务 CRM五项考核 数据库故障" (custom business / CRM five-item assessment / database failure), a generalized alarm that covers a wide range; or as "CRM服务器 业务失败量 获取产品类型信息失败" (CRM server / business failure count / failed to get product type information), which covers a narrower range. Other levels of abstraction can of course also express this alarm cluster. The higher the level of abstraction, the fewer the details but the wider the range it covers; conversely, the lower the level of abstraction, the more potentially useless information it carries and the narrower the range. This hierarchy of abstractions can be expressed with directed acyclic graphs (DAGs).
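A rough sketch of combining sentence embeddings with hierarchical clustering follows. Note that sentence-transformers is an assumption for obtaining BERT embeddings (it is not in the dependency list), and the model name and distance threshold are illustrative, not the values used by Classify_Model:
```python
from sentence_transformers import SentenceTransformer      # assumed; not in the listed deps
from sklearn.cluster import AgglomerativeClustering

alarms = ["自定义业务 CRM五项考核 数据库故障",
          "CRM服务器 业务失败量 获取产品类型信息失败",
          "统一积分平台 订单下发应答未反馈"]

encoder = SentenceTransformer("paraphrase-multilingual-MiniLM-L12-v2")
embeddings = encoder.encode(alarms)

# Cut the dendrogram by distance instead of fixing the number of clusters.
clusterer = AgglomerativeClustering(n_clusters=None, distance_threshold=1.0)
labels = clusterer.fit_predict(embeddings)
print(labels)
```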
### 2.2. SVM
- After the static data convergence scheme is completed, every alarm class carries a label. This labeled data is then used for supervised training, yielding a model that automatically assigns an alarm category. The supervised model used here is an SVM; in essence, the SVM tries to draw a boundary that "best" separates the different classes of points, so that new points can also be classified well later on.
- How is the most suitable separating hyperplane found? The principle is margin maximization: the separating hyperplane should be as far as possible from both classes of data (i.e., far from the points on either side). Like a dividing line drawn exactly down the middle of a shared desk, favoring neither side, the hyperplane should lie right in the middle between the two classes; this keeps it as far as possible from both sides and thus maximizes the margin.
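A minimal sketch of this supervised step with scikit-learn. The texts, labels and TF-IDF features are illustrative only; the real model is trained on the labeled output of the static convergence step:
```python
import jieba
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import make_pipeline
from sklearn.svm import SVC

texts = ["数据库业务告警", "CRM服务器 业务失败量告警", "统一积分平台交易及时率告警"]
labels = ["database", "crm", "points-platform"]

# TF-IDF over jieba word segmentation, followed by a linear SVM.
model = make_pipeline(TfidfVectorizer(tokenizer=jieba.lcut), SVC(kernel="linear"))
model.fit(texts, labels)
print(model.predict(["数据库表统计结果大于0"]))
```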
## Usage notes
1. This scheme is custom-designed for a real production operations environment; the dataset is not public.
2. When processing a piece of text, the TextRank4Keyword and TextRank4Sentence classes split it into 4 representations:
* sentences: a list made up of sentences.
* words_no_filter: a two-level list obtained by segmenting each sentence in sentences into words.
* words_no_stop_words: a two-dimensional list obtained by removing the stop words from words_no_filter.
* words_all_filters: a two-dimensional list obtained by keeping only the words in words_no_stop_words with the specified parts of speech.
## Examples
For example, for text 1:
* Resource object —
```
自定义业务:【CRM五项考核】统一积分平台交易及时率—积分自有业务订单下发应答未反馈—T5000040-请立即手工反馈
```
* Alarm content —
```
实时检查,数据库表统计结果大于0,当前值为1
```
* Extracted keywords —
```
反馈 检查 业务 实时 统一 平台 订单 应答 下发 数据库
```
* Output summary —
```
数据库业务告警
```
Text 2:
* Input text —
```
这间酒店位于北京东三环,里面摆放很多雕塑,文艺气息十足。答谢宴于晚上8点开始。
```
* The output is as follows:
```plain
sentences:
这间酒店位于北京东三环,里面摆放很多雕塑,文艺气息十足
答谢宴于晚上8点开始;
words_no_filter:
这/间/酒店/位于/北京/东三环/里面/摆放/很多/雕塑/文艺/气息/十足
答谢/宴于/晚上/8/点/开始;
words_no_stop_words:
间/酒店/位于/北京/东三环/里面/摆放/很多/雕塑/文艺/气息/十足
答谢/宴于/晚上/8/点;
words_all_filters:
酒店/位于/北京/东三环/摆放/雕塑/文艺/气息
答谢/宴于/晚上;
```
## API
TODO.
For class implementations and function parameters, please refer to the comments in the source code.
## License
[MIT](./LICENSE)
| AlarmConvergence | /AlarmConvergence-2.0.tar.gz/AlarmConvergence-2.0/README.md | README.md |
from AlarmTime.AlarmTime import DetectDate
##############################################
t = DetectDate(1570000023345)
print(t.now) #2019-10-02 13:07:39.393012
t = DetectDate('2019/5/2/14/5')
print(t.now) #2019-05-02 14:05:39.393185
t = DetectDate()
print(t.now) #2019-04-21 12:26:12.542967
############################################
t = DetectDate()
target_time = t.DateTimeDetect('detect the time and date on December 16 8 p.m')
print(target_time)
target_time = t.DateTimeDetect('detect the time and date on December 16 ')
print(target_time)
target_time = t.DateTimeDetect('after 700 days')
print(target_time)
target_time = t.DateTimeDetect('after 6 month 700 days 8 hour 5 minute')
print(target_time)
target_time = t.DateTimeDetect('after 700 days')
print(target_time)
# now we can get the value of the target year, day, month, hour, minute, second
print(target_time.day)
=============================================================================
2019-04-21 12:26:12.542967
2019-12-16 20:00:00.542967
2019-12-16 00:00:00.542967
2021-03-21 12:26:12.542967
2021-09-20 20:31:12.542967
2021-03-21 12:26:12.542967
21
=============================================================================
minute_diff = t.minute_diff_from_now('after 1 days')
print(minute_diff) #1440.0
hour_diff = t.hour_diff_from_now('after 1 days')
print(hour_diff) #24.0
day_diff = t.day_diff_from_now('after 1 days')
print(day_diff) #1.0
sec_diff = t.second_diff_from_now('after 1 days')
print(sec_diff) #86400.0 | AlarmTime | /AlarmTime-0.1.4.tar.gz/AlarmTime-0.1.4/Readme.md | Readme.md |
import json
class Config(dict):
"""Alarmageddon configuration object.
A configuration object that both acts like a read-only dictionary and
provides some methods to access application specific settings
:param dictionary: A dictionary of the form {'env':{config options},...}
:param environment_name: The environment that this Config object belongs to
"""
ENVIRONMENT_KEY = 'environment'
def __init__(self, dictionary, environment_name):
super(Config, self).__init__(self, **dictionary)
self._environment_name = environment_name
try:
config = self[Config.ENVIRONMENT_KEY][environment_name]
self.environment_config = config
except KeyError:
raise ValueError(
"environment: '%s' was not found in configuration"
% environment_name)
@staticmethod
def from_file(config_path, environment_name):
"""Load a Config object from a file
An environment_name must be provided so that the resulting Config
object can provide access to environment specific settings.
"""
with open(config_path, 'r') as config_file:
return Config(json.load(config_file), environment_name)
def hostname(self, alias):
"""Returns an environment-specific hostname given its alias.
host names are pulled from the hosts dictionary under each of the
environment dictionaries.
"""
try:
return self.environment_config['hosts'][alias]['url']
except:
raise KeyError("No base URL defined for alias: %s" % alias)
def environment_name(self):
"""returns current environment name"""
return self._environment_name
def test_results_file(self):
"""returns the location of the test results file"""
return self['test_results_file']
def __str__(self):
"""Return a string representation of this Config object"""
return "Current Environment: %s Dictionary: %s" % (
self._environment_name, dict.__str__(self)) | Alarmageddon | /Alarmageddon-1.1.2-py3-none-any.whl/alarmageddon/config.py | config.py |
import time
import collections
import multiprocessing
import warnings
from alarmageddon.config import Config
from alarmageddon.reporter import Reporter
from alarmageddon.publishing import hipchat, pagerduty, graphite, junit
from alarmageddon.validations.validation import Priority
from alarmageddon.result import Success, Failure
from alarmageddon import banner
import logging
logger = logging.getLogger(__name__)
def load_config(config_path, environment_name):
"""Helper method for loading a :py:class:`~alarmageddon.config.Config`
:param config_path: Path to the JSON configuration file.
:param environment_name: The config environment to run Alarmageddon in.
"""
return Config.from_file(config_path, environment_name)
def run_tests(validations, publishers=None, config_path=None,
environment_name=None, config=None, dry_run=False,
processes=1, print_banner=True, timeout=60, timeout_retries=2):
"""Main entry point into Alarmageddon.
Run the given validations and report them to given publishers.
Either both `config_path` and `environment_name` should not be None,
or `config` should not be None.
:param validations: List of :py:class:`~.validation.Validation` objects
that Alarmageddon will perform.
:param publishers: List of :py:class:`~.publisher.Publisher`
objects that Alarmageddon will publish validation results to.
:param dry_run: When True, will prevent Alarmageddon from performing
validations or publishing results, and instead will print which
validations will be published by which publishers upon failure.
:param processes: The number of worker processes to spawn.
:param print_banner: When True, print the Alarmageddon banner.
:param timeout: If a validation runs for longer than this number of seconds,
Alarmageddon will kill the process running it.
.. deprecated:: 1.0.0
These parameters are no longer used: *config_path*,
*environment_name*, *config*.
Configuration happens when constructing publishers instead.
"""
if config is not None:
warnings.warn("config keyword argument in run_tests is deprecated" +
" and has no effect.", DeprecationWarning)
if config_path is not None:
warnings.warn("config_path keyword argument in run_tests is" +
" deprecated and has no effect.", DeprecationWarning)
if environment_name is not None:
warnings.warn("environment_name keyword argument in run_tests is " +
"deprecated and has no effect.", DeprecationWarning)
publishers = publishers or []
publishers.append(junit.JUnitPublisher("results.xml"))
# We assume that if one is calling run_tests one actually wanted
# to run some tests, not just fail silently
if not validations:
        raise ValueError("run_tests expected a non-empty list of validations, " +
                         "got {} instead".format(validations))
if print_banner:
banner.print_banner(True)
#always dry run. this will catch weird issues with enrichment
do_dry_run(validations, publishers)
if not dry_run:
# run all of the tests
_run_validations(validations, Reporter(publishers), processes,
timeout, timeout_retries)
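# A minimal invocation sketch (the publisher shown is illustrative; any
# Validation and Publisher instances work):
#
#   from alarmageddon.publishing.graphite import GraphitePublisher
#
#   validations = [...]  # e.g. SSH or Cassandra validations
#   publishers = [GraphitePublisher("graphite.example.com", 8125)]
#   run_tests(validations, publishers, timeout=120)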
def _run_validations(validations, reporter, processes=1, timeout=60, timeout_retries=3):
""" Run the given validations and publish the results
Sort validations by order and then run them. All results are logged
to the given reporter. Once everything has been run, the reporter
will publish.
:param validations: List of :py:class:`~.validation.Validation` objects
that Alarmageddon will perform.
    :param reporter: :py:class:`~.reporter.Reporter` object that will
        collect validation results and then report those results to its
        publishers.
    :param processes: The number of worker processes to spawn. Does not
        spawn additional processes if set to 1.
    :param timeout: If a validation runs for longer than this number of
        seconds, Alarmageddon will kill the process running it.
    :param timeout_retries: How many times to re-attempt a validation that
        exceeds the timeout before recording it as a failure.
"""
order_dict = collections.defaultdict(list)
for validation in validations:
order_dict[validation.order].append(validation)
ordered_validations = [l for _, l in sorted(order_dict.items())]
group_failures = {}
for validation in validations:
if (validation.group is not None and
validation.group not in group_failures):
group_failures[validation.group] = []
manager = multiprocessing.Manager()
for order_set in ordered_validations:
immutable_group_failures = dict(group_failures)
results = manager.list()
for valid in order_set:
for i in range(timeout_retries):
#TODO: parallelize
p = multiprocessing.Process(target=_perform, args=(valid, immutable_group_failures, results))
p.start()
p.join(timeout)
if p.is_alive():
#job is taking too long, kill it
#this is messy, but we assume that if something hit the
#general alarmageddon timeout, then it's stuck somewhere
#and we can't stop it nicely
p.terminate()
                    logger.warning("Validation {} ran for longer than {} seconds".format(valid, timeout))
else:
break
else:
results.append(Failure(valid.name, valid,
"{} failed to terminate (ran for {}s)".format(valid,timeout),
time=timeout))
for result in results:
if result.is_failure() and result.validation.group is not None:
group_failures[result.validation.group].append(result.description())
reporter.collect(result)
reporter.report()
def _parallel_perform(wrapped_info):
return _perform(*wrapped_info)
def _perform(validation, immutable_group_failures, results):
start = time.time()
try:
validation.perform(immutable_group_failures)
try:
runtime = validation.get_elapsed_time()
except NotImplementedError:
runtime = time.time() - start
if validation.timeout is not None and runtime > validation.timeout:
result = Failure(validation.name, validation,
"{} ran for {} (exceeded timeout of {})".format(
validation, runtime, validation.timeout),
time=runtime)
else:
result = Success(validation.name, validation,
time=runtime)
except Exception as e:
result = Failure(validation.name, validation, str(e),
time=time.time() - start)
#appending is atomic
results.append(result)
def do_dry_run(validations, publishers):
"""Print which validations will be published by which publishers.
Assume all validations fail and list the messages that would have
been published.
:param validations: List of :py:class:`~.validation.Validation` objects
that Alarmageddon would perform.
:param publishers: List of :py:class:`~.publisher.Publisher`
objects that Alarmageddon would publish validation results to.
"""
dry_run = _compute_dry_run(validations, publishers)
publishers = list(dry_run.keys())
for publisher in sorted(
publishers, reverse=True,
key=lambda x: x.priority_threshold if x.priority_threshold is not None else -1):
print(("Publisher: %s (threshold: %s)" % (
publisher.name(), Priority.string(publisher.priority_threshold))))
for validation in dry_run[publisher]:
print((" %s (priority: %s)" % (
validation.name, Priority.string(validation.priority))))
def _compute_dry_run(validations, publishers):
"""Helper method for computing which validations are published where.
Provides programmatic access to the association between publishers
and validations. Return is of the form {publisher:[validation,...],...}.
"""
associations = {}
for publisher in publishers:
associations[publisher] = []
for validation in sorted(
validations, reverse=True,
key=lambda x: x.priority):
test_result = Failure(validation.name, validation, "failure")
if publisher.will_publish(test_result):
associations[publisher].append(validation)
return associations
def construct_publishers(config):
"""Construct the built-in publishers.
:param config: Config object to construct the publishers from.
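    The environment config may provide any of the following keys (these names
    come from the lookups below); publishers whose keys are missing are
    silently skipped:
        hipchat_host, hipchat_token, hipchat_room,
        pagerduty_host, pagerduty_token,
        graphite_host, graphite_port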
"""
environment = config.environment_name()
env_config = config.environment_config
publishers = []
try:
publishers.append(
hipchat.HipChatPublisher(
api_end_point = env_config["hipchat_host"],
api_token = env_config["hipchat_token"],
environment = environment,
room_name = env_config["hipchat_room"],
priority_threshold = Priority.NORMAL))
except KeyError:
pass
try:
publishers.append(pagerduty.PagerDutyPublisher(
api_end_point = env_config["pagerduty_host"],
api_key = env_config["pagerduty_token"],
environment = environment,
priority_threshold = Priority.CRITICAL))
except KeyError:
pass
try:
publishers.append(graphite.GraphitePublisher(
host = env_config["graphite_host"],
port = env_config["graphite_port"],
environment = environment,
priority_threshold = Priority.LOW))
except KeyError:
pass
    return publishers
# --- end of alarmageddon/run.py ---
import os, sys
from colorama import Fore, Back, Style, init, deinit
def print_banner(color=True):
"""Prints an Alarmageddon banner in color if the caller requests it
and stdout is a terminal. PEP8 is temporarily suspended...
"""
if color and sys.stdout.isatty():
# Print a color version of the banner
init()
print("")
print((Fore.WHITE + " " + Style.DIM + "( " + Style.NORMAL + "." + Style.DIM + " ( ) :" + Style.NORMAL + "." + Style.DIM + " ) " +
Style.NORMAL + Fore.YELLOW + ". , // . , " + Fore.GREEN + "/\\"))
print((Fore.WHITE + " " + Style.DIM + "( ( ) ) " + Style.NORMAL + Fore.YELLOW + ". // . " + Fore.GREEN + "/\\/ \\/\\"))
print((Fore.WHITE + " " + Style.DIM + "( : " + Style.NORMAL + "*" + Style.DIM + " ( ) " + Style.NORMAL + " * " + Fore.YELLOW +
". // . " + Fore.GREEN + "( " + Fore.RED + ">" + Fore.GREEN + "\\ /" + Fore.RED + "<" + Fore.GREEN + " )"))
print((Fore.WHITE + " * " + Style.DIM + " ( : ) " + Style.NORMAL + Fore.YELLOW + ". // . . " + Fore.GREEN + "/ `__` \\"))
print((Fore.WHITE + " " + Style.DIM + "( : : ) " + Style.NORMAL + " * " + Fore.RED + Style.BRIGHT + " O" + Fore.YELLOW + Style.NORMAL +
" . " + Fore.GREEN + "\\ /" + Fore.WHITE + "VVVV" + Fore.GREEN + "\ /"))
print((Fore.WHITE + " * " + Style.DIM + " ( : ) " + Style.NORMAL + Fore.RED + "/" + Fore.WHITE + "IIIIIII" + Fore.RED +
"/[]\\ "))
print((Fore.WHITE + " . " + Fore.RED + "||||" + Fore.WHITE + " . " + Fore.WHITE + Style.DIM + "d" + Style.NORMAL + Fore.RED + "_" + Fore.WHITE + "O" + Fore.RED +
"______" + Fore.WHITE + "O" + Fore.RED + "___" + Style.DIM + Fore.WHITE + "b" + Style.NORMAL))
print((Fore.WHITE + " . " + Fore.RED + "||||" + Fore.WHITE + " . \\o/ \\o/ " + Fore.GREEN + " __ \\" + Fore.WHITE + "^^^^" +
Fore.GREEN + "/ \ "))
print((Fore.WHITE + " " + Fore.MAGENTA + "_/ " + Fore.YELLOW + "@ @@" + Fore.MAGENTA + "_" + Fore.WHITE + " | | " +
Fore.GREEN + "/ /\\ \__/ \ "))
print((Fore.WHITE + " " + Fore.MAGENTA + "/ " + Fore.YELLOW + "@ @" + Fore.MAGENTA + " \ " + Fore.WHITE + " // \\\\ " +
Fore.GREEN + " " + Fore.WHITE + " VVV" + Fore.GREEN + "\\ \ \ \ " + Fore.RESET))
print("")
print("Alarmageddon: Monitoring Your Stuff...")
print(" Until You Don't Care About Your Stuff.")
print("")
deinit()
else:
# Print a monochrome version of the banner.
print("")
print(" ( . ( ) :. ) . , // . , /\ ")
print(" ( ( ) ) . // . /\/ \/\ ")
print(" ( : * ( ) * . // . ( >\ /< ) ")
print(" * ( : ) . // . . / `__` \ ")
print(" ( : : ) * O . \ /VVVV\ / ")
print(" * ( : ) /IIIIIII/[]\ ")
print(" . |||| . d_O______O___b ")
print(" . |||| . \o/ \o/ __ \^^^^/ \ ")
print(" _/ @ @@_ | | / /\ \__/ \ ")
print(" / @ @ \ // \\\\ VVV\ \ \ \ ")
print("")
print("Alarmageddon: Monitoring Your Stuff... ")
print(" Until You Don't Care About Your Stuff. ")
        print("")
# --- end of alarmageddon/banner.py ---
class TestResult(object):
"""Base class representing the result of performing a validation.
Contains the outcome information that Alarmageddon will publish.
:param test_name: Name of the validation this result is associated with.
:param validation: The :py:class:`~validation.Validation` this result is
associated with.
:param description: Default None. A description of the outcome of the
validation. If the validation failed, this field is expected to not
be None.
:param time: Default None. How long the validation took to perform.
"""
def __init__(self, test_name, validation,
description=None, time=None):
self._test_name = test_name
self._description = description
self.time = time
# if this is set, it will report the time to graphite and this
# will be the label in graphite
self.timer_name = validation.timer_name()
self.priority = validation.priority
self.validation = validation
def test_name(self):
"""Returns the name of the test."""
return self._test_name
def description(self):
"""Returns additional descriptive text about the test.
For Failures, description is required.
"""
return self._description
def is_failure(self):
"""Returns True if and only if this Result represents a failed test."""
pass
def __str__(self):
return "Result: '%s', Description: '%s', Failure: %s, Priority: %s" % (
self._test_name, self._description,
self.is_failure(), self.validation.priority)
def __repr__(self):
return self.__str__()
class Failure(TestResult):
"""The result of a failed validation.
`description` is required.
"""
def __init__(self, test_name, validation, description, time=None):
TestResult.__init__(self, test_name, validation, description, time)
def is_failure(self):
"""Returns True."""
return True
class Success(TestResult):
"""The result of a successful validation."""
def __init__(self, test_name, validation, description=None, time=None):
TestResult.__init__(self, test_name, validation, description, time)
def is_failure(self):
"""Returns False."""
        return False
# --- end of alarmageddon/result.py ---
from alarmageddon.publishing.publisher import Publisher
from jinja2 import Template, Environment, FileSystemLoader, Undefined
import smtplib
from six.moves.email_mime_text import MIMEText
from six.moves.email_mime_multipart import MIMEMultipart
from email import utils
import logging
logger = logging.getLogger(__name__)
def enrich(validation, email_settings, runtime_context=None):
""" Enriches the validation with a custom email message.
:param validation: The validation object.
:param email_settings: A dictionary object containing settings for email
subject, body, sender and recipients. See below for details.
    :param runtime_context: Additional replacement context settings available
at runtime. See below for details.
    email_settings should be a dictionary of the form:
    {
        "email_type": "An environment-specific e-mail type as
            defined in the email publisher config",
        "subject": "The name of the Jinja template for the e-mail subject",
        "body": "The name of the Jinja template for the e-mail body",
        "sender": "A dictionary of the form
            {"real_name": "Real Name", "address": "[email protected]"}",
        "recipients": "An iterable of dictionaries of the form
            {"real_name": "Real Name", "address": "[email protected]"}"
    }
Note that the location of the Jinja templates is defined in the email
publisher config.
runtime_context is a dictionary whose values are consumed at runtime inside
the Jinja templates defined in email_settings.
"""
logger.debug("Enriching {}".format(validation))
if not validation:
raise ValueError("validation is required.")
if not email_settings:
raise ValueError("email_settings are required.")
else:
        if "email_type" not in email_settings:
            raise KeyError("email_settings.email_type is required.")
        if "subject" not in email_settings:
            raise KeyError("email_settings.subject template is required.")
        if "body" not in email_settings:
            raise KeyError("email_settings.body template name is required.")
        if "sender" not in email_settings:
            raise KeyError("email_settings.sender is required.")
        if "recipients" not in email_settings:
            raise KeyError("email_settings.recipients are required.")
if not runtime_context:
runtime_context = {}
enrichment = {'email_settings': email_settings,
'runtime_context': runtime_context}
#hack because we need an instance. there is probably a better way
temp_pub = EmailPublisher({"fake": "config"})
validation.enrich(temp_pub, enrichment, force_namespace=True)
return validation
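# A sketch of enriching a validation for the EmailPublisher (template names,
# addresses, and runtime values below are illustrative):
#
#   enrich(validation,
#          {"email_type": "test_alert",
#           "subject": "subject.template",
#           "body": "body.template",
#           "sender": {"real_name": "Alarmageddon",
#                      "address": "[email protected]"},
#           "recipients": [{"real_name": "On Call",
#                           "address": "[email protected]"}]},
#          runtime_context={"cluster": "prod-1"})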
class SilentUndefined(Undefined):
    """Don't break template rendering just because variables aren't there."""
def _fail_with_undefined_error(self, *args, **kwargs):
"""jinja2 hack to allow silent ignoring of missing values."""
return None
class SimpleEmailPublisher(Publisher):
    """A publisher that publishes incidents to e-mail.
    :param sender_address: A dictionary of the form
        {"real_name": "Real Name", "address": "[email protected]"} describing
        the message sender.
    :param recipient_addresses: An iterable of dictionaries of the same form
        describing the message recipients.
    :param host: The SMTP host to send mail through (defaults to localhost).
    :param port: The port to connect to on the SMTP host.
    :param name: The name of the publisher.
    :param priority_threshold: Will publish validations of this priority or
        higher.
    :param connect_timeout_seconds: How long to attempt to connect to the SMTP
        server.
    :param environment: The environment that tests are being run in.
    """
def __init__(self, sender_address, recipient_addresses,
host=None, port=None, name='EmailPublisher',
priority_threshold=None, connect_timeout_seconds=10,
environment=None):
Publisher.__init__(self, name,
priority_threshold=priority_threshold,
environment=environment)
# Set the initial replacement context to the defaults.
# Overrides will be applied to this dictionary individually.
self._connect_timeout = connect_timeout_seconds
self.sender_address = None
self.recipient_addresses = []
if sender_address:
self.sender_address = self.configure_sender(sender_address)
if recipient_addresses:
self.recipient_addresses =\
self.configure_recipients(recipient_addresses)
self.host = host
self.port = port
def __repr__(self):
return "{}: sender {}, recipient {}, host {}, port {}, timeout {}".format(
type(self).__name__, self.sender_address,
self.recipient_addresses, self.host, self.port,
self._connect_timeout)
def send(self, result):
"""Constructs a message from a result and send it as an email.
This will only send if the priority threshold is met **and** the
original validation was appropriately enriched.
:param result: The result to publish.
"""
logger.debug("Checking if we should send {}".format(result))
if result.is_failure() and self.will_publish(result):
message_body = result.description()
message_subject = result.test_name()
msg = self.configure_message(self.sender_address,
self.recipient_addresses,
message_subject,
message_body)
logger.debug("Sending {} to {} from server {}:{}".format(result, self.recipient_addresses, self.host, self.port))
""" A note regarding recipient addresses:
smtplib.sendmail requires that multiple recipient addresses
are structured as an array of addresses.
MIMEMultipart messages require that multiple recipients
are structured as a comma-separated list.
"""
smtpObj = self.configure_smtp_object(self.host, self.port)
smtpObj.sendmail(msg['From'],
self.recipient_addresses,
msg.as_string())
def configure_message(self, sender_address, recipient_addresses,
subject, body):
""" Creates a MIMEMultipart message with a plain-text body.
:param sender_address: The address the message will be sent from.
:param recipient_addresses: The addresses the message will be sent to.
:param subject: The subject of the email.
:param body: The body of the email.
"""
msg = MIMEMultipart()
msg['Subject'] = subject
msg['From'] = sender_address
# MIMEMultipart requires the 'To' header to be a comma separated list
msg['To'] = ", ".join(recipient_addresses)
msg.attach(MIMEText(body, 'plain'))
return msg
def configure_sender(self, sender):
"""Properly formats the sender address.
:param sender: A dictionary containing information about the sender.
"""
return utils.formataddr((sender['real_name'], sender['address']))
def configure_recipients(self, recipients):
"""Properly formats the list of recipient addresses.
:param recipients: A list containing dictionaries of information about
the recipients.
"""
# Recipients are expected to be in an
# array of objects containing {real_name, address}
addresses = []
for recipient in recipients:
addresses.append(utils.formataddr((recipient['real_name'],
recipient['address'])))
# sendmail requires the recipients to be an array of addresses
return addresses
def configure_smtp_object(self, host, port):
"""Helper method to configure the SMTP object."""
if not(host and port):
if not(host):
# If host isn't specified, try localhost
smtpObj = smtplib.SMTP('localhost',
timeout=self._connect_timeout)
else:
# otherwise, use the host with the default port
smtpObj = smtplib.SMTP(host,
timeout=self._connect_timeout)
else:
smtpObj = smtplib.SMTP(host, port, timeout=self._connect_timeout)
return smtpObj
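# A minimal SimpleEmailPublisher setup (addresses and SMTP host/port are
# illustrative); send() is then called with a failed result:
#
#   publisher = SimpleEmailPublisher(
#       {"real_name": "Alarmageddon", "address": "[email protected]"},
#       [{"real_name": "On Call", "address": "[email protected]"}],
#       host="localhost", port=25)
#   publisher.send(some_failure_result)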
class EmailPublisher(SimpleEmailPublisher):
"""A publisher that publishes incidents to e-mail.
For validations to be published by this publisher, they must be enriched
    with additional data. See :py:func:`.emailer.enrich`.
:param config: A config object containing email config information. See
below for a detailed description.
:param email_notifications_config_key: The config key that contains
the email configuration.
:param name: The name of the publisher.
:param defaults: Default email templating values.
:param priority_threshold: Will publish validations of this priority or
higher if they are appropriately enriched.
:param connect_timeout_seconds: How long to attempt to connect to the SMTP
server.
:param environment: The environment that tests are being run in.
config is an Alarmageddon config object that contains at least the
following:
{email_template_directory : Directory containing the e-mail templates.
Can be relative to the location of the alarmageddon script or an
absolute directory location,
environment : EMAIL_NOTIFICATIONS
Where EMAIL_NOTIFICATIONS is a dictionary of the form:
"email_notifications" : {
EMAIL_TYPE: {
"email_recipients" : [
{"real_name" : "Some other recipient",
"address" : "[email protected]"},...
],
"email_custom_message" : "Custom email message. Can contain
Jinja replacement tokens."
},...
}
}
and EMAIL_TYPE is a name that will identify which validations should
use that config.
"""
EMAIL_NOTIFICATIONS_CONFIG_KEY = 'email_notifications'
def __init__(self, config, email_notifications_config_key=None,
name='EmailPublisher', defaults=None,
priority_threshold=None, connect_timeout_seconds=10,
environment=None):
if not config:
raise ValueError("config parameter is required.")
if defaults is None:
defaults = {}
SimpleEmailPublisher.__init__(self, None, None, name=name,
priority_threshold=priority_threshold,
environment=environment)
# Set the initial replacement context to the defaults.
# Overrides will be applied to this dictionary individually.
self._replacement_context = defaults
self._config = config
self._template_environment = None
self._connect_timeout = connect_timeout_seconds
if not email_notifications_config_key:
self._email_notifications_config_key = \
self.EMAIL_NOTIFICATIONS_CONFIG_KEY
else:
self._email_notifications_config_key = \
email_notifications_config_key
def __repr__(self):
return "{}: replacement {}, config {}, env {}, timeout {}, key {}".format(
type(self).__name__, self._replacement_context, self._config,
self._template_environment, self._connect_timeout,
self._email_notifications_config_key)
def send(self, result):
"""Constructs a message from a result and send it as an email.
This will only send if the priority threshold is met **and** the
original validation was appropriately enriched.
:param result: The result to publish.
"""
if result.is_failure() and self.will_publish(result):
self.configure_replacement_context(result)
fileSystemLoader =\
FileSystemLoader(self._config['email_template_directory'])
self._template_environment = Environment(loader=fileSystemLoader,
undefined=SilentUndefined)
email_settings = self.get_email_settings(result)
recipient_addresses =\
self.configure_recipients(email_settings['recipients'])
sender_address = self.configure_sender(email_settings['sender'])
message_body = self.replace_tokens(email_settings['body'],
self._replacement_context)
message_subject = self.replace_tokens(email_settings['subject'],
self._replacement_context)
msg = self.configure_message(sender_address,
recipient_addresses,
message_subject,
message_body)
""" A note regarding recipient addresses:
smtplib.sendmail requires that multiple recipient addresses
are structured as an array of addresses.
MIMEMultipart messages require that multiple recipients
are structured as a comma-separated list.
"""
        host = None
        port = None
        if 'email_host' in self._config:
            host = self._config['email_host']
        if 'email_port' in self._config:
            port = self._config['email_port']
smtpObj = self.configure_smtp_object(host, port)
smtpObj.sendmail(msg['From'], recipient_addresses, msg.as_string())
def replace_tokens(self, template, token_dictionary):
"""Replace templated values with their contents.
Loops multiple times, to handle the case of a template that contains
templates.
Templates should be valid Jinja templates:
http://jinja.pocoo.org/
:param template: The template string.
:param token_dictionary: A mapping from template names to values.
"""
        # Render once, then re-render up to three more times in case
        # tokens have tokens inside them.
tokenized_template = self._template_environment.get_template(template)
detokenized_string = tokenized_template.render(token_dictionary)
if "{{" in detokenized_string:
for _num in range(1, 4):
tokenized_template = Template(detokenized_string,
undefined=SilentUndefined)
detokenized_string = tokenized_template\
.render(token_dictionary)
return detokenized_string
""" To implement a custom email publisher, override the methods below """
def _can_publish(self, result):
"""Determines if the email validation has the required enrichment.
:param result: The result to be published.
"""
try:
email_settings = self.get_email_settings(result)
email_settings['email_type']
email_settings['subject']
email_settings['body']
email_settings['sender']
email_settings['recipients']
return True
except (AttributeError, KeyError):
return False
def configure_replacement_context(self, result):
""" Configures the replacement context for this email publisher
Supported template variables:
{{test_name}}
The name of the test.
{{test_description}}
The description of the failure.
{{env}}
The environment name.
{{email_custom_message}}
A custom message used in email alerts. This field can be used to
summarize a particular type of alert or include additional details
Runtime Context:
All dictionary items contained in runtime context are available.
:param result: The test result whose values will populate the
replacement context.
"""
# Configure the replacement context
if result.test_name():
self._replacement_context['test_name'] = result.test_name()
if result.description():
self._replacement_context['test_description'] = \
result.description()
if self._config.environment_name():
self._replacement_context['env'] = self._config.environment_name()
email_settings = self.get_email_settings(result)
if 'email_type' in email_settings:
email_type = email_settings['email_type']
self._replacement_context['email_type'] = email_type
email_config_key = self._email_notifications_config_key
email_config = \
self._config.environment_config[email_config_key]
if email_type in email_config:
email_config_settings = email_config[email_type]
if 'email_custom_message' in email_config_settings:
self._replacement_context['email_custom_message'] = \
email_config_settings['email_custom_message']
runtime_context = self.get_runtime_context(result)
if runtime_context is not None:
self._replacement_context.update(self.get_runtime_context(result))
def get_email_settings(self, result):
"""Returns the email settings of the given result."""
return result.validation.get_enriched(self, True)['email_settings']
def get_runtime_context(self, result):
"""Returns the runtime context of the given result."""
        return result.validation.get_enriched(self, True)['runtime_context']
# --- end of alarmageddon/publishing/emailer.py ---
from alarmageddon.publishing.publisher import Publisher
import xml.etree.cElementTree as ET
import logging
logger = logging.getLogger(__name__)
class JUnitPublisher(Publisher):
"""A Publisher that writes results to JUnit formatted XML.
:param filename: The file to write the XML to.
:param priority_threshold: Will publish validations of this priority or
higher.
:param environment: The environment that tests are being run in.
"""
def __init__(self, filename, priority_threshold=None,
environment=None):
if not filename:
raise ValueError("filename parameter is required")
logger.debug("Constructing publisher with filename:{},"
"priority_threshold:{}, environment:{}"
.format(filename, priority_threshold, environment))
Publisher.__init__(self, "JUnit",
priority_threshold=priority_threshold,
environment=environment)
self.filename = filename
def __repr__(self):
return "JUnit, publishes to {} (threshold: {})".format(
self.filename, self.priority_threshold)
def send_batch(self, results):
"""Write a set of results to an XML file.
:param results: The validation results to write to file.
"""
tree = self._construct_tree(results)
tree.write(self.filename)
def _construct_tree(self, results):
failures = 0
errors = 0
skips = 0
tests = len(results)
time = 0
for result in results:
if result.is_failure():
failures += 1
time += result.time
root = ET.Element("testsuite")
root.set("errors", str(errors))
root.set("failures", str(failures))
root.set("name", "alarmageddon")
root.set("skips", str(skips))
root.set("tests", str(tests))
root.set("time", "{:f}".format(time))
for result in results:
self._append_result(result, root)
return ET.ElementTree(root)
def _append_result(self, result, parent):
"""Add a result as a child of the given parent XML element.
:param result: The validation result to add.
:param parent: The parent XML element to add onto.
"""
case = ET.SubElement(parent, "testcase")
#remove brackets around type name
case.set("classname", str(type(result.validation))[8:-2])
case.set("name", result.test_name())
case.set("time", "{:f}".format(result.time))
if result.is_failure():
failure = ET.SubElement(case, "failure")
failure.set("message", "test failure")
failure.text = str(result)
def send(self, result):
"""This publisher cannot write only a single result"""
        raise NotImplementedError
# --- end of alarmageddon/publishing/junit.py ---
import os
import requests
import json
import collections
from alarmageddon.publishing.publisher import Publisher
from alarmageddon.publishing.exceptions import PublishFailure
import logging
logger = logging.getLogger(__name__)
FALLBACK_TEXT = "There were Alarmageddon failures"
def _get_collapsed_message(results):
"""Helper function to collapse similar failures together.
If several results have the same reason for failing, combine the
results to save space and cognitive load on users.
:param results: List of result objects.
"""
description = results[0].description()
names = [result.test_name() for result in results]
message = ("(failed) {0}\nDescription: {1}").format(", ".join(names),
description)
return message
class TeamsPublisher(Publisher):
"""A Publisher that sends results to Teams.
Publishes all failures to the provided Teams room.
:param hook_url: The Teams Hook URL
:param priority_threshold: Will publish validations of this priority or
higher.
:param environment: The environment that tests are being run in.
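    A typical construction (the webhook URL is illustrative):
        TeamsPublisher("https://hooks.example.com/teams-webhook",
                       environment="staging",
                       priority_threshold=Priority.NORMAL)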
"""
def __init__(self, hook_url, environment=None, priority_threshold=None):
        logger.debug("Constructing publisher with url:{}, priority_threshold:{}, environment:{}"
.format(hook_url, priority_threshold, environment))
if not hook_url:
raise ValueError("hook_url parameter is required")
        Publisher.__init__(self, "Teams", priority_threshold=priority_threshold,
                           environment=environment)
self._hook_url = hook_url
    def __str__(self):
        return "Teams: {} (threshold: {})".format(
            self._hook_url, self.priority_threshold)
    def __repr__(self):
        return "Teams: {} (threshold: {})".format(
            self._hook_url, self.priority_threshold)
def send(self, result):
"""sends a result to Teams if the result is a failure."""
if result.is_failure() and self.will_publish(result):
message = "(failed) Failure in {0}\nTest:{1}\nFailed because: {2}".format(
self.environment,
result.test_name(),
result.description())
message_text = self._build_message(
FALLBACK_TEXT,
self._get_jenkins_job_url(),
message)
self._send_to_teams(message_text)
def send_batch(self, results):
"""Send a batch of results to Teams.
Collapses similar failures together to save space.
"""
collapsed = collections.defaultdict(list)
errors = 0
for result in results:
if result.is_failure() and self.will_publish(result):
collapsed[result.description()].append(result)
errors += 1
if errors == 0:
return
message = "{0} failure(s) :\n".format(errors)
message += "\n".join(_get_collapsed_message(collapsed_result)
for collapsed_result in list(collapsed.values()))
message_text = self._build_message(
FALLBACK_TEXT,
self._get_jenkins_job_url(),
message)
self._send_to_teams(message_text)
def _build_message(self, FALLBACK_TEXT, run_link, text):
pretext = "Alarmageddon run completed."
if run_link is not None:
pretext = "{} <{}|View Result>".format(pretext, run_link)
jenkins_url = self._get_jenkins_job_url()
payload = {
"title": os.environ.get('JOB_NAME'),
"text": text,
"potentialAction": [
{
"@type": "OpenUri",
"name": "View Build",
"targets": [{
"os": "default",
"uri": jenkins_url
}]
}
]
}
return payload
def _send_to_teams(self, message):
"""Send a message to Teams.
:param message: The message to be published.
"""
headers = {
"Content-Type": "application/json"
}
data = json.dumps(message)
logger.info("Sending {} to {}".format(data, self._hook_url))
resp = requests.post(self._hook_url, data=data, headers=headers)
if resp.status_code < 200 or resp.status_code >= 300:
raise PublishFailure(self, "{0} - {1}".format(message, resp.text))
    def _get_jenkins_job_url(self):
        """If we're running in Jenkins, use environment variables to
        construct a job URL. If we are not running in Jenkins,
        return None.
"""
jenkins_host = os.environ.get('JENKINS_URL')
if jenkins_host is not None:
jenkins_job = os.environ.get('JOB_NAME')
jenkins_build = os.environ.get('BUILD_ID')
job_url = "{}job/{}/{}/console".format(
jenkins_host,
jenkins_job,
jenkins_build
)
return job_url
else:
            return None
# --- end of alarmageddon/publishing/teams.py ---
import time
import requests
from alarmageddon.publishing.publisher import Publisher
from alarmageddon.publishing.exceptions import PublishFailure
class HttpPublisher(Publisher):
"""Creates an HTTP Publisher that publishes successes and/or failures
to either one or two HTTP end points.
If you want the same URL to be published to whether or not the
Validation result being published failed or succeeded, please
supply only the url parameter and omit the failure_url and
success_url parameters.
Conversely, if you want different URLs to be requested based on
whether or not the Validation result being published succeeded,
please omit the url parameter and supply the success_url and
failure_url parameters. The HttpPublisher will use the same
method, headers, and authentication parameters when requesting
both of those URLs. If that is not acceptable, please override
the relevent getter methods.
:param url: The URL that this publisher should publish successful and
failed Validation results to.
:param success_url: The URL that this publisher should publish successful
Validation results to.
:param failure_url: The URL that this publisher should publish failed
Validation results to.
:param method: The HTTP method to use when posting. POST is the default
because it is the only HTTP method that allows you to send the results
of the published Validation. The GET method is allowed but cannot send
the details of the Validation result along with the request.
:param headers: headers to send along with the request
:param auth: if your URLs require authentication you can supply a value
like the following: ``auth=('user', 'pass')``
:param attempts: the number of times to try to publish to your URL(s).
:param retry_after_seconds: how many seconds to wait after a failed
attempt.
:param timeout_seconds: how long a single attempt can take before it is
considered a failed attempt.
:param publish_successes: specify True if you want this HTTP Publisher to
publish successful results too. If you provide a success_url, then
this HttpPublisher will assume you want to publish successes.
:param expected_status_code: the HTTP status code to expect from your
HTTP server if the Validation result was successfully published.
:param name: The name of this publisher.
:param priority_threshold: Will publish validations of this priority or
higher.
:param environment: The environment that tests are being run in.
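    Two common configurations (URLs are illustrative):
        HttpPublisher(url="https://alerts.example.com/results")
        HttpPublisher(success_url="https://alerts.example.com/ok",
                      failure_url="https://alerts.example.com/fail",
                      attempts=3, retry_after_seconds=5)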
"""
def __init__(self, url=None, success_url=None, failure_url=None,
method="POST", headers=None, auth=None, attempts=1,
retry_after_seconds=2, timeout_seconds=5,
publish_successes=False, expected_status_code=200,
name=None, priority_threshold=None, environment=None):
Publisher.__init__(self, name or "HttpPublisher",
priority_threshold=priority_threshold,
environment=environment)
self._success_url = success_url or url
if not self._success_url:
raise ValueError("either success_url or url parameter is required")
self._failure_url = failure_url or url
if not self._failure_url:
raise ValueError("either failure_url or url parameter is required")
self._publish_successes = (success_url is not None) or publish_successes
self._method = method
if not self._method:
            raise ValueError("method parameter is required")
self._headers = headers
self._auth = auth
self._attempts = attempts
if self._attempts <= 0:
raise ValueError("attempts parameter must be at least one")
self._retry_after_seconds = retry_after_seconds
if self._retry_after_seconds < 0:
raise ValueError("retry_after_seconds parameter must be positive")
self._timeout_seconds = timeout_seconds
self._expected_status_code = expected_status_code
def _get_method(self, result):
"""Returns the HTTP method (e.g. GET, POST, etc.) that the
HttpPublisher should use when publishing.
"""
return self._method
def _get_url(self, result):
"""Returns the URL that the HttpPublisher should publish to."""
if result.is_failure():
return self._failure_url
else:
return self._success_url
def _get_headers(self, result):
"""return the headers, as a dict, that this HttpPublisher should
include when it publishes.
"""
return self._headers
def _get_auth(self, result):
"""Returns None or Authentication information (e.g. ``auth=('user',
'pass')``) that this HttpPublisher should send along with the
request.
"""
return self._auth
def _get_data(self, result):
"""Returns the data that this HttpPublisher should send along with the
request.
It is only relevant when the HTTP Method is ``POST``.
"""
if self._method == "POST":
return str(result)
else:
return None
def send(self, result):
"""Publish a test result.
:param result: The :py:class:`~.result.TestResult` of a test.
"""
if result.is_failure() or self._publish_successes:
published = False
for i in range(self._attempts):
try:
response = requests.request(self._get_method(result),
self._get_url(result),
data=self._get_data(result),
headers=self._get_headers(result),
auth=self._get_auth(result),
timeout=self._timeout_seconds)
if response.status_code == self._expected_status_code:
published = True
break
except Exception:
time.sleep(self._retry_after_seconds)
if not published:
raise PublishFailure(self, result)
def __repr__(self):
"""Returns a string representation of this HttpPublisher"""
        return ("HttpPublisher: '{0}', Method: {1}, Success URL: {2}, "
                "Failure URL: {3}").format(self._name, self._method,
                                           self._success_url,
                                           self._failure_url)
# --- end of alarmageddon/publishing/http.py ---
import requests
import json
import collections
from alarmageddon.publishing.publisher import Publisher
from alarmageddon.publishing.exceptions import PublishFailure
import logging
logger = logging.getLogger(__name__)
def _get_collapsed_message(results):
"""Helper function to collapse similar failures together.
If several results have the same reason for failing, combine the
results to save space and cognitive load on users.
:param results: List of result objects.
"""
description = results[0].description()
names = [result.test_name() for result in results]
message = ("(failed) {0}\nDescription: {1}").format(", ".join(names),
description)
return message
class HipChatPublisher(Publisher):
"""A Publisher that sends results to HipChat.
Publishes all failures to the designated HipChat room. Will publish all
    results in a single message, collapsing similar errors together to
save space.
:param api_end_point: The HipChat API endpoint.
:param api_token: A HipChat API token.
:param environment: The environment that tests are being run in.
:param room_name: The HipChat room to publish results to.
:param priority_threshold: Will publish validations of this priority or
higher.
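    A typical construction (endpoint, token, and room are illustrative):
        HipChatPublisher(api_end_point="https://hipchat.example.com/v1",
                         api_token="token",
                         environment="staging",
                         room_name="Monitoring",
                         priority_threshold=Priority.NORMAL)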
"""
def __init__(self, api_end_point, api_token, environment, room_name,
priority_threshold=None):
logger.debug("Constructing publisher with endpoint:{}, token:{}, room name:{},"
"priority_threshold:{}, environment:{}"
.format(api_end_point, api_token, room_name,
priority_threshold, environment))
if not api_end_point:
raise ValueError("api_end_point parameter is required")
if not api_token:
raise ValueError("api_token parameter is required")
if not environment:
raise ValueError("environment parameter is required")
if not room_name:
raise ValueError("room_name parameter is required")
Publisher.__init__(self, "HipChat: {0}".format(room_name),
priority_threshold=priority_threshold,
environment=environment)
self._api_token = api_token
self._api_end_point = api_end_point
self._room_name = room_name
def __str__(self):
return "Hipchat: {}, room {}, env {}".format(
self._api_end_point, self._room_name, self.environment
)
def __repr__(self):
return "Hipchat: {} ({}), room {}, env {}".format(
self._api_end_point, self._api_token,
self._room_name, self.environment
)
def send_batch(self, results):
"""Send a batch of results to HipChat.
Collapses similar failures together to save space.
"""
collapsed = collections.defaultdict(list)
errors = 0
for result in results:
if result.is_failure() and self.will_publish(result):
collapsed[result.description()].append(result)
errors += 1
if errors == 0:
return
message = "{0} failure(s) in {1}:\n".format(errors, self.environment)
message += "\n".join(_get_collapsed_message(collapsed_result)
for collapsed_result in list(collapsed.values()))
self._send_to_hipchat(message)
def _send_to_hipchat(self, message):
"""Send a message to HipChat.
:param message: The message to be published.
"""
url = ("{0}/rooms/message?format=json&room_id={1}&auth_token={2}" +
"&message={3}&from={4}&color=red").format(self._api_end_point,
self._room_name,
self._api_token,
message,
'Alarmageddon')
headers = {
"Content-Type": "application/json"
}
data = json.dumps({
"message": message,
"message_format": "text",
"color": "red"
})
logger.debug("Sending {} to {}".format(data, url))
resp = requests.post(url, data=data, headers=headers, stream=True)
if resp.status_code < 200 or resp.status_code >= 300:
raise PublishFailure(self, "{0} - {1}".format(message, resp.text))
def send(self, result):
"""sends a result to HipChat if the result is a Failure."""
if result.is_failure() and self.will_publish(result):
message = ("<b>(failed) Failure in {0}</b><br/><b>Test:</b> " +
"{1}<br/><b>Failed because:" +
"</b> {2}").format(self.environment,
result.test_name(),
result.description())
            self._send_to_hipchat(message)
# --- end of alarmageddon/publishing/hipchat.py ---
import os
import requests
import json
import collections
from alarmageddon.publishing.publisher import Publisher
from alarmageddon.publishing.exceptions import PublishFailure
import logging
logger = logging.getLogger(__name__)
FALLBACK_TEXT = "There were Alarmageddon failures"
def _get_collapsed_message(results):
"""Helper function to collapse similar failures together.
If several results have the same reason for failing, combine the
results to save space and cognitive load on users.
:param results: List of result objects.
"""
description = results[0].description()
names = [result.test_name() for result in results]
message = ("(failed) {0}\nDescription: {1}").format(", ".join(names),
description)
return message
class SlackPublisher(Publisher):
"""A Publisher that sends results to Slack.
Publishes all failures to the provided Slack room.
:param hook_url: The Slack Hook URL
:param priority_threshold: Will publish validations of this priority or
higher.
:param environment: The environment that tests are being run in.
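    A typical construction (the webhook URL is illustrative):
        SlackPublisher("https://hooks.slack.com/services/T000/B000/XXXX",
                       environment="staging",
                       priority_threshold=Priority.NORMAL)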
"""
def __init__(self, hook_url, environment, priority_threshold=None):
logger.debug("Constructing publisher with url:{}, priority_threshold:{}, environment:{}"
.format(hook_url, priority_threshold, environment))
if not hook_url:
raise ValueError("hook_url parameter is required")
if not environment:
raise ValueError("environment parameter is required")
Publisher.__init__(self, "Slack",
priority_threshold=priority_threshold,
environment=environment)
self._hook_url = hook_url
def __str__(self):
return "Slack: {}".format(self._hook_url)
    def send(self, result):
        """Sends a result to Slack if the result is a failure."""
if result.is_failure() and self.will_publish(result):
message = "(failed) Failure in {0}\nTest:{1}\nFailed because: {2}".format(
self.environment,
result.test_name(),
result.description())
message_text = self._build_message(
FALLBACK_TEXT,
self._get_jenkins_job_url(),
message)
self._send_to_slack(message_text)
def send_batch(self, results):
"""Send a batch of results to Slack.
Collapses similar failures together to save space.
"""
collapsed = collections.defaultdict(list)
errors = 0
for result in results:
if result.is_failure() and self.will_publish(result):
collapsed[result.description()].append(result)
errors += 1
if errors == 0:
return
message = "{0} failure(s) in {1}:\n".format(errors, self.environment)
message += "\n".join(_get_collapsed_message(collapsed_result)
for collapsed_result in list(collapsed.values()))
message_text = self._build_message(
FALLBACK_TEXT,
self._get_jenkins_job_url(),
message)
self._send_to_slack(message_text)
def _build_message(self, FALLBACK_TEXT, run_link, text):
pretext = "Alarmageddon run completed."
if run_link is not None:
pretext = "{} <{}|View Result>".format(pretext, run_link)
payload = {
"attachments": [
{
"fallback": FALLBACK_TEXT,
"author_name": "Alarmageddon",
"color": "danger",
"pretext": pretext,
"text": text,
"mrkdwn": True
}
]
}
return payload
def _send_to_slack(self, message):
"""Send a message to Slack.
:param message: The message to be published.
"""
        headers = {
            "Content-Type": "application/json"
        }
data = json.dumps(message)
logger.info("Sending {} to {}".format(data, self._hook_url))
resp = requests.post(self._hook_url, data=data, headers=headers)
if resp.status_code < 200 or resp.status_code >= 300:
raise PublishFailure(self, "{0} - {1}".format(message, resp.text))
    def _get_jenkins_job_url(self):
        """If we're running in Jenkins, use environment variables to
        construct a job URL. If we are not running in Jenkins,
        return None.
"""
jenkins_host = os.environ.get('JENKINS_URL')
if jenkins_host is not None:
jenkins_job = os.environ.get('JOB_NAME')
jenkins_build = os.environ.get('BUILD_ID')
job_url = "{}job/{}/{}/console".format(
jenkins_host,
jenkins_job,
jenkins_build
)
return job_url
else:
            return None
# --- end of alarmageddon/publishing/slack.py ---
class Publisher(object):
"""Base class for all test result publishers.
Publishers take test results and publish them to another service.
:param name: The name of this publisher.
:param priority_threshold: Will publish validations of this priority or
higher.
:param environment: The environment that tests are being run in.
"""
def __init__(self, name=None, priority_threshold=None,
environment=None):
self._name = name
self.priority_threshold = priority_threshold
self.environment = environment
def name(self):
"""Return the name of the publisher."""
return self._name
def send(self, result):
"""Publish a test result.
:param result: The :py:class:`~.result.TestResult` of a test.
"""
pass
def send_batch(self, results):
"""Publish a collection of test results.
Directly called by the :py:class:`~.reporter.Reporter` .
        :param results: An iterable of :py:class:`~.result.TestResult` objects.
"""
for result in results:
self.send(result)
def __repr__(self):
return "Publisher: '{}'".format(self._name)
def will_publish(self, result):
"""Determine if the publisher will publish the result
To publish a result, the publisher must both be able to publish
(_can_publish) and have its priority threshold met (_should_publish).
:param result: The :py:class:`~.result.TestResult` of a test.
"""
return self._should_publish(result) and self._can_publish(result)
def _should_publish(self, result):
"""Determine if the publisher should publish the given result.
Whether or not a result should be published depends on its priority.
:param result: The :py:class:`~.result.TestResult` of a test.
"""
if self.priority_threshold is None:
return True
priority = result.priority
return self.priority_threshold <= priority
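    # For example, assuming Priority.LOW < Priority.NORMAL < Priority.CRITICAL,
    # a publisher constructed with priority_threshold=Priority.NORMAL will
    # publish NORMAL and CRITICAL results but skip LOW ones.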
def _can_publish(self, result):
"""Determine if the publisher can publish the given result.
Whether or not a result can be published depends on if the publisher
requires extra information from the validation belonging to the result,
and whether or not that validation contains the needed information.
:param result: The :py:class:`~.result.TestResult` of a test.
"""
        return True
# --- end of alarmageddon/publishing/publisher.py ---
import statsd
from alarmageddon.publishing.publisher import Publisher
import logging
logger = logging.getLogger(__name__)
class GraphitePublisher(Publisher):
"""A Publisher that sends results to Graphite.
Logs the number of successes and failures, and potentially logs how long a
validation takes.
:param host: The graphite host.
:param port: The port that graphite is listening on.
:param failed_tests_counter: Name of the graphite counter for failed tests.
    :param passed_tests_counter: Name of the graphite counter for successful
tests.
:param prefix: Prefix applied to all graphite fields this publisher will
write to.
:param priority_threshold: Will publish validations of this priority or
higher.
:param environment: The environment that tests are being run in.
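    A typical construction (host and port are illustrative):
        GraphitePublisher("graphite.example.com", 8125,
                          prefix="alarmageddon",
                          priority_threshold=Priority.LOW)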
"""
def __init__(self, host, port,
failed_tests_counter='failed',
passed_tests_counter='passed',
prefix='alarmageddon',
priority_threshold=None,
environment=None):
if not host:
raise ValueError("host parameter is required")
logger.debug("Constructing publisher with host:{}, port:{}, failed counter:{},"
"passed counter:{}, prefix:{}, priority_threshold:{}, environment:{}"
.format(host, port, failed_tests_counter, passed_tests_counter,
prefix, priority_threshold, environment))
Publisher.__init__(self, "Graphite",
priority_threshold=priority_threshold,
environment=environment)
self._prefix = prefix
self._host = host
if port is not None:
self._port = int(port)
else:
self._port = None
self._failed_tests_counter = failed_tests_counter
self._passed_tests_counter = passed_tests_counter
self._graphite = statsd.StatsClient(
host=self._host, prefix=self._prefix, port=self._port)
def sanitize(self, text):
#graphite doesn't like colons
return text.replace(":",".")
def send(self, result):
"""Sends a result to Graphite.
Logs the result as either a success or a failure. Additionally,
logs how long the validation took, if a timer_name field is present on
the result.
"""
logger.debug("Checking if we should send {}".format(result))
if self.will_publish(result):
if result.is_failure():
logger.info("Sending {} to {}".format(result,
self._failed_tests_counter))
self._graphite.incr(self._failed_tests_counter)
else:
logger.info("Sending {} to {}".format(result,
self._passed_tests_counter))
self._graphite.incr(self._passed_tests_counter)
if result.timer_name:
logger.info("Sending {} to {}".format(result,
result.timer_name))
self._graphite.gauge(self.sanitize(result.timer_name), result.time)
def __repr__(self):
return "Graphite Publisher: {}:{} with prefix {} ({}/{}). {}".format(
self._host, self._port, self._prefix,
self._failed_tests_counter, self._passed_tests_counter,
            self._graphite)
# --- end of alarmageddon/publishing/graphite.py ---
class PublishFailure(Exception):
"""An exception thrown when sending a test result to a publisher fails.
:param publisher: The publisher that failed to publish.
:param result: The result that failed to publish.
"""
def __init__(self, publisher, result):
Exception.__init__(self, "Exception while publishing a TestResult.")
self._publisher = publisher
self._result = result
def result(self):
"""Returns the result that could not be published."""
return self._result
def publisher(self):
"""Returns the publisher that could not be published to."""
return self._publisher
def __str__(self):
return repr(self)
def __repr__(self):
return "Could not publish {0} to {1}".format(self._result,
self._publisher)
class EnrichmentFailure(Exception):
"""An exception thrown when the enrichment of a validation fails.
:param publisher: The publisher the validation was enriched for.
:param validation: The validation that failed to be enriched.
:param values: The values that the validation was enriched with.
"""
def __init__(self, publisher, validation, values):
Exception.__init__(self, "Exception while enriching a Validation.")
self._publisher = publisher
self._validation = validation
self._values = values
try:
self._valid_values = self._values._enriched_data
except AttributeError:
self._valid_values = "Missing enriched data field on validation"
def validation(self):
"""Returns the validation that failed to enrich."""
return self._validation
def publisher(self):
"""Returns the publisher that the enrichment was for."""
return self._publisher
def values(self):
"""Returns the enrichment values."""
return self._values
def __str__(self):
return repr(self)
def __repr__(self):
        return ("Could not enrich {} with {} (for {}). Validation contained "
                "these enriched fields at time of failure: "
                "{}").format(self._validation,
                             self._values,
                             self._publisher,
                             self._valid_values)
# --- end of alarmageddon/publishing/exceptions.py ---
from abc import abstractmethod
SECONDS_PER_MINUTE = 60
SECONDS_PER_HOUR = SECONDS_PER_MINUTE * 60
SECONDS_PER_DAY = SECONDS_PER_HOUR * 24
def _avg(readings):
"""Python 2.7 does not have an average function"""
return sum(readings, 0.0) / len(readings)
def _delta_str(delta):
"""Convert a timedelta to a nice string.
timedelta.__str__ prints out days and times awkwardly.
"""
days, rem = divmod(delta.total_seconds(), SECONDS_PER_DAY)
hours, rem = divmod(rem, SECONDS_PER_HOUR)
minutes, rem = divmod(rem, SECONDS_PER_MINUTE)
result = []
if days:
result.append('{0} day(s)'.format(days))
if hours:
result.append('{0} hour(s)'.format(hours))
if minutes:
result.append('{0} minute(s)'.format(minutes))
return ', '.join(result)
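# For example, a 90061-second delta (1 day, 1 hour, 1 minute, 1 second) renders
# as something like '1.0 day(s), 1.0 hour(s), 1.0 minute(s)'; seconds are
# intentionally dropped.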
class GraphiteExpectation(object):
    """An expectation placed on a list of Graphite readings"""
def __init__(self, validation, name):
self._validation = validation
self._name = name
@abstractmethod
def validate(self, readings, time_range):
"""make sure the expectation is met"""
pass
def _validate(self, bad_readings, higher_values_are_worse):
"""Derived instances should call this method passing it any readings
that were outside of specified parameters.
"""
num_bad_readings = len(bad_readings)
if num_bad_readings:
bad_readings = list(set(bad_readings))
bad_readings.sort(reverse=higher_values_are_worse)
            self._validation.fail(
                ("In the last {0} there were {1} readings that "
                 "exceeded allowed parameters. For example: {2}")
                .format(_delta_str(self._validation.time_range),
num_bad_readings,
', '.join([str(x) for x in bad_readings[:20]])))
def _validate_avg(self, average, is_bad_average):
"""Derived instances should call this method passing the average
reading and whether or not that average was within specified
parameters
"""
if is_bad_average:
self._validation.fail(
"In the last {0} the average reading was {1}"
.format(_delta_str(self._validation.time_range), average))
class GreaterThanExpectation(GraphiteExpectation):
"""Expect that a graphite metric is greater than a specified number"""
def __init__(self, validation, lower_bound):
GraphiteExpectation.__init__(self, validation,
"All values must be greater than {0}".format(lower_bound))
self._lower_bound = lower_bound
def validate(self, readings, time_range):
self._validate([x for x in readings if x is not None and
x <= self._lower_bound], False)
def __repr__(self):
return "{}: all > {} on {}".format(type(self).__name__, self._lower_bound, self._validation)
class LessThanExpectation(GraphiteExpectation):
    """Expect that a graphite metric is less than a specified number"""
def __init__(self, validation, upper_bound):
GraphiteExpectation.__init__(self, validation,
"All values must be less than {0}".format(upper_bound))
self._upper_bound = upper_bound
def validate(self, readings, time_range):
self._validate([x for x in readings if x is not None and
x >= self._upper_bound], True)
def __repr__(self):
return "{}: all < {} on {}".format(type(self).__name__, self._upper_bound, self._validation)
class AverageGreaterThanExpectation(GraphiteExpectation):
"""Expect that the average of a graphite metric is greater than a
specified number
"""
def __init__(self, validation, lower_bound):
GraphiteExpectation.__init__(self, validation,
"Average of all values must be greater than {0}"
.format(lower_bound))
self._lower_bound = lower_bound
def validate(self, readings, time_range):
average = _avg([x for x in readings if x is not None])
self._validate_avg(average, average <= self._lower_bound)
def __repr__(self):
        return "{}: average > {} on {}".format(type(self).__name__, self._lower_bound, self._validation)
class AverageLessThanExpectation(GraphiteExpectation):
"""Expect that the average of a graphite metric is less than a
specified number
"""
def __init__(self, validation, upper_bound):
GraphiteExpectation.__init__(self, validation,
"Average of all values must be less than {0}"
.format(upper_bound))
self._upper_bound = upper_bound
def validate(self, readings, time_range):
average = _avg([x for x in readings if x is not None])
self._validate_avg(average, average >= self._upper_bound)
def __repr__(self):
        return "{}: average < {} on {}".format(type(self).__name__, self._upper_bound, self._validation)
# --- end of alarmageddon/validations/graphite_expectations.py ---
from alarmageddon.validations.validation import Priority
from alarmageddon.validations.ssh import SshValidation
from alarmageddon.validations.utilities import format_node, format_cluster
import os
import re
import logging
logger = logging.getLogger(__name__)
# The output for Cassandra's nodetool status command has changed
# between versions. This new parser is designed to provide a more
# robust way of parsing the output of the nodetool status command. It
# has to make some assumptions about the output but it tries to make as
# few as possible.
#
# Assumptions:
# 1) There is a header line that lists all of the field names
# 2) The header line begins with a '-- '
# 3) All lines after the header describe nodes
# 4) All lines before the header can be ignored
# 5) Other assumptions I am even aware that I have made
#
# I'd like to illustrate how the output of the nodetool status command
# will be processed. Here is some example output for Cassandra
# version 2.0.9:
#
# xss = -ea -javaagent:/usr/share/cassandra/lib/jamm-0.2.5.jar -XX:+UseThreadPriorities -XX:ThreadPriorityPolicy=42 -Xms10240m -Xmx10240m -Xmn2048m -XX:+HeapDumpOnOutOfMemoryError -Xss256k
# Note: Ownership information does not include topology; for complete information, specify a keyspace
# Datacenter: us-east
# ===================
# Status=Up/Down
# |/ State=Normal/Leaving/Joining/Moving
# -- Address Load Tokens Owns Host ID Rack
# UN 10.168.7.222 77.82 GB 256 19.9% 27600dd2-9ebf-4501-820c-37dec6ea2e33 1c
# UN 10.168.14.117 80.9 GB 256 20.5% 064fd4da-6af8-4647-826c-a68ba038bc8d 1b
# UN 10.168.4.76 64.07 GB 256 16.4% a5cc2101-4806-47d6-9228-5a4a45e047fc 1d
# UN 10.168.7.208 85.2 GB 256 21.9% c56f5b4a-4863-4a24-a2fd-ee3f82baebf8 1c
# UN 10.168.4.72 83.75 GB 256 21.2% dc8cbbdc-d95f-4836-884e-2e12f4adb13a 1d
#
# We ignore every line up until the header line (it starts with a '--'
# sequence). That leaves the following:
#
# -- Address Load Tokens Owns Host ID Rack
# UN 10.168.7.222 77.82 GB 256 19.9% 27600dd2-9ebf-4501-820c-37dec6ea2e33 1c
# UN 10.168.14.117 80.9 GB 256 20.5% 064fd4da-6af8-4647-826c-a68ba038bc8d 1b
# UN 10.168.4.76 64.07 GB 256 16.4% a5cc2101-4806-47d6-9228-5a4a45e047fc 1d
# UN 10.168.7.208 85.2 GB 256 21.9% c56f5b4a-4863-4a24-a2fd-ee3f82baebf8 1c
# UN 10.168.4.72 83.75 GB 256 21.2% dc8cbbdc-d95f-4836-884e-2e12f4adb13a 1d
#
# We now parse the header line to find the boundaries of each node's
# fields. Here is the header line:
#
# -- Address Load Tokens Owns Host ID Rack
#
# We assume that all header names are made up of non-space characters
# possibly delimited by single spaces. That ensures that 'Host ID' is
# treated as a single header name and not two. Once we have the boundaries:
#
# |-- |Address |Load |Tokens |Owns |Host ID |Rack|
#
# we can use those boundaries to start chopping up the remaining lines
# in the output and creating Nodes from them.
#
# +---+-------------+----------+-------+------+-------------------------------------+----+
# |-- |Address |Load |Tokens |Owns |Host ID |Rack|
# +---+-------------+----------+-------+------+-------------------------------------+----+
# |UN |10.168.7.222 |77.82 GB |256 |19.9% |27600dd2-9ebf-4501-820c-37dec6ea2e33 |1c |
# |UN |10.168.14.117|80.9 GB |256 |20.5% |064fd4da-6af8-4647-826c-a68ba038bc8d |1b |
# |UN |10.168.4.76 |64.07 GB |256 |16.4% |a5cc2101-4806-47d6-9228-5a4a45e047fc |1d |
# |UN |10.168.7.208 |85.2 GB |256 |21.9% |c56f5b4a-4863-4a24-a2fd-ee3f82baebf8 |1c |
# |UN |10.168.4.72 |83.75 GB |256 |21.2% |dc8cbbdc-d95f-4836-884e-2e12f4adb13a |1d |
# +---+-------------+----------+-------+------+-------------------------------------+----+
#
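#
# A minimal usage sketch (assuming the example nodetool output above is
# stored in the string nodetool_status_output):
#
#   parser = NodetoolStatusParser()
#   nodes = parser.parse(nodetool_status_output)
#   # nodes[0].ip_address -> '10.168.7.222'
#   # nodes[0].status     -> Status.UP
#   # nodes[0].state      -> State.NORMAL
#   # nodes[0].owns       -> 19.9
#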
def _is_header_line(line):
"""Determines if line represents the headers for the nodetool status
output.
"""
return line.startswith('-- ')
def _is_data_center_line(line):
"""Determines if line introduces nodes from a Datacenter."""
return line.startswith('Datacenter: ')
def _parse_status(text):
"""receives a Node Status (e.g. 'U' or 'D') and returns the
corresponding Status code.
"""
if text:
return Status.from_text(text[0])
else:
return Status.UNKNOWN
def _parse_state(text):
"""receives a Node State (e.g. 'J', 'L', etc.) and returns the
corresponding State code.
"""
if text:
return State.from_text(text[1])
else:
return State.UNKNOWN
def _get_percent(text):
"""If text is formatted like '33.2%', remove the percent and convert
to a float. Otherwise, just convert to a float.
"""
if not text:
return None
if text.endswith('%'):
text = text[:-1]
return float(text.strip())
class Status(object):
"""An enum-like object that represents the status of a Cassandra Node
"""
UNKNOWN, UP, DOWN = list(range(3))
@staticmethod
def from_text(text):
if text == 'U':
return Status.UP
elif text == 'D':
return Status.DOWN
else:
return Status.UNKNOWN
@staticmethod
def to_text(value):
"""Convert Status to String"""
if value == Status.UP:
return 'Up'
elif value == Status.DOWN:
return 'Down'
else:
return 'Unknown'
class State(object):
"""An enum-like object that represents the state of a Cassandra Node
"""
UNKNOWN, NORMAL, LEAVING, JOINING, MOVING = list(range(5))
@staticmethod
def from_text(text):
if text == 'N':
return State.NORMAL
elif text == 'L':
return State.LEAVING
elif text == 'J':
return State.JOINING
elif text == 'M':
return State.MOVING
else:
return State.UNKNOWN
@staticmethod
def to_text(value):
"""Convert State to String"""
if value == State.NORMAL:
return 'Normal'
elif value == State.LEAVING:
return 'Leaving'
elif value == State.JOINING:
return 'Joining'
elif value == State.MOVING:
return 'Moving'
else:
return 'Unknown'
class Node(object):
"""Information about a Cassandra node including its load, what percent
of the ring it owns, its state, etc.
"""
def __init__(self, ip_address, status=Status.UNKNOWN,
state=State.UNKNOWN, load=None, tokens=None,
owns=None, host_id=None, rack=None):
self.ip_address = ip_address
self.status = status
self.state = state
self.load = load
self.tokens = tokens
self.owns = owns
self.host_id = host_id
self.rack = rack
def __str__(self):
return ("Address: %s, Status: %s, State: %s, Load: %s, " +
"Tokens: %d, Owns: %s, Host ID: %s, Rack: %s") % (
self.ip_address, Status.to_text(self.status),
State.to_text(self.state), self.load, self.tokens,
self.owns, self.host_id, self.rack)
class _Header(object):
"""Information about a field header."""
def __init__(self, name, start_pos, length, last_header=False):
self.name = name
self.start_pos = start_pos
self.length = length
self.last_header = last_header
def __str__(self):
return "%s (%s,%s)" % (self.name, self.start_pos, self.length)
class NodetoolStatusParser(object):
"""Parses the output of the Cassandra nodetool status command and
tries to make sense of it despite changes made to the format.
"""
def __init__(self):
self.__headers = []
def parse(self, status_output):
found_header = False
nodes = []
for line in status_output.split(os.linesep):
if _is_data_center_line(line):
found_header = False
self.__headers = None
elif _is_header_line(line):
found_header = True
self.__headers = self.__parse_headers(line)
elif found_header:
# If we've already parsed one node and we find a blank line, ignore the rest of the
# output because it's not information about nodes; it's some other text output that
# we won't parse at the moment.
if nodes and not line.strip():
break
nodes.append(self.__parse_node(line))
logger.info("Found these Cassandra nodes:{}".format(nodes))
return nodes
def __parse_headers(self, line):
headers = []
        # All headers are delimited by 2 or more spaces. Note the
# sentinel added to the end to simplify header processing
tokens = re.split(r"(\s{2,})", line) + ['']
name = None
start_pos = 0
for token in tokens:
if len(token) and not re.match(r'\s+', token):
name = token
else:
length = len(name) + len(token)
headers.append(_Header(name, start_pos, length,
len(token) == 0))
start_pos = start_pos + length
return headers
def __parse_node(self, line):
"""Parses a line and returns a Node object"""
node = Node(self.__get_ip_address(line),
self.__get_status(line),
self.__get_state(line),
self.__get_load(line),
self.__get_tokens(line),
self.__get_owns(line),
self.__get_host_id(line),
self.__get_rack(line))
return node
# These methods are broken out because different fields have different
# types and field names might change in a future release.
def __get_ip_address(self, line):
return self.__get_field('address', line)
def __get_status(self, line):
return _parse_status(self.__get_field('--', line))
def __get_state(self, line):
return _parse_state(self.__get_field('--', line))
def __get_load(self, line):
return self.__get_field('load', line)
def __get_tokens(self, line):
tokens = self.__get_field('tokens', line)
if tokens:
return int(tokens)
else:
return None
def __get_owns(self, line):
# The following Cassandra issue (https://issues.apache.org/jira/browse/CASSANDRA-10176) causes
# question mark characters (?) to appear in the 'Owns' column of nodetool status' output.
owns = self.__get_field('owns', line)
if owns == '?':
return None
else:
return _get_percent(owns)
def __get_host_id(self, line):
return self.__get_field('host id', line)
def __get_rack(self, line):
return self.__get_field('rack', line)
def __get_field(self, field_name, line):
header = self.__find_header(field_name)
if header:
if header.last_header:
# It's the last header so grab all the rest of the
# text on the line.
return line[header.start_pos:].strip()
else:
# It's not the last header so just grab as much text
# as the header length calls for.
return line[header.start_pos:header.start_pos +
header.length].strip()
else:
return None
def __find_header(self, name):
"""find a header by name case-insensitively"""
for header in self.__headers:
if header.name.lower() == name.lower():
return header
return None
class CassandraStatusValidation(SshValidation):
"""Validate that the Cassandra ring is within expected parameters.
Check that the specified Cassandra ring is in the specified
state and that the ring ownership of the nodes is within a certain
threshold.
    :param ssh_context: An SshContext class, for accessing the hosts.
:param service_state: The expected service state value (defaults to
"UN").
:param number_nodes: The expected number of cassandra nodes in the ring.
:param owns_threshold: The maximum percentage of the ring owned by a node.
:param priority: The Priority level of this validation.
:param timeout: How long to attempt to connect to the host.
:param hosts: The hosts to connect to.
:param cluster_name: the name of the cluster (helps when you're monitoring
        multiple clusters). Defaults to 'anonymous'.
    .. note::
This is not designed for multi region Cassandra clusters.
"""
def __init__(self, ssh_context, service_state="UN",
number_nodes=5, owns_threshold=40,
priority=Priority.NORMAL, timeout=None,
hosts=None, cluster_name='anonymous'):
SshValidation.__init__(self, ssh_context,
"Cassandra nodetool status",
priority=priority,
timeout=timeout, hosts=hosts)
# Service State is different from Service Status but I don't
# want to break backwards compatibility so we parse the
# service_state which is really made up of the service state
# and service status values.
if service_state and len(service_state) == 2:
self.service_status = Status.from_text(service_state[0])
self.service_state = State.from_text(service_state[1])
else:
            self.service_state = State.NORMAL
            self.service_status = Status.UP
self.number_nodes = number_nodes
self.owns_threshold = owns_threshold
self.cluster_name = cluster_name
def perform_on_host(self, connection):
"""Runs nodetool status and parses the output."""
        output = connection.run('nodetool status', warn=True).stdout
host = connection.host
if "Exception" in output:
self.fail_on_host(host, ("An exception occurred while " +
"checking Cassandra cluster health " +
"on {0} ({1})").format(
format_node(self.cluster_name, host),
output))
parsed = NodetoolStatusParser().parse(output)
self.check(host, parsed)
def check(self, host, nodes):
"""Compares the results of nodetool status to the expected results."""
#Number of nodes check
if len(nodes) < self.number_nodes:
self.fail_on_host(host,
("Cassandra cluster: {0} has {1} nodes but " +
"should have {2} nodes.").format(
format_cluster(self.cluster_name),
len(nodes), self.number_nodes))
# Validate each node's properties in nodetool's nodes
for node in nodes:
logger.debug("Checking node: {}".format(node))
# If a node is joining the cluster, don't check it for errors.
if node.state == State.JOINING:
continue
# check for state
if node.state != self.service_state:
self.fail_on_host(host, ("Cassandra node {0} is in " +
"state {1} but the expected state is {2}").format(
format_node(self.cluster_name, node.ip_address),
State.to_text(node.state),
State.to_text(self.service_state)))
# check for status
if node.status != self.service_status:
self.fail_on_host(host, ("Cassandra node {0} has " +
"status {1} but the expected status is {2}").format(
format_node(self.cluster_name, node.ip_address),
                    Status.to_text(node.status),
                    Status.to_text(self.service_status)))
# check for owns threshold
if node.owns is not None:
if node.owns > self.owns_threshold:
self.fail_on_host(host,
("Cassandra node {0} owns {1} " +
"percent of the ring which exceeds " +
"threshold of {2}").format(
format_node(self.cluster_name, node.ip_address),
node.owns,
self.owns_threshold)) | Alarmageddon | /Alarmageddon-1.1.2-py3-none-any.whl/alarmageddon/validations/cassandra.py | cassandra.py |
import time
import os
import requests
import copy
import six.moves.urllib.parse as urlparse
from alarmageddon.validations.validation import Validation, Priority
from alarmageddon.validations.json_expectations import \
ExpectedJsonValueLessThan, \
ExpectedJsonValueGreaterThan, \
ExpectedJsonEquality
from alarmageddon.validations.http_expectations import \
ExpectedContentType, \
ResponseExpectation, \
ExpectedHeader, \
ExpectContainsText, \
_ExpectedStatusCodes
import logging
logger = logging.getLogger(__name__)
class HttpValidation(Validation):
"""A Validation that executes an HTTP request and then performs zero or
more checks on the response.
"""
def __init__(self, method, url, data=None, headers=None,
priority=Priority.NORMAL, timeout=None,
group=None, retries=1, ignore_ssl_cert_errors=False,
auth=None):
"""Creates an HttpValidation object that will make an HTTP request to
the provided URL passing the provided headers.
"""
Validation.__init__(self, "{0} {1}".format(method, url),
priority=priority,
timeout=timeout,
group=group)
self._url = url
self._data = data
self._method = method
self._headers = copy.copy(headers) or {}
self._response_code_expectation = _ExpectedStatusCodes(set([200]))
self._expectations = []
self._retries = retries
self._ignore_ssl_cert_errors = ignore_ssl_cert_errors
self._auth = auth or ()
self._elapsed_time = -1
@staticmethod
def get(url, **kwargs):
"""Create an HttpValidation that will GET to the specified url passing
specified headers.
headers - a dictionary where each key is a header name and the
value that corresponds to the key is the header value.
priority - the priority of the call; this determines how
failures are routed.
timeout - the number of seconds the HTTP request is allowed to take.
group - the group to include this Validation in
"""
return HttpValidation("GET", url, **kwargs)
@staticmethod
def post(url, **kwargs):
"""Create an HttpValidation that will POST to the specified url passing
specified headers and payload.
headers - a dictionary where each key is a header name and the
value that corresponds to the key is the header value.
data - data that is sent along with the request
priority - the priority of the call; this determines how
failures are routed.
timeout - the number of seconds the HTTP request is allowed to take.
group - the group to include this Validation in
"""
return HttpValidation("POST", url, **kwargs)
@staticmethod
def put(url, **kwargs):
"""Create an HttpValidation that will PUT to the specified url passing
specified headers and payload.
headers - a dictionary where each key is a header name and the
value that corresponds to the key is the header value.
data - data that is sent along with the request
priority - the priority of the call; this determines how
failures are routed.
timeout - the number of seconds the HTTP request is allowed to take.
group - the group to include this Validation in
"""
return HttpValidation("PUT", url, **kwargs)
@staticmethod
def options(url, **kwargs):
"""Create an HttpValidation that will retrieve OPTIONS for the
specified url passing specified headers.
headers - a dictionary where each key is a header name and the
value that corresponds to the key is the header value.
priority - the priority of the call; this determines how
failures are routed.
timeout - the number of seconds the HTTP request is allowed to take.
group - the group to include this Validation in
"""
return HttpValidation("OPTIONS", url, **kwargs)
@staticmethod
def head(url, **kwargs):
"""Create an HttpValidation that will retrieve the HEAD of the
specified url passing specified headers.
headers - a dictionary where each key is a header name and the
value that corresponds to the key is the header value.
priority - the priority of the call; this determines how
failures are routed.
timeout - the number of seconds the HTTP request is allowed to take.
group - the group to include this Validation in
"""
return HttpValidation("HEAD", url, **kwargs)
def perform(self, group_failures):
"""Perform the HTTP request and validate the response."""
for i in range(self._retries):
logger.debug("Attempt {} for {} {}".format(i, self._method, self._url))
try:
resp = requests.request(
self._method, self._url, data=self._data,
headers=self._headers, verify=self._get_verify(),
auth=self._auth, timeout=self.timeout)
logger.debug("Got response {}".format(resp))
self._elapsed_time = resp.elapsed.total_seconds()
self._check_expectations(resp)
break
except Exception as ex:
if type(ex) is requests.exceptions.ReadTimeout:
self._elapsed_time = self.timeout
if i == self._retries - 1:
raise ex
time.sleep(1)
def get_elapsed_time(self):
return self._elapsed_time
def fail(self, reason):
"""Causes this HttpValidation to fail with the given reason."""
Validation.fail(self, reason)
def duplicate_with_hosts(self, host_names, port=None):
"""Returns a list of new HttpValidation that are identical to this
HttpValidation except with the host name replaced by the
elements of host_names.
"""
parts = urlparse.urlsplit(self._url)
# If no port is specified, see if the original URL has a port
# and use it.
if not port:
sub_parts = parts.netloc.split(':')
if len(sub_parts) == 2:
port = int(sub_parts[1])
results = []
for host_name in host_names:
if port:
host_name = "{0}:{1}".format(host_name, port)
modified_parts = urlparse.SplitResult(
scheme=parts.scheme,
netloc=host_name,
path=parts.path,
query=parts.query,
fragment=parts.fragment)
url = urlparse.urlunsplit(modified_parts)
result = HttpValidation(
self._method,
url,
data=self._data,
headers=copy.deepcopy(self._headers),
priority=self.priority,
timeout=self.timeout,
group=self.group,
retries=self._retries,
ignore_ssl_cert_errors=self._ignore_ssl_cert_errors)
for expectation in self._expectations:
result.add_expectation(expectation)
results.append(result)
return results
def timer_name(self):
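        # Builds a dotted timer name from the URL, e.g. (illustrative):
        #   "https://api.example.com/v1/health?x=1" with method GET
        #   -> "https.com.example.api.v1.health.x=1.GET"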
parsed = urlparse.urlparse(self._url)
tokens = parsed.netloc.split(".")
tokens = [parsed.scheme] + tokens[::-1]
        #[1:] to skip the first /
path = parsed.path[1:].split("/")
tokens.extend(path)
if parsed.query:
tokens.append(parsed.query)
tokens.append(self._method)
result = ".".join(tokens)
return result
def send_header(self, name, value):
"""adds an HTTP header with the specified name and value to the
request when it's sent
"""
self._headers[name] = value
return self
def add_expectation(self, expectation):
"""Add a custom expecation to the Validation"""
if isinstance(expectation, ResponseExpectation):
self._expectations.append(expectation)
return self
else:
raise ValueError("attempt to add expectation that does not" +
" derive from ResponseExpectation.")
def expect_header(self, name, value):
"""Add an expectation that the HTTP response will contain a header
with the specified name and value.
"""
self.add_expectation(ExpectedHeader(name, value))
return self
def expect_contains_text(self, text):
"""Add an expectation that the HTTP response will contain a particular
string.
"""
self.add_expectation(ExpectContainsText(text))
return self
def expect_status_codes(self, status_codes):
"""Add an expectation that the HTTP response will have one of the
specified status_codes.
"""
self._response_code_expectation = _ExpectedStatusCodes(status_codes)
return self
def expect_content_type(self, content_type):
"""Add an expectation that the HTTP response's content type will be
equal to the specified content_type.
"""
self.add_expectation(ExpectedContentType(content_type))
return self
def expect_json_property_value(self, json_property_path, expected_value):
"""Add an expectation that the HTTP response will be JSON and contain a
property (found by traversing json_property_path) with the
specified value.
"""
self.add_expectation(ExpectedJsonEquality(
json_property_path, expected_value))
return self
def expect_json_property_value_less_than(self, json_property_path,
less_than):
"""Add an expectation that the HTTP response will be JSON and contain
a numeric property (found by traversing json_property_path) less
than less_than.
"""
self.add_expectation(ExpectedJsonValueLessThan(
json_property_path, less_than))
return self
def expect_json_property_value_greater_than(self, json_property_path,
greater_than):
"""Add an expectation that the HTTP response will be JSON and contain
a numeric property (found by traversing json_property_path) greater
than greater_than.
"""
self.add_expectation(ExpectedJsonValueGreaterThan(
json_property_path, greater_than))
return self
def _check_expectations(self, response):
"""An HttpValidation without any expectations always fails"""
self._expectations.append(self._response_code_expectation)
if not self._expectations:
self.fail("no expectations set")
else:
for expectation in self._expectations:
expectation.validate(self, response)
def _get_verify(self):
"""returns the verify parameter we send to the HTTP requests request
method.
"""
if self._ignore_ssl_cert_errors:
return False
certificates_file = HttpValidation._get_certificates_file()
if certificates_file:
return certificates_file
else:
return True
@staticmethod
def _get_certificates_file():
"""returns the path to the certificates file (not needed on all
platforms)
"""
return os.getenv("CERTS_FILE")
def __str__(self):
return "HTTP Validation - method: {0}, url: {1}"\
.format(self._method, self._url)
def __repr__(self):
return "HTTP Validation - method: {0}, url: {1}"\
.format(self._method, self._url) | Alarmageddon | /Alarmageddon-1.1.2-py3-none-any.whl/alarmageddon/validations/http.py | http.py |
from alarmageddon.validations.validation import Priority
from alarmageddon.validations.ssh import SshValidation
import re
from collections import Counter
from alarmageddon.validations.utilities import format_node, format_cluster
import logging
logger = logging.getLogger(__name__)
class KafkaStatusValidation(SshValidation):
"""Validate that the Kafka cluster has all of it's partitions
distributed across the cluster.
    :param ssh_context: An SshContext class, for accessing the hosts.
:param zookeeper_nodes: Kafka zookeeper hosts and ports in CSV.
e.g. "host1:2181,host2:2181,host3:2181"
:param kafka_list_topic_command: Kafka command to list topics
(defaults to "/opt/kafka/bin/kafka-list-topic.sh")
:param priority: The Priority level of this validation.
:param timeout: How long to attempt to connect to the host.
:param hosts: The hosts to connect to.
:param cluster_name: the name of the cluster (helps when you're monitoring
        multiple clusters). Defaults to 'anonymous'.
"""
def __init__(self, ssh_context,
zookeeper_nodes,
kafka_list_topic_command="/opt/kafka/bin/kafka-list-topic.sh",
priority=Priority.NORMAL, timeout=None,
hosts=None, cluster_name='anonymous'):
SshValidation.__init__(self, ssh_context,
"Kafka partition status",
priority=priority,
timeout=timeout,
hosts=hosts)
self.kafka_list_topic_command = kafka_list_topic_command
self.zookeeper_nodes = zookeeper_nodes
self.cluster_name = cluster_name
def perform_on_host(self, connection):
"""Runs kafka list topic command on host"""
host = connection.host
output = connection.run(
self.kafka_list_topic_command +
" --zookeeper " +
            self.zookeeper_nodes, warn=True).stdout
error_patterns = [
'No such file', 'Missing required argument', 'Exception']
if any(x in output for x in error_patterns):
self.fail_on_host(host, ("An exception occurred while " +
"checking Kafka cluster health " +
"on {0} ({1})").format(
format_node(self.cluster_name,
host),
output))
parsed = re.split(r'\t|\n', output)
topics = [parsed[i] for i in range(0, len(parsed), 5)]
leaders = [parsed[i] for i in range(2, len(parsed), 5)]
tuples = list(zip(topics, leaders))
duplicates = [x for x, y in list(Counter(tuples).items()) if y > 1]
if len(duplicates) != 0:
duplicates_str =", ".join("%s has %s" %
(dup[0], dup[1]) for dup in duplicates)
self.fail_on_host(host, ("Kafka partitions are out of sync " +
"on {0}. Multiple leaders for the same " +
"partition for the same replica: {1}").format(
format_cluster(self.cluster_name),
duplicates_str)
) | Alarmageddon | /Alarmageddon-1.1.2-py3-none-any.whl/alarmageddon/validations/kafka.py | kafka.py |
import re
from alarmageddon.validations.http_expectations import ResponseExpectation
class ExpectedJsonPredicate(ResponseExpectation):
"""An expectation that an HTTP response will be JSON and have a property
with a specified value.
"""
def __init__(self, json_property_path, value):
"""Creates an expectation that an HTTP response will have a JSON
payload with a property equal to the specified value.
"""
ResponseExpectation.__init__(self)
self.json_property_path = json_property_path
self.value = value
def validate(self, validation, response):
"""Validates that the HTTP response is JSON and that it contains a
property (found by traversing self.json_property_path) equal to
self.value
"""
try:
json = response.json()
except ValueError:
validation.fail(
"response body was not JSON: {0}, Status Code: {1}"
.format(response.text, response.status_code))
actual_value = _JsonQuery.find(json, self.json_property_path)
self.validate_value(validation, self.value, actual_value)
def validate_value(self, validation, expected_value, actual_value):
"""validates a JSON value"""
validation.fail("validate_value must be overriden by derived classes")
def __repr__(self):
return "{}: {} should be {}".format(type(self).__name__, self.json_property_path, self.value)
class ExpectedJsonEquality(ExpectedJsonPredicate):
"""expects that a JSON value is equal to a specified value"""
def __init__(self, json_property_path, value):
ExpectedJsonPredicate.__init__(self, json_property_path, value)
def validate_value(self, validation, expected_value, actual_value):
if type(actual_value) is list:
if expected_value not in actual_value:
validation.fail(
("expected JSON property {0}"
" to contain '{1}', actual value: '{2}'")
.format(
self.json_property_path, expected_value, actual_value))
else:
if actual_value is None or actual_value != expected_value:
validation.fail(
("expected JSON property {0}"
" to be '{1}', actual value: '{2}'")
.format(
self.json_property_path, expected_value, actual_value))
def __repr__(self):
return "{}: {} should be {}".format(type(self).__name__, self.json_property_path, self.value)
class ExpectedJsonValueLessThan(ExpectedJsonPredicate):
"""Expects that a numeric JSON value is less than a specified value"""
def __init__(self, json_property_path, value):
ExpectedJsonPredicate.__init__(self, json_property_path, value)
def validate_value(self, validation, expected_value, actual_value):
if type(actual_value) is list:
validation.fail(
"cannot use array wildcard with less than assertion")
elif actual_value is None:
validation.fail(
"missing JSON property {0}".format(self.json_property_path))
elif float(actual_value) >= float(expected_value):
validation.fail("expected JSON property {0} to be less "
"than {1:.2f} but it was {2:.2f}"
.format(self.json_property_path,
float(expected_value),
float(actual_value)))
def __repr__(self):
return "{}: {} < {}".format(type(self).__name__, self.json_property_path, self.value)
class ExpectedJsonValueGreaterThan(ExpectedJsonPredicate):
"""Expects that a numeric JSON value is greater than a specified value"""
def __init__(self, json_property_path, value):
ExpectedJsonPredicate.__init__(self, json_property_path, value)
def validate_value(self, validation, expected_value, actual_value):
if type(actual_value) is list:
validation.fail(
"cannot use array wildcard with greater than assertion")
elif actual_value is None:
validation.fail(
"missing JSON property {0}".format(self.json_property_path))
elif float(actual_value) <= float(expected_value):
validation.fail("expected JSON property {0} to be greater "
"than {1:.2f} but it was {2:.2f}"
.format(self.json_property_path,
float(expected_value),
float(actual_value)))
def __repr__(self):
return "{}: {} > {}".format(type(self).__name__, self.json_property_path, self.value)
INDEXED_ARRAY = re.compile(r"([^[]+)\[(\d+|\*)\]")
class _JsonQuery(object):
"""Simple JSON query executor"""
def __init__(self):
pass
# If your JSON response contains arrays, you can specify a particular
# element of that array with the following syntax:
#
# path.to.array[index]
#
# Example:
#
# person.address[0]
#
# This regular expression helps match property paths using this syntax.
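    #
    # Example (illustrative data):
    #
    #   _JsonQuery.find({"person": {"address": ["a", "b"]}}, "person.address[1]")
    #   # -> "b"
    #   _JsonQuery.find({"person": {"address": ["a", "b"]}}, "person.address[*]")
    #   # -> ["a", "b"]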
@staticmethod
def find(json, property_path):
"""Finds a property by traversing self.json_property_path"""
root = json
try:
for path_elem in property_path.split('.'):
match = INDEXED_ARRAY.search(path_elem)
if match:
# We have an array property and the caller has specified
# which element of the array to validate against
name = match.group(1)
if match.group(2) == "*":
# caller has supplied a wildcard
# return the list element
root = root[name]
else:
index = int(match.group(2))
root = root[name][index]
else:
root = root[path_elem]
except Exception:
return None
return root | Alarmageddon | /Alarmageddon-1.1.2-py3-none-any.whl/alarmageddon/validations/json_expectations.py | json_expectations.py |
import time
from alarmageddon.validations.validation import Validation, Priority
from pika import ConnectionParameters
from pika.adapters import BlockingConnection
from pika.credentials import PlainCredentials
from pika.exceptions import AMQPError
import logging
logger = logging.getLogger(__name__)
class RabbitMqContext(object):
"""information needed to connect and interact with RabbitMQ"""
def __init__(self, host, port, user_name, password):
self.host = host
self.port = port
self.user_name = user_name
self.password = password
def get_credentials(self):
"""get "plain" credentials based on this object's user name and
password
"""
return PlainCredentials(self.user_name, self.password)
def get_connection(self, timeout=None):
"""Connects to RabbitMQ and returns the connection object
Third Party (pika) Bug: https://github.com/pika/pika/issues/354 - Once
this bug is fixed we can take out our own retrying logic and use pika's
retry logic. In the mean time, connection failure messages will be
inaccurate; they'll say that only one connection attempt was made.
"""
return BlockingConnection(
ConnectionParameters(host=self.host,
credentials=self.get_credentials(),
connection_attempts=1,
retry_delay=0,
socket_timeout=timeout))
class RabbitMqValidation(Validation):
"""A Validation that can be held against a RabbitMQ server"""
def __init__(self, rabbitmq_context, name, queue_name, max_queue_size,
priority=Priority.NORMAL, timeout=None, num_attempts=4,
seconds_between_attempts=2, group=None,
ignore_connection_failure=False):
"""Creates a RabbitMqValidation object."""
Validation.__init__(self,
("queue '{0}' should have less than {1} messages in in " +
"it on RabbitMQ host: '{2}' ({3})")
.format(queue_name, max_queue_size, rabbitmq_context.host, name),
priority=priority, timeout=timeout, group=group)
self.rabbitmq_context = rabbitmq_context
self.max_queue_size = max_queue_size
self.queue_name = queue_name
self.num_attempts = num_attempts
self.seconds_between_attempts = seconds_between_attempts
self.ignore_connection_failure = ignore_connection_failure
def perform(self, group_failures):
"""Perform the validation. If the validation fails, call self.fail
passing it the reason for the failure.
"""
try:
(conn, chan) = self._connect()
except AMQPError as ex:
#if we're here we're intentionally ignoring the failure
return
try:
queue = chan.queue_declare(self.queue_name, passive=True)
message_count = queue.method.message_count
if message_count > self.max_queue_size:
self.fail("Too many messages in queue ({0} messages)."
.format(message_count))
except AMQPError as ex:
self.fail("RabbitMQ exception throw from host: {0}. {1}"
.format(self.rabbitmq_context.host, repr(ex)))
finally:
if conn:
conn.close()
def _connect(self):
"""connect to the RabbitMQ server"""
for attempt in range(1, self.num_attempts + 1):
try:
conn = self.rabbitmq_context.get_connection(self.timeout)
chan = conn.channel()
return (conn, chan)
except AMQPError as ex:
if attempt >= self.num_attempts:
if self.ignore_connection_failure:
raise ex
else:
self.fail(
"Could not access RabbitMQ host {0} because {1}"
.format(self.rabbitmq_context.host, repr(ex)))
else:
time.sleep(self.seconds_between_attempts) | Alarmageddon | /Alarmageddon-1.1.2-py3-none-any.whl/alarmageddon/validations/rabbitmq.py | rabbitmq.py |
from .exceptions import EnrichmentFailure, ValidationFailure
GLOBAL_NAMESPACE = "GLOBAL"
class Priority(object):
"""Priority levels that indicate how severe a validation failure is.
Validations have a priority that publishers use to determine whether
or not to publish in the case of failure.
"""
    # If a LOW priority Validation fails, the failure will be recorded for posterity
# but not reported to a human (e.g. logged in continuous integration server
# job history but not reported to IRC/HipChat)
LOW = 1
# If a NORMAL priority Validation fails, the failure should be reported to
# a human but it should not wake up a human (e.g. send a notification to
# IRC/HipChat, Graphite). Additionally the failure should be logged for
# posterity as in the case of LOW priority failures.
NORMAL = 2
# If a CRITICAL priority Validation fails, a human should be woken up
# (e.g. create an incident in PagerDuty). Additionally the failure should
# be more politely reported to humans (e.g. via IRC/HipChat, Graphite,
# etc.) and recorded for posterity as in the case of NORMAL and LOW
# priority failures.
CRITICAL = 3
@staticmethod
def string(priority):
"""Return the name of the priority (e.g. normal, low, critical)"""
if priority == Priority.NORMAL:
return "normal"
elif priority == Priority.LOW:
return "low"
elif priority == Priority.CRITICAL:
return "critical"
else:
return "unknown priority: {0}".format(priority)
class Validation(object):
"""The base class for validations.
The base class for all classes that represent some form of validation
(e.g. some expected system property that can be checked and categorized as
either passing or failing). Examples of Validations include: an HTTP
service returning an expected result in a specified amount of time, an
Upstart process on a Linux server is in the running state, a Message
Queue's queue length is lower than a maximum value.
:param name: The name of this validation.
:param priority: The :py:class:`.Priority` level of this validation.
:param timeout: How long this validation can take before being considered
a failure. If None, then the validation will never be considered a
failure due to timing out.
:param group: The group this validation belongs to.
"""
def __init__(self, name, priority=Priority.NORMAL,
timeout=None, group=None):
"""Creates a Validation object with the supplied name and priority.
Arguments:
name -- The name of this Validation
Keyword Arguments
priority -- The priority of this Validation.
timeout -- If this validation takes longer than this many seconds,
it will be considered a failure.
group -- The group this validation belongs to.
"""
self.name = name
self.priority = priority
self.timeout = timeout
self.group = group
#this should never be directly manipulated without very good reason
#it is used to store extra data for publishers, and the primary
        #method of interaction should be the enrich and get_enriched
#functions in publisher.py
self._enriched_data = {GLOBAL_NAMESPACE: {}}
#determines the partial ordering of the validations
#Alarmageddon guarantees that all Validations with lower order than
#this Validation's order will run before this Validation runs.
#most validations have no reason to change this
self.order = 0
def perform(self, group_failures):
"""Perform the validation.
If the validation fails, call self.fail passing it the reason for
the failure.
:param kwargs: A dictionary containing information from the whole
Alarmageddon run.
"""
pass
def fail(self, reason):
"""Log the validation as a failure.
:param reason: The cause of the failure.
"""
raise ValidationFailure(reason)
def get_elapsed_time(self):
"""Return the amount of time this validation took.
The :py:class:`.reporter.Reporter` will check here before using
the call time.
Overwrite this if you need more precise timing -
eg, if you want to know how long an http request took, as opposed
to how long that whole test took to execute.
This function should return a number, not a timedelta.
"""
raise NotImplementedError
def __str__(self):
return "Validation {{ name: '{0}' priority: '{1}' timeout: {2}}}"\
.format(self.name,
Priority.string(self.priority),
self.timeout)
def timer_name(self):
"""Return the name of the timer that corresponds to this validation.
Used to indicate where a publisher should log the time taken.
"""
return None
def enrich(self, publisher, values, force_namespace=False):
"""Adds publisher-specific information to the validation.
Override at your own peril! Publishers are expected to assume the
standard behavior from this function.
:param publisher: The publisher to add enriched data for.
:param values: The enriched data to add to this validation.
:param force_namespace: If True, will never add the data to the global
namespace.
"""
namespace = str(type(publisher))
enriched = self._enriched_data
if namespace in enriched:
raise EnrichmentFailure(publisher, self, values)
enriched[namespace] = {}
for key, value in list(values.items()):
if force_namespace:
enriched[namespace][key] = value
else:
if key not in enriched[GLOBAL_NAMESPACE]:
enriched[GLOBAL_NAMESPACE][key] = value
else:
enriched[namespace][key] = value
return self
def get_enriched(self, publisher, force_namespace=False):
"""Retrieve the appropriate publisher-specific data.
Will retrieve all global enriched data along with any extra
publisher specific data. This means that if you enrich a
validation for more than one publisher, this function may
return a superset of the enriched data for a given publisher.
Override at your own peril! Publishers are expected to assume the
standard behavior from this function.
:param publisher: The publisher to retrieve enriched data for.
:param force_namespace: If True, will not retrieve global enrichments.
"""
namespace = str(type(publisher))
enriched = self._enriched_data
#copy global
data = {}
if not force_namespace:
data.update(enriched[GLOBAL_NAMESPACE])
try:
data.update(enriched[namespace])
except KeyError:
pass
return data
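    # Example (my_publisher is a hypothetical publisher instance):
    #   v = Validation("health check")
    #   v.enrich(my_publisher, {"runbook": "http://wiki.example.com/runbook"})
    #   v.get_enriched(my_publisher)
    #   # -> {"runbook": "http://wiki.example.com/runbook"}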
class GroupValidation(Validation):
"""A validation that checks the number of failures in a test group.
The priority level will be set dynamically based on the number of
failures and the supplied thresholds.
:param name: The name of this validation.
:param checked_group: The name of the group this validation will check.
:param low_threshold: The number of failures at which this validation
will itself fail.
:param normal_threshold: The number of failures at which this validation
will become NORMAL priority.
:param critical_threshold: The number of failures at which this validation
will become CRITICAL priority.
:param order: This validation will run after all validations of lower
order have run. Used when order matters - eg, creating a GroupValidation
for a group of GroupValidations.
:param group: The group this validation belongs to.
"""
def __init__(self, name, checked_group, low_threshold=float("inf"),
normal_threshold=float("inf"),
critical_threshold=float("inf"),
order=1, group=None):
Validation.__init__(self,
name, priority=Priority.LOW, timeout=None, group=group)
self.low_threshold = low_threshold
self.normal_threshold = normal_threshold
self.critical_threshold = critical_threshold
self._clean_thresholds()
self.order = order
self.checked_group = checked_group
def _clean_thresholds(self):
"""Ensure that the thresholds are consistent.
`low_threshold` must be less than `normal_threshold` which must be
        less than `critical_threshold`. If necessary, this function will alter
the thresholds to ensure this condition is met.
"""
if self.normal_threshold > self.critical_threshold:
self.normal_threshold = self.critical_threshold
if self.low_threshold > self.normal_threshold:
self.low_threshold = self.normal_threshold
def perform(self, group_failures):
"""Perform the validation."""
failures = len(group_failures[self.checked_group])
messages = group_failures[self.checked_group]
if failures >= self.low_threshold:
self._set_priority(failures)
self.fail("Group {0} had {1} failures! \n{2}".format(
self.checked_group, failures, messages))
def _set_priority(self, failures):
"""Set priority of this validation based on the number of failures.
:param failures: The number of failures in this validation's checked
group.
"""
if failures >= self.critical_threshold:
self.priority = Priority.CRITICAL
elif failures >= self.normal_threshold:
self.priority = Priority.NORMAL
else:
self.priority = Priority.LOW | Alarmageddon | /Alarmageddon-1.1.2-py3-none-any.whl/alarmageddon/validations/validation.py | validation.py |
import datetime
import requests
from alarmageddon.validations.validation import Validation
from alarmageddon.validations.graphite_expectations import \
LessThanExpectation, \
GreaterThanExpectation, \
AverageLessThanExpectation, \
AverageGreaterThanExpectation
import logging
import six
logger = logging.getLogger(__name__)
class GraphiteContext(object):
"""Create one of these and then pass it to all of the
GraphiteValidation objects you create.
"""
def __init__(self, graphite_host):
"""Creates a GraphiteContext object"""
self._graphite_host = graphite_host
def get_graphite_host(self):
"""returns the Graphite host name"""
return self._graphite_host
def __repr__(self):
return "{}: {}".format(type(self).__name__, self._graphite_host)
class GraphiteValidation(Validation):
"""A Validation that queries Graphite for data and then validates any
    defined expectations against that data.
"""
def __init__(self, context, name, metric_name,
time_range=datetime.timedelta(hours=1),
**kwargs):
"""Creates a GraphiteValidation object"""
Validation.__init__(self, name, **kwargs)
self._context = context
self.time_range = time_range
self.metric_name = metric_name
self._expectations = []
def perform(self, group_failures):
"""Perform the validation and propagate any failures to reporters"""
readings = self._get_readings()
if len(readings) == 0:
self.fail("No readings for {0} were found"
.format(self.metric_name))
for expectation in self._expectations:
expectation.validate(readings, self.time_range)
def fail(self, reason):
"""Causes this GraphiteValidation to fail with the given reason."""
Validation.fail(self, reason)
def expect_average_in_range(self, lower_bound, upper_bound):
"""The average reading of the specified time range should fall between
the upper and lower bound
"""
self.expect_average_less_than(upper_bound)
self.expect_average_greater_than(lower_bound)
return self
def expect_in_range(self, lower_bound, upper_bound):
"""All readings in the specified time range should fall between the
upper and lower bound
"""
self.expect_greater_than(lower_bound)
self.expect_less_than(upper_bound)
return self
def expect_less_than(self, upper_bound):
"""All readings in the specified time range should fall below the
upper bound
"""
self._expectations.append(LessThanExpectation(self, upper_bound))
return self
def expect_average_less_than(self, upper_bound):
"""The average reading of the specified time range should fall below
the upper bound
"""
self._expectations.append(AverageLessThanExpectation(self,
upper_bound))
return self
def expect_greater_than(self, lower_bound):
"""All readings in the specified time range should fall above the
lower bound
"""
self._expectations.append(GreaterThanExpectation(self, lower_bound))
return self
def expect_average_greater_than(self, lower_bound):
"""The average reading of the specified time range should fall above
the lower bound
"""
self._expectations.append(
AverageGreaterThanExpectation(self, lower_bound))
return self
def _build_url(self):
"""Builds the URL for retrieving Graphite data for a metric"""
return "{0}/render/?target={1}&format=raw&from=-{2}seconds"\
.format(self._context.get_graphite_host(),
self.metric_name,
self.time_range.total_seconds())
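        # Example of the resulting URL (host and metric are illustrative):
        #   http://graphite.example.com/render/?target=stats.api.errors&format=raw&from=-3600.0seconds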
def _get_readings(self):
"""Return a list of readings for the metric as a list of floats and/or
None values. A None reading means no data was sent to
Graphite for that time period.
"""
url = self._build_url()
logger.debug("Hitting graphite server at {}".format(url))
resp = requests.get(url)
logger.debug("Graphite response: {}".format(resp))
if resp.status_code < 200 or resp.status_code >= 300:
self.fail(("Could not get data from Graphite. " +
"URL: {0}, Metric: {1}, Status Code: {2}," +
"Response: {3}").format(url, self.metric_name,
resp.status_code, resp.text))
chunks = resp.text.strip().split('|')
if len(chunks) == 2:
readings = []
for tok in chunks[1].split(','):
if tok == six.u('None'):
readings.append(None)
else:
readings.append(float(tok))
return readings
else:
self.fail("Unexpected response from Graphite: {0}"
.format(resp.text)) | Alarmageddon | /Alarmageddon-1.1.2-py3-none-any.whl/alarmageddon/validations/graphite.py | graphite.py |
class ResponseExpectation(object):
"""An expectation placed on an HTTP response."""
def __init__(self):
pass
def validate(self, validation, response):
"""If the expectation is met, do nothing. If the expectation is
not met, call validation.fail(...)
"""
pass
class _ExpectedStatusCodes(ResponseExpectation):
"""An expectation about an HTTP response's status code"""
def __init__(self, status_codes):
"""Create an ExpectedStatusCodes object that expects the HTTP
response's status code to be one of the elements in status_codes.
"""
ResponseExpectation.__init__(self)
self.status_codes = status_codes
def validate(self, validation, response):
"""This expectation is met if the HTTP response code is one of the
elements of self.status_codes
"""
if response.status_code not in self.status_codes:
string_code = ' or '.join(str(status)
for status in self.status_codes)
if len(string_code) > 33:
string_code = string_code[:34] + "..."
validation.fail(
"expected status code: {0}, actual status code: {1} ({2})"
.format(string_code, response.status_code, response.reason))
def __repr__(self):
return "{}: Code {}".format(type(self).__name__, self.status_codes)
class ExpectContainsText(ResponseExpectation):
"""An expectation that an HTTP response will include some text."""
def __init__(self, text):
"""Creates an ExpectContainsText object that expects the HTTP response
text to contain the specified text.
"""
ResponseExpectation.__init__(self)
self.text = text
def validate(self, validation, response):
        if self.text not in response.text:
validation.fail("could not find '{0}' in response body: '{1}'"
.format(self.text, response.text))
def __repr__(self):
return "{}: expect {}".format(type(self).__name__, self.text)
class ExpectedHeader(ResponseExpectation):
"""An expectation that an HTTP response will include a header with a
specific name and value.
"""
def __init__(self, name, value):
"""Creates an ExpectedHeader object."""
ResponseExpectation.__init__(self)
self.name = name
self.value = value
def validate(self, validation, response):
if self.name not in response.headers:
validation.fail("No header named: '{0}'. Found header names: {1}"
.format(self.name,
', '.join(list(response.headers.keys()))))
elif self.value != response.headers[self.name]:
validation.fail(
"The value of the '{0}' header is '{1}', expected '{2}'"
.format(self.name, response.headers[self.name], self.value))
def __repr__(self):
return "{}: {} should be {}".format(type(self).__name__, self.name, self.value)
class ExpectedContentType(ExpectedHeader):
"""An expectation that an HTTP response will have
a particular content type
"""
def __init__(self, content_type):
ExpectedHeader.__init__(self, "Content-Type", content_type) | Alarmageddon | /Alarmageddon-1.1.2-py3-none-any.whl/alarmageddon/validations/http_expectations.py | http_expectations.py |
import sys
import os
import time
import re
import pytest
import warnings
import paramiko
from fabric import Connection
from alarmageddon.validations.validation import Validation, Priority
import logging
logger = logging.getLogger(__name__)
class SshContext(object):
"""Context that SSH commands execute in: the user and the user's key file.
Note that the list of hosts is not part of the SshContext because it
changes at a very high rate compared to the user name and their key file.
"""
def __init__(self, user, key_file):
"""Creates an SshContext object"""
if not user:
raise ValueError("user parameter is required")
if not key_file:
raise ValueError("key_file parameter is required")
if not os.path.exists(os.path.expanduser(key_file)):
raise ValueError("key_file: {0} does not exist".format(key_file))
self.user = user
self.key_file = key_file
def __str__(self):
"""return a string representation of an SshContext object"""
return "SSH Context {{ User: {0}, Key File: {1} }}"\
.format(self.user, self.key_file)
def __repr__(self):
return "{}: {} {}".format(type(self).__name__, self.user, self.key_file)
class SshValidation(Validation):
"""A Validation that is performed using SSH (more specifically, fabric)"""
def __init__(self, ssh_context, name,
priority=Priority.NORMAL, timeout=None,
group=None, connection_retries=0,
hosts=None):
"""Creates an SshValidation object"""
Validation.__init__(self, name, priority, timeout, group=group)
self.context = ssh_context
if hosts is not None:
self.hosts = hosts
else:
self.hosts = []
self.expectations = []
self.retries = connection_retries
self._exit_code_expectation = _ExitCodeEquals(self, 0)
def add_hosts(self, hosts):
"""Add additional hosts to run validations against"""
warnings.warn("Add hosts in the constructor rather than through this" +
" method", FutureWarning)
self.hosts.extend(hosts)
return self
def perform(self, group_failures):
"""Perform validation against all of this object's hosts"""
if not self.hosts:
self.fail("no hosts specified.")
ssh_kwargs = {"key_filename": self.context.key_file}
#now we can add our default expectation
self.expectations.append(self._exit_code_expectation)
for host in self.hosts:
with Connection(host=host, user=self.context.user, connect_kwargs=ssh_kwargs) as connection:
for i in range(self.retries + 1):
try:
self.perform_on_host(connection)
break
except paramiko.SSHException as ex:
# TODO: Paramiko doesn't surface a separate sort of exception
# for timeouts like fabric1 did. This probably needs more logic
# to not catch issues that could allow for retrying
# we connected, so don't retry
self.fail_on_host(
host,
"SSH Command timed out: {0}".format(str(ex)))
except Exception as ex:
if i >= self.retries:
self.fail_on_host(
host,
"SSH Command Exception: {0}"
.format(str(ex)))
time.sleep(2)
def fail_on_host(self, host, reason):
"""signal failure the test on a particular host"""
self.fail("[{0}] {1}".format(host, reason))
def perform_on_host(self, connection):
"""perform a validation against a particular host"""
self.fail_on_host(
connection.host, "perform_on_host must be overriden by derived classes")
def add_expectation(self, expectation):
"""Adds an expectation deriving from SshCommandExpectation to the list
of expectations to be performed as part of the validation.
"""
if isinstance(expectation, SshCommandExpectation):
self.expectations.append(expectation)
return self
else:
raise ValueError("attempt to add expectation that does not" +
" derive from SshCommandExpectation.")
def expect_exit_code(self, exit_code):
"""Add the expectation that the SSH command's exit code is equal
to exit_code
"""
self._exit_code_expectation = _ExitCodeEquals(self, exit_code)
return self
def expect_output_contains(self, text):
"""Add the expectation that the SSH command's output contains text"""
self.add_expectation(OutputContains(self, text))
return self
def expect_output_does_not_contain(self, text):
"""Add the expectation that the SSH command's output does not
contain text
"""
self.add_expectation(OutputDoesNotContain(self, text))
return self
class SshCommandValidation(SshValidation):
"""A validation that runs a command and checks zero or more expectations
against its exit code and/or output.
"""
def __init__(self, ssh_context, name, command, working_directory=None,
environment=None, priority=Priority.NORMAL, use_sudo=False,
timeout=None, connection_retries=0, group=None, hosts=None):
SshValidation.__init__(self,
ssh_context,
name,
priority=priority,
timeout=timeout,
connection_retries=connection_retries,
group=group,
hosts=hosts)
self.command = command
self.working_directory = working_directory # Not supported yet
self.environment = environment or {} # Not supported yet
self.use_sudo = use_sudo
self.expectations = []
def perform_on_host(self, connection):
"""Runs the SSH Command on a host and checks to see if all expectations
are met.
"""
if self.use_sudo:
output = connection.sudo(self.command, err_stream=sys.stdout,
timeout=self.timeout, warn=True)
else:
output = connection.run(self.command, err_stream=sys.stdout,
timeout=self.timeout, warn=True)
logger.info("Got output {} from host {}".format(output, connection.host))
exit_code = output.return_code
for expectation in self.expectations:
expectation.validate(self, connection.host, output.stdout, exit_code)
class UpstartServiceValidation(SshCommandValidation):
"""Validates that the specified upstart process is in the specified state
(e.g. running)
"""
def __init__(self, ssh_context, service_name, service_state="running",
priority=Priority.NORMAL, timeout=None, group=None,
hosts=None):
SshCommandValidation.__init__(self,
ssh_context,
"{0} service should be {1}"
.format(service_name, service_state),
"status {0}".format(service_name),
                                      use_sudo=True, priority=priority,
                                      timeout=timeout,
group=group,
hosts=hosts)
self.expect_output_contains(service_state)
class LoadAverageValidation(SshValidation):
"""Validates that a server's load average falls within a set of
parameters
"""
def __init__(self, ssh_context, priority=Priority.NORMAL, timeout=None,
group=None, hosts=None):
"""Creates an SshLoadAverageValidation object"""
SshValidation.__init__(self, ssh_context, "load average",
                               priority=priority, timeout=timeout, group=group,
hosts=hosts)
# If a limit is None, then there is no limit.
self.limits = {
1: {'min': None, 'max': None},
5: {'min': None, 'max': None},
15: {'min': None, 'max': None}
}
def expect_min_1_minute_load(self, min_load):
"""expect a minimum 1 minute load"""
self.limits[1]['min'] = min_load
return self
def expect_min_5_minute_load(self, min_load):
"""expect a minimum 5 minute load"""
self.limits[5]['min'] = min_load
return self
def expect_min_15_minute_load(self, min_load):
"""expect a minimum 15 minute load"""
self.limits[15]['min'] = min_load
return self
def expect_max_1_minute_load(self, max_load):
"""expect a maximum 1 minute load"""
self.limits[1]['max'] = max_load
return self
def expect_max_5_minute_load(self, max_load):
"""expect a maximum 5 minute load"""
self.limits[5]['max'] = max_load
return self
def expect_max_15_minute_load(self, max_load):
"""expect a maximum 15 minute load"""
self.limits[15]['max'] = max_load
return self
def perform_on_host(self, connection):
"""Runs the SSH Command on a host and checks to see if all expectations
are met.
"""
(load_1, load_5, load_15) = SshCommands.get_uptime(connection)
self.check(connection.host, 1, load_1)
self.check(connection.host, 5, load_5)
self.check(connection.host, 15, load_15)
def check(self, host, minutes, load):
"""Make sure that the n-minute load average for the given host is
within the allowed range.
"""
# Check if the maximum was exceeded (if it was defined)
if self.limits[minutes]['max'] and load > self.limits[minutes]['max']:
self.fail_on_host(host,
("{0} minute load too high on {1}. " +
"Maximum Load: {2:.2f}, Current Load: {3:.2f}")
.format(minutes, host,
self.limits[minutes]['max'],
load))
# Check if the Minimum was exceeded (if it was defined)
if self.limits[minutes]['min'] and load < self.limits[minutes]['min']:
self.fail_on_host(host,
("{0} minute load too low on {1}. " +
"Minimum Load: {2:.2f}, Current Load: {3:.2f}")
.format(minutes, host,
self.limits[minutes]['min'], load))
def add_expectation(self, expectation):
raise NotImplementedError
def expect_exit_code(self, exit_code):
raise NotImplementedError
class SshCommandExpectation(object):
"""Base class for expectations that can be placed on an SshValidation"""
def __init__(self, validation):
self.validation = validation
def validate(self, validation, host, command_output, exit_code):
"""Defined by derived classes"""
pass
def fail_on_host(self, host, reason):
"""Report a failure and the host the failure occurred on"""
self.validation.fail_on_host(host, reason)
def __repr__(self):
return "{}: on {}".format(type(self).__name__, self.validation)
class _ExitCodeEquals(SshCommandExpectation):
"""Expects that the exit code of an SSH command is equal to a
specific value
"""
def __init__(self, validation, exit_code):
SshCommandExpectation.__init__(self, validation)
self.exit_code = exit_code
def validate(self, validation, host, command_output, exit_code):
if exit_code != self.exit_code:
self.fail_on_host(host,
"Exit Code should have been {0} but was {1}"
.format(self.exit_code, exit_code))
def __repr__(self):
return "{}:Code {} on {}".format(type(self).__name__, self.exit_code, self.validation)
class OutputContains(SshCommandExpectation):
"""Expects that the output of an SSH command is contains specified text"""
def __init__(self, validation, text):
SshCommandExpectation.__init__(self, validation)
self.text = text
def validate(self, validation, host, command_output, exit_code):
if self.text not in command_output:
self.fail_on_host(host,
("Command output should contain: '{0}'. " +
"Output: '{1}'")
.format(self.text, command_output))
def __repr__(self):
return "{}:Expect {} on {}".format(type(self).__name__, self.text, self.validation)
class OutputDoesNotContain(SshCommandExpectation):
"""Expects that the output of an SSH command does not contain
specified text
"""
def __init__(self, validation, text):
SshCommandExpectation.__init__(self, validation)
self.text = text
def validate(self, validation, host, command_output, exit_code):
if self.text in command_output:
self.fail_on_host(host,
("Command output should not contain: '{0}'. " +
"Output: '{1}'")
.format(self.text, command_output))
def __repr__(self):
return "{}:Do not expect {} on {}".format(type(self).__name__, self.text, self.validation)
class OutputLessThan(SshCommandExpectation):
"""Expects that the output of an SSH command is less than the
specified value. This method casts the command_output string to a float
to do the comparison.
"""
def __init__(self, validation, value):
SshCommandExpectation.__init__(self, validation)
self.value = value
def validate(self, validation, host, command_output, exit_code):
command_output_as_float = float(command_output)
if not command_output_as_float < self.value:
self.fail_on_host(host,
"Command output greater than or equal to " +
"expected: '{0}'. Output: '{1}'"
.format(self.value, str(command_output)))
def __repr__(self):
return "{}:Value < {} on {}".format(type(self).__name__, self.value, self.validation)
class OutputGreaterThan(SshCommandExpectation):
"""Expects that the output of an SSH command is greater than the
specified value. This method casts the command_output string to a float
to do the comparison.
"""
def __init__(self, validation, value):
SshCommandExpectation.__init__(self, validation)
self.value = value
def validate(self, validation, host, command_output, exit_code):
command_output_as_float = float(command_output)
if not command_output_as_float > self.value:
self.fail_on_host(host,
"Command output was less than or equal to " +
"expected: '{0}'. Output: '{1}'"
.format(self.value, command_output))
def __repr__(self):
return "{}:Value > {} on {}".format(type(self).__name__, self.value, self.validation)
UPTIME_REGEX = re.compile(r"load average: (\d+\.\d+), (\d+\.\d+), (\d+\.\d+)")
class SshCommands(object):
"""Some commands that might be helpful"""
def __init__(self):
pass
@staticmethod
def get_cpu_count(connection):
"""return the number of processors on the server"""
        return int(connection.run("grep processor /proc/cpuinfo | wc -l", warn=True))
@staticmethod
def get_uptime(connection):
"""return the system uptime"""
output = connection.run("uptime", warn=True)
match = UPTIME_REGEX.search(output)
if match:
return (float(match.group(1)),
float(match.group(2)),
float(match.group(3)))
else:
pytest.fail("Could not get uptime. Command output was: {0}"
.format(output)) | Alarmageddon | /Alarmageddon-1.1.2-py3-none-any.whl/alarmageddon/validations/ssh.py | ssh.py |
# Bert-BiLSTM-CRF-pytorch
A BiLSTM-CRF sequence labeling model that uses Google's pretrained BERT for character embeddings.
This model uses Google's pretrained BERT model (https://github.com/google-research/bert),
loads the BERT weights and converts them to PyTorch parameters via the pytorch-pretrained-BERT project (https://github.com/huggingface/pytorch-pretrained-BERT),
and the CRF code is based on SLTK (https://github.com/liu-nlper/SLTK).
See the data directory for the expected training data format.
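Each line of the data file is a JSON object with a `text` list of characters and a `label` list of the same length, for example (the tag names below are only illustrative; use whatever tags your label vocabulary defines):
{"text": ["周", "杰", "伦", "出", "生", "于", "台", "湾"], "label": ["B-PER", "M-PER", "E-PER", "O", "O", "O", "B-LOC", "E-LOC"]}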
Model parameters can be set in config.
Run the code
python main.py train --use_cuda=False --batch_size=10
Prediction
python main.py test --load_path result/pytorch_model.bin
python main.py test --load_path result/pytorch_model.bin --use_cuda=False
python main.py test --load_path data/bert/run
ALBERT version
<!-- requires roughly 6.5 GB of memory -->
<!-- uses the base model -->
<!-- download the model from https://www.kaggle.com/terrychanorg/pytorch-albert-zh -->
python al_main.py train --use_cuda=False --batch_size=50 --base_epoch=1
python al_main.py test --load_path result/pytorch_model.bin --use_cuda=False --load_model=True
Prediction
python al_main.py pre_one --text "柯基犬真是和牛逼"
pytorch.bin Baidu Netdisk link: https://pan.baidu.com/s/160cvZXyR_qdAv801bDY2mQ (extraction code: q67r)
The author is also a beginner and would be glad to hear your suggestions so we can learn together.
| AlbertMarker | /AlbertMarker-0.0.1.1.tar.gz/AlbertMarker-0.0.1.1/README.md | README.md |
import torch
import os
import datetime
import unicodedata
import json
class InputFeatures(object):
def __init__(self, input_id, label_id, input_mask):
self.input_id = input_id
self.label_id = label_id
self.input_mask = input_mask
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = {}
index = 0
with open(vocab_file, "r", encoding="utf-8") as reader:
while True:
token = reader.readline()
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def read_corpus(path, max_length, label_dic, vocab):
"""
    :param path: path to the data file
    :param max_length: maximum sequence length
    :param label_dic: label dictionary
    :param vocab: token-to-id vocabulary
:return:
"""
# file = open(path, encoding='utf-8')
# content = file.readlines()
# file.close()
result = []
# with open(path, 'r', encoding = 'utf-8') as f:
tjson=Tjson(file_path=path)
for line in tjson.load():
# text, label = line.strip().split('|||')
# tokens = text.split()
# label = label.split()
# print(line)
tokens=line['text']
label=line['label']
if len(tokens) > max_length-2:
tokens = tokens[0:(max_length-2)]
label = label[0:(max_length-2)]
tokens_f =['[CLS]'] + tokens + ['[SEP]']
label_f = ["<start>"] + label + ['<eos>']
input_ids = [int(vocab[i]) if i in vocab else int(vocab['[UNK]']) for i in tokens_f]
label_ids = [label_dic[i] for i in label_f]
input_mask = [1] * len(input_ids)
while len(input_ids) < max_length:
input_ids.append(0)
input_mask.append(0)
label_ids.append(label_dic['<pad>'])
assert len(input_ids) == max_length
assert len(input_mask) == max_length
assert len(label_ids) == max_length
feature = InputFeatures(input_id=input_ids, input_mask=input_mask, label_id=label_ids)
result.append(feature)
return result
class InputPreFeatures(object):
def __init__(self, input_id, input_mask):
self.input_id = input_id
# self.label_id = label_id
self.input_mask = input_mask
def build_input(content, max_length, vocab):
"""
    :param content: list of input sentences
    :param max_length: maximum sequence length
    :param vocab: token-to-id vocabulary
:return:
"""
# file = open(path, encoding='utf-8')
# content = file.readlines()
# file.close()
result = []
for line in content:
# text, label = line.strip().split('|||')
tokens = line.split()
# label = label.split()
if len(tokens) > max_length-2:
tokens = tokens[0:(max_length-2)]
# label = label[0:(max_length-2)]
# print(tokens)
tokens_f =['[CLS]'] + tokens + ['[SEP]']
# print('tokens_f',tokens_f)
# label_f = ["<start>"] + label + ['<eos>']
input_ids = [int(vocab[i]) if i in vocab else int(vocab['[UNK]']) for i in tokens_f]
# label_ids = [label_dic[i] for i in label_f]
input_mask = [1] * len(input_ids)
while len(input_ids) < max_length:
input_ids.append(0)
input_mask.append(0)
# label_ids.append(label_dic['<pad>'])
assert len(input_ids) == max_length
assert len(input_mask) == max_length
# assert len(label_ids) == max_length
feature = InputPreFeatures(input_id=input_ids, input_mask=input_mask)
result.append(feature)
return result
def save_model(model, epoch, path='result', **kwargs):
"""
    Keeps every saved model by default.
    :param model: the model to save
    :param path: directory to save to
    :param loss: validation loss
    :param last_loss: best epoch loss
    :param kwargs: every_epoch or best_epoch
:return:
"""
if not os.path.exists(path):
os.mkdir(path)
if kwargs.get('name', None) is None:
# cur_time = datetime.datetime.now().strftime('%Y-%m-%d#%H:%M:%S')
# name = cur_time + '--epoch:{}'.format(epoch)
name="pytorch_model.bin"
full_name = os.path.join(path, name)
torch.save(model.state_dict(), full_name)
# print('Saved model at epoch {} successfully'.format(epoch))
with open('{}/checkpoint'.format(path), 'w') as file:
file.write(name)
# print('Write to checkpoint')
def load_model(model, path='result', **kwargs):
if kwargs.get('name', None) is None:
with open('{}/checkpoint'.format(path)) as file:
content = file.read().strip()
name = os.path.join(path, content)
else:
name=kwargs['name']
# name = os.path.join(path,name)
name = os.path.join(name)
# print('name',name)
model.load_state_dict(torch.load(name, map_location=lambda storage, loc: storage))
# print('load model {} successfully'.format(name))
return model
class Tjson:
"""
    Helper class for reading and writing JSON-lines data.
"""
def __init__(self,file_path="data.json"):
self.file_path=file_path
def save(self,data):
"""
        Save data to the file.
        Items are written one JSON object per line.
>>> data=[{'a':'ess'}]
"""
with open(self.file_path, 'a+', encoding='utf-8') as f:
for item in data:
line = json.dumps(item, ensure_ascii=False)
f.write(line+'\n')
def load(self):
"""
        Load data, yielding one JSON object per line.
"""
with open(self.file_path, 'r', encoding = 'utf-8') as f:
for line in f:
data=json.loads(line[:-1])
yield data
"""
#Usage
data=[{'a':'ess'}]
Tjson().save(data)
print(list(Tjson().load()))
""" | AlbertMarker | /AlbertMarker-0.0.1.1.tar.gz/AlbertMarker-0.0.1.1/marker/utils.py | utils.py |
import torch
import torch.nn as nn
from torch.autograd import Variable
from config import Config
from model import ALBERT_LSTM_CRF
import torch.optim as optim
from utils import load_vocab, read_corpus, load_model, save_model,build_input,Tjson
from torch.utils.data import TensorDataset
from torch.utils.data import DataLoader
import fire
class Pre:
"""
    Runs prediction.
content=["柯基犬是个狗子"]
P=Pre()
result=P.pre(content)
print(result)
    >>>[('柯基犬是个狗子', [{'type': '实体', 'words': ['柯', '基', '犬']}])]
"""
def __init__(self):
self.args={
"load_path":"result/pytorch_model.bin",
"load_model":True,
"use_cuda":False,
}
config = Config()
config.update_json(self.args)
self.config=config
print( self.config)
self.load_model()
def load_model(self):
"""
        Load the model.
"""
# config = Config()
# config.update_json(self.args)
config=self.config
# print('当前设置为:\n', config)
if config.use_cuda:
torch.cuda.set_device(config.gpu)
# print('loading corpus')
vocab = load_vocab(config.vocab)
label_dic = load_vocab(config.label_file)
tagset_size = len(label_dic)
model = ALBERT_LSTM_CRF(config.albert_path, tagset_size, config.albert_embedding, config.rnn_hidden, config.rnn_layer, dropout_ratio=config.dropout_ratio, dropout1=config.dropout1, use_cuda=config.use_cuda)
if config.load_model:
assert config.load_path is not None
#
model = load_model(model, name=config.load_path)
if config.use_cuda:
model.cuda()
# return model, vocab, label_dic
self.model, self.vocab, self.label_dic=model, vocab, label_dic
def build_input(self,content_array):
"""
        Build model inputs from a list of texts.
"""
config=self.config
model, vocab, label_dic=self.model, self.vocab, self.label_dic
new_content_array=[]
content_dict={}
for i, content in enumerate (content_array):
new_content_array.append(" ".join(list(content)))
# content_dict[i]=content
# print(content)
# print(content_dict)
input_data = build_input(content=new_content_array, max_length=self.config.max_length, vocab=self.vocab)
# print(input_data)
input_ids = torch.LongTensor([temp.input_id for temp in input_data])
input_masks = torch.LongTensor([temp.input_mask for temp in input_data])
input_dataset = TensorDataset(input_ids, input_masks)
input_loader = DataLoader(input_dataset, shuffle=True, batch_size=self.config.batch_size)
return input_loader
def pre(self,content_array=[]):
"""
        Run prediction.
        Pass in one or more sentences.
"""
config=self.config
model, vocab, label_dic=self.model, self.vocab, self.label_dic
content_dict={}
for i, content in enumerate (content_array):
# new_content_array.append(" ".join(list(content)))
content_dict[i]=content
output=[]
for i, batch in enumerate(self.build_input(content_array)):
inputs, masks = batch
# print('inputs',inputs)
inputs, masks= Variable(inputs), Variable(masks)
# print("masks",masks)
if self.config.use_cuda:
inputs, masks = inputs.cuda(), masks.cuda()
feats = model(inputs)
# print("feats",feats)
path_score, best_path = model.crf(feats, masks.bool())
# print("feats",path_score, best_path)
for item in best_path.numpy():
# print(item.tolist())
words=[]
for n,id in enumerate( item.tolist()):
word_id=inputs.numpy().tolist()[0][n]
l=list(label_dic)[id]
w=list(self.vocab)[word_id]
words.append((w,l))
# print('words',words)
wd={"type":None,"words":[]}
wd_list=[]
for w,l in words:
# print(wd)
if l.startswith("B-"):
wd["type"]=l.split("B-")[1]
wd["words"].append(w)
elif l.startswith("M-"):
wd["type"]=l.split("M-")[1]
wd["words"].append(w)
elif l.startswith("E-"):
wd["type"]=l.split("E-")[1]
wd["words"].append(w)
wd_list.append(wd)
wd={"type":None,"words":[]}
elif l.startswith("S-"):
wd["type"]=l.split("S-")[1]
wd["words"].append(w)
wd_list.append(wd)
wd={"type":None,"words":[]}
# print('wd_list',wd_list)
# print(i)
output.append((content_dict[i],wd_list))
return output
def pre_one(**kwargs):
# content="柯基犬性格活泼可爱,但是饲养柯基犬会有着六个坏处,你还敢饲养柯基犬吗?"
# print(**kwargs)
P=Pre()
result=P.pre([kwargs['text']])
print(result)
if __name__ == '__main__':
fire.Fire()
# test() | AlbertMarker | /AlbertMarker-0.0.1.1.tar.gz/AlbertMarker-0.0.1.1/marker/marker.py | marker.py |
[](https://pypi.org/project/AlbertUnruhUtils)
[](https://pypi.org/project/AlbertUnruhUtils)
[](https://github.com/AlbertUnruh/AlbertUnruhUtils.py/actions/workflows/python-publish.yml)
[](https://choosealicense.com/licenses/mit)
[](https://github.com/psf/black)
[![[tokei.rs - Code Lines]](https://tokei.rs/b1/github/AlbertUnruh/AlbertUnruhUtils.py?category=code)](https://tokei.rs)
[![[tokei.rs - Total Lines]](https://tokei.rs/b1/github/AlbertUnruh/AlbertUnruhUtils.py?category=lines)](https://tokei.rs)
[![[tokei.rs - Files]](https://tokei.rs/b1/github/AlbertUnruh/AlbertUnruhUtils.py?category=files)](https://tokei.rs)
# **Note:**
**Please head over to [AlbertUnruhUtils.py](https://pypi.org/project/AlbertUnruhUtils.py).**
**This project is discontinued here!**
**But it 'll be continued here ([AlbertUnruhUtils.py](https://pypi.org/project/AlbertUnruhUtils.py)) and follows [SemVer](https://semver.org).**
---
# AlbertUnruhUtils
A collection of utils written in Python
# Installing
**Python 3.9 or higher is required!**
via pip: (recommended)
```shell
pip3 install -U AlbertUnruhUtils
```
latest develop-version:
*(requires git to be installed)*
```shell
git clone https://github.com/AlbertUnruh/AlbertUnruhUtils.py
cd AlbertUnruhUtils.py
pip3 install -U .
```
or simply
```shell
pip3 install -U git+https://github.com/AlbertUnruh/AlbertUnruhUtils.py.git#egg=AlbertUnruhUtils
```
# Docs
coming soon...
| AlbertUnruhUtils | /AlbertUnruhUtils-2022.1.29.2.tar.gz/AlbertUnruhUtils-2022.1.29.2/README.md | README.md |
__all__ = ("TeX",)
import typing
from pathlib import Path
from io import BytesIO
from os import PathLike
import matplotlib.pyplot as plt
from PIL import Image, ImageChops
from ..utils import not_implemented
from .. import __url__
_PathLike = typing.Union[
PathLike,
Path,
str,
]
_Color = typing.Union[
typing.Iterable[float],
str,
]
class TeX:
_color: _Color
_file: Path
_format: str
_tex: str
__slots__ = (
"_color",
"_file",
"_format",
"_tex",
)
def __init__(
self,
tex: str,
*,
file: _PathLike = "tex.png",
format: str = "png", # noqa
color: _Color = "#fe4b03", # aka "blood orange"
) -> None:
"""
Parameters
----------
tex: str
The input which should be displayed.
file: _PathLike
Sets the default value for `file`.
format
Sets the default value for `format`.
color
Sets the default value for `color`.
"""
if not tex.startswith("$"):
tex = f"${tex}$"
self._tex = tex
self._file = Path(file)
self._format = format
self._color = color
@classmethod
@not_implemented(
f"This feature is coming soon. Feel free to push it and open a PR on GitHub ({__url__})."
)
def from_python_code(
cls,
function: typing.Callable,
*,
file: _PathLike = "tex.png",
format: str = "png", # noqa
color: _Color = "#fe4b03", # aka "blood orange"
) -> "TeX":
"""
Creates TeX from a function.
Parameters
----------
function: typing.Callable
The Function which should be converted to TeX.
file: _PathLike
Sets the default value for `file`.
format
Sets the default value for `format`.
color
Sets the default value for `color`.
Returns
-------
TeX
"""
@property
def tex(self) -> str:
return self._tex
@property
def default_color(self) -> _Color:
return self._color
@property
def default_file(self) -> Path:
return self._file
@property
def default_format(self) -> str:
return self._format
def create_image(
self,
*,
format: typing.Optional[str] = None, # noqa
color: typing.Optional[_Color] = None,
) -> Image.Image:
"""
Creates the TeX-image.
Parameters
----------
format: str, optional
If `None` the default for `format` 'll be used.
color: _Color, optional
If `None` the default for `color` 'll be used.
Returns
-------
Image.Image
The TeX-image.
"""
if format is None:
format = self.default_format # noqa
if color is None:
color = self.default_color
buffer = BytesIO()
plt.rc("text", usetex=True)
plt.axis("off")
plt.text(0, 0, self._tex, size=40, color=color)
plt.savefig(buffer, format=format, transparent=True)
plt.close()
image = Image.open(buffer)
bg = Image.new(image.mode, image.size, (0,) * 4) # type: ignore
diff = ImageChops.difference(image, bg)
bbox = diff.getbbox()
return image.crop(bbox)
def save_to_file(
self,
file: typing.Optional[_PathLike] = None,
/,
format: typing.Optional[str] = None, # noqa
color: typing.Optional[_Color] = None,
) -> Path:
"""
Saves the TeX-image to a file.
Parameters
----------
file: _PathLike, optional
If `None` the default for `file` 'll be used.
format: str, optional
If `None` the default for `format` 'll be used.
color: _Color, optional
If `None` the default for `color` 'll be used.
Returns
-------
Path
The path to the saved TeX-image.
"""
if file is None:
file = self.default_file
else:
file = Path(file)
if format is None:
format = self.default_format # noqa
if color is None:
color = self.default_color
image = self.create_image(format=format, color=color)
image.save(file)
return file
def __main():
"""
        This is just a little function to test our TeX class.
"""
eulers_identity = r"e^{i\pi}+1=0"
file = TeX(eulers_identity).save_to_file()
print(f"saved Euler's Identity to {file.absolute()}") | AlbertUnruhUtils.py | /AlbertUnruhUtils.py-2.3.0-py3-none-any.whl/AlbertUnruhUtils/visual/tex.py | tex.py |
__all__ = ("ServerRateLimit",)
import functools
import typing
import uuid
from redis import Redis
from time import time
C_IN = typing.TypeVar("C_IN")
C_OUT = typing.TypeVar("C_OUT")
class ServerRateLimit:
"""Docs 'll come soon... (If you want docs right now you can take a look into ``__init__``)"""
    sections: dict[str, dict[str, int]]
retrieve_section: typing.Callable[[...], tuple[str, typing.Union[str, int]]]
__slots__ = (
"sections",
"retrieve_section",
"_redis",
)
def __init__(
self,
sections: dict[str, dict[str, int]],
retrieve_section: typing.Callable[[...], tuple[str, typing.Union[str, int]]],
*,
redis: Redis = None,
):
"""
Parameters
----------
sections: dict[str, dict[str, int]]
Parameter ``sections`` requires following structure:
```py
>>> {
... "<NAME or TYPE (e.g. user, admin etc.)>": {
... # type: int
... "amount": 10
...
... # type: int
... "interval": 60 # in seconds
...
... # type: int
... "timeout": 60 # in seconds # if a section requests to often then the timeout 'll be applied
... },
... "<second NAME or TYPE>": {
... ...
... },
... ...
... }
```
retrieve_section: typing.Callable[[...], tuple[str, typing.Union[str, int]]]
            This function 'll feed all its data from the original callable.
e.g. ```py
>>> @ServerRateLimit({"user": {...}, "admin": {...}}, retrieve)
... def foo(*args, **kwargs) -> ...:
... pass
...
>>> def retrieve(*args, **kwargs) -> (str, str):
... '''This is just an example, you have to manage yourself how you
... set it (can also be static by using a simple lambda-expression)'''
... if "admin_id" in kwargs:
... return "admin", 0
... return "user", 0
```
redis: Redis, optional
An own redis can optionally be set.
Notes
-----
The first return value from ``retrieve_section``
is the ``section``, the second is the ``id`` to
have every section separated.
"""
self.sections = sections
self.retrieve_section = retrieve_section
if redis is None:
redis = Redis("127.0.0.1", 6262, 0)
self._redis = redis
def __call__(
self,
func: typing.Callable[[C_IN], C_OUT],
) -> typing.Callable[[C_IN], tuple[tuple[bool, dict[str, int]], C_OUT]]:
def decorator(*args, **kwargs) -> tuple[tuple[bool, dict[str, int]], C_OUT]:
"""
Returns
-------
tuple[tuple[bool, dict[str, int]], C_OUT]
"""
section, id = self.retrieve_section(*args, **kwargs) # noqa
if section not in self.sections:
raise RuntimeError(
"Can't use key {section!r}. You have to return one of the following: {possible}".format(
section=section,
possible=", ".join(f"{k!r}" for k in self.sections),
)
)
self._check_timeout(section, id)
timeout = self._calculate_timeout(section, id)
remaining = self._calculate_remaining_calls(section, id)
data = {
"request": {
"remaining": -1,
"limit": self.sections[section]["amount"],
"period": self.sections[section]["interval"],
"timeout": timeout,
}
}
if not remaining > 0 or timeout:
data["request"]["remaining"] = self._calculate_remaining_calls(
section, id
)
return (False, data), ()
self._record_call(section, id)
data["request"]["remaining"] = self._calculate_remaining_calls(section, id)
return (True, data), func(*args, **kwargs)
return functools.update_wrapper(decorator, func)
def _record_call(
self,
section: str,
id: typing.Union[str, int], # noqa
) -> None:
"""
Parameters
----------
section: str
id: str, int
"""
key = f"call-{section}-{id}"
self._redis.execute_command(
f"ZADD {key} {time()+self.sections[section]['interval']} {uuid.uuid4()}"
)
self._redis.expire(key, self.sections[section]["interval"])
def _calculate_remaining_calls(
self,
section: str,
id: typing.Union[str, int], # noqa
) -> int:
"""
Parameters
----------
section: str
id: str, int
Returns
-------
int
"""
key = f"call-{section}-{id}"
# cleanup
self._redis.zremrangebyscore(key, 0, time())
return self.sections[section]["amount"] - int(
self._redis.zcount(key, 0, 2**62) or 0
)
def _check_timeout(
self,
section: str,
id: typing.Union[str, int], # noqa
) -> None:
"""
Parameters
----------
section: str
id: str, int
"""
key = f"cooldown-{section}-{id}"
if not self._calculate_remaining_calls(section, id) > 0:
if not self._redis.exists(key):
self._redis.append(key, 1)
self._redis.expire(key, self.sections[section]["timeout"])
def _calculate_timeout(
self,
section: str,
id: typing.Union[str, int], # noqa
) -> int:
"""
Parameters
----------
section: str
id: str, int
Returns
-------
int
"""
key = f"cooldown-{section}-{id}"
return max(0, self._redis.ttl(key))
if __name__ == "__main__":
r = Redis()
@ServerRateLimit(
{"default": {"interval": 10, "amount": 10, "timeout": 20}},
lambda: ("default", 0),
redis=r,
)
def test():
return "<--test()-->"
from time import sleep
while True:
print(test.__name__, test())
sleep(0.5) | AlbertUnruhUtils.py | /AlbertUnruhUtils.py-2.3.0-py3-none-any.whl/AlbertUnruhUtils/ratelimit/server.py | server.py |
__all__ = (
"deprecated",
"copy_docs",
"not_implemented",
)
import re
import typing
import warnings
from copy import copy
from functools import wraps
_Version = typing.Union[str, tuple[int]]
_Function = typing.TypeVar("_Function", bound=typing.Callable)
_Docs_NumPy = typing.Literal["NumPy", "numpy", "NumPyDoc", "numpydoc"]
_Doc_Style = typing.Union[_Docs_NumPy]
def deprecated(
since: _Version = None,
*,
instead: typing.Union[str, typing.Callable] = None,
update_docs: bool = True,
doc_style: _Doc_Style = "NumPy",
) -> typing.Callable[[_Function], _Function]:
"""
Marks a function/method as deprecated.
Parameters
----------
since: _Version
Since when the function/method is deprecated.
instead: str, typing.Callable, optional
What function/method should be used instead.
update_docs: bool
Whether the docs should be updated or not.
doc_style: _Doc_Style
The DocStyle to use to update the docs.
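    Examples
    --------
    A minimal usage sketch (``old_api`` and ``new_api`` are placeholder names):
    >>> @deprecated(since="1.0.0", instead="new_api")
    ... def old_api():
    ...     ...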
"""
def outer(func: _Function) -> _Function:
message = "{0.__name__} is deprecated".format(func)
if since is not None:
if not isinstance(since, str):
version = ".".join(str(v) for v in since)
else:
version = since
message += " since version {0}".format(version)
message += "."
if instead is not None:
if not isinstance(instead, str):
name = instead.__name__
else:
name = instead
message += " Use {0} instead.".format(name)
if update_docs:
            # known issue: if a class isn't initialized,
            # the method will be reported as a function
f_o_c = func.__class__.__name__
# NumPy
if doc_style in _Docs_NumPy.__args__: # type: ignore
lines = [
f"",
f"Deprecation",
f"-----------",
f"This {f_o_c} " + message.split(maxsplit=1)[-1],
f"",
]
# invalid
else:
supported = []
for g in _Doc_Style.__args__: # type: ignore
for s in g.__args__:
supported.append(s)
raise ValueError(
f"Unknown or unsupported formatting style {doc_style!r} for docstrings! "
f"Use one of them instead: {', '.join(supported)}"
)
tab = ""
if func.__doc__:
res = re.findall(r"^\s+", func.__doc__)
if res:
tab = res[0].removeprefix("\n")
func.__doc__ += "\n".join(tab + line for line in lines)
@wraps(func)
def inner(*args, **kwargs):
filters = copy(warnings.filters)
warnings.filters = []
warnings.warn(message=message, category=DeprecationWarning)
warnings.filters = filters
return func(*args, **kwargs)
return inner
return outer
def not_implemented(
reason: str = None,
*,
update_docs: bool = True,
doc_style: _Doc_Style = "NumPy",
) -> typing.Callable[[_Function], _Function]:
"""
Marks a function/method as not implemented, but as coming soon.
(And they might open a PR to implement it for you :D)
Parameters
----------
reason: str
The reason why this function/method is not implemented yet.
update_docs: bool
Whether the docs should be updated or not.
doc_style: _Doc_Style
The DocStyle to use to update the docs.
"""
def outer(func: _Function) -> _Function:
message = "{0.__name__} is not implemented yet".format(func)
if reason:
message += " with following reason: {0}".format(reason)
if not message.endswith("."):
message += "."
if update_docs:
            # known issue: if a class isn't initialized,
            # the method will be reported as a function
f_o_c = func.__class__.__name__
# NumPy
if doc_style in _Docs_NumPy.__args__: # type: ignore
# this goes on top of the __doc__
lines0 = [
f"",
f"This {f_o_c} " + message.split(maxsplit=1)[-1],
f"",
]
# this goes at the end of the __doc__
lines1 = [
f"",
f"Raises",
f"------",
f"NotImplementedError",
f"",
]
if reason:
lines1.insert(-1, " " * 4 + reason)
# invalid
else:
supported = []
for g in _Doc_Style.__args__: # type: ignore
for s in g.__args__:
supported.append(s)
raise ValueError(
f"Unknown or unsupported formatting style {doc_style!r} for docstrings! "
f"Use one of them instead: {', '.join(supported)}"
)
tab = ""
if func.__doc__:
res = re.findall(r"^\s+", func.__doc__)
if res:
tab = res[0].removeprefix("\n")
else:
func.__doc__ = ""
doc_start = "\n" if func.__doc__.startswith("\n") else ""
func.__doc__ = (
doc_start
+ ("\n".join(tab + line for line in lines0))
+ (func.__doc__ or "")
)
func.__doc__ += "\n".join(tab + line for line in lines1)
@wraps(func)
def inner(*_, **__):
filters = copy(warnings.filters)
warnings.filters = []
warnings.warn(message=message, category=UserWarning)
warnings.filters = filters
raise NotImplementedError(message)
return inner
return outer
def copy_docs(
docs: typing.Union[str, object],
*,
append: bool = False,
add_copy_note: bool = False,
custom_origin: str = None,
) -> typing.Callable[[_Function], _Function]:
"""
Copies docs from an object or str to the decorated function/method/class.
Parameters
----------
docs: str, object
The docs to copy.
append: bool
Whether the docs should be appended or be replaced.
add_copy_note: bool
Whether the docs should have a note that it was copied or not.
custom_origin: str, optional
The origin which should be displayed if ``add_copy_note`` is set to ``True``.
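    Examples
    --------
    A minimal usage sketch (``source`` and ``target`` are placeholder names):
    >>> def source():
    ...     '''Original docs.'''
    >>> @copy_docs(source)
    ... def target():
    ...     ...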
"""
_docs = copy(docs)
if not isinstance(docs, str):
docs = docs.__doc__
if add_copy_note:
if custom_origin:
origin = custom_origin
else:
try:
origin = _docs.__name__
except AttributeError:
origin = _docs.__class__.__name__
lines = [
f"",
f"",
f"copied from {origin}",
f"",
]
else:
lines = []
def decorator(func: _Function) -> _Function:
# to keep the indentation-style
tab = ""
if func.__doc__:
res = re.findall(r"^\s+", func.__doc__)
if res:
tab = res[0].removeprefix("\n")
if not append:
func.__doc__ = ""
# to be able to replace it later with ``tab``
docs_tab = ""
if docs:
res = re.findall(r"^\s+", docs)
if res:
docs_tab = res[0].removeprefix("\n")
func.__doc__ += "\n".join(
tab + line.removeprefix(docs_tab) for line in docs.splitlines()
)
func.__doc__ += "\n".join(tab + line for line in lines)
return func
return decorator | AlbertUnruhUtils.py | /AlbertUnruhUtils.py-2.3.0-py3-none-any.whl/AlbertUnruhUtils/utils/decorator.py | decorator.py |
__all__ = (
"get_version",
"is_version",
"is_lower_version",
"is_higher_version",
)
import importlib
import re
import types
import typing
def _version_to_tuple(
version: str,
/,
*,
sep: str = ".",
keep_dev_version: bool = True,
semver: bool = False,
) -> tuple[int, ...]:
"""
Parameters
----------
version: str
The raw version.
sep: str
The separator for e.g. major and minor.
keep_dev_version: bool
Whether to keep developer-versions or not.
semver: bool
Whether the return should be structured like a semantic version (https://semver.org)
Returns
-------
tuple[int, ...]
Notes
-----
If ``keep_dev_version`` is set to ``True`` the return may also contain ``str``.
"""
t_version = ()
for v in version.split(sep):
try:
t_version += (int(v),)
except ValueError:
# eg "0a3"
first = re.findall(r"^[\d]+", v)
if first:
t_version += (int(first[0]),)
if keep_dev_version:
covered = len(sep.join(str(v) for v in t_version))
t_version += (version[covered:].removeprefix(sep),)
break
if semver:
while len(t_version) < 3:
t_version += (0,)
if len(t_version) > 3:
t_version = t_version[:3] + tuple([sep.join(str(v) for v in t_version[3:])])
return t_version
def get_version(
module_o_name: typing.Union[types.ModuleType, str],
/,
*,
sep: str = ".",
keep_dev_version: bool = True,
semver: bool = False,
) -> tuple[int, ...]:
"""
Parameters
----------
module_o_name: types.ModuleType, str
The module of which you want the version.
sep: str
The separator for e.g. major and minor.
keep_dev_version: bool
Whether to keep developer-versions or not.
semver: bool
Whether the return should be structured like a semantic version (https://semver.org)
Returns
-------
tuple[int, ...]
Notes
-----
If ``keep_dev_version`` is set to ``True`` the return may also contain ``str``.
"""
# get module
if isinstance(module_o_name, types.ModuleType):
module = module_o_name
elif module_o_name in globals():
module = globals()[module_o_name]
else:
module = importlib.import_module(module_o_name)
# get version
if hasattr(module, "version_info"):
version = sep.join(str(v) for v in module.version_info)
elif hasattr(module, "__version__"):
version = module.__version__
elif hasattr(module, "version"):
version = module.version
else:
version = "0.0.0a" # one of the lowest versions if no version is set
return _version_to_tuple(
version,
sep=sep,
keep_dev_version=keep_dev_version,
semver=semver,
)
def is_version(
module_o_version: typing.Union[types.ModuleType, str, tuple[int, ...]],
version: typing.Union[tuple[int, ...], str],
/,
*,
sep: str = ".",
keep_dev_version: bool = True,
semver: bool = False,
) -> bool:
"""
MODULE_o_VERSION == VERSION
Parameters
----------
module_o_version: types.ModuleType, str, tuple[int, ...]
The module or version of which you want to check the version.
version: tuple[int, ...], str
The version to check against.
sep: str
The separator for e.g. major and minor.
keep_dev_version: bool
Whether to keep developer-versions or not.
semver: bool
Whether the return should be structured like a semantic version (https://semver.org)
Returns
-------
bool
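    Examples
    --------
    Plain version strings and tuples can be compared directly:
    >>> is_version("1.2.3", (1, 2, 3))
    True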
"""
try:
# module?
m_version = get_version(
module_o_version,
sep=sep,
keep_dev_version=keep_dev_version,
semver=semver,
)
except ModuleNotFoundError:
# version!
if isinstance(module_o_version, str):
m_version = _version_to_tuple(
module_o_version,
sep=sep,
keep_dev_version=keep_dev_version,
semver=semver,
)
else:
m_version = module_o_version
if isinstance(version, str):
version = _version_to_tuple(
version,
sep=sep,
keep_dev_version=keep_dev_version,
semver=semver,
)
return m_version == version
def is_lower_version(
module_o_version: typing.Union[types.ModuleType, str, tuple[int, ...]],
version: typing.Union[tuple[int, ...], str],
/,
*,
sep: str = ".",
keep_dev_version: bool = True,
semver: bool = False,
) -> bool:
"""
MODULE_o_VERSION < VERSION
Parameters
----------
module_o_version: types.ModuleType, str, tuple[int, ...]
The module or version of which you want to check the version.
version: tuple[int, ...], str
The version to check against.
sep: str
The separator for e.g. major and minor.
keep_dev_version: bool
Whether to keep developer-versions or not.
semver: bool
Whether the return should be structured like a semantic version (https://semver.org)
Returns
-------
bool
"""
try:
# module?
m_version = get_version(
module_o_version,
sep=sep,
keep_dev_version=keep_dev_version,
semver=semver,
)
except ModuleNotFoundError:
# version!
if isinstance(module_o_version, str):
m_version = _version_to_tuple(
module_o_version,
sep=sep,
keep_dev_version=keep_dev_version,
semver=semver,
)
else:
m_version = module_o_version
if isinstance(version, str):
version = _version_to_tuple(
version,
sep=sep,
keep_dev_version=keep_dev_version,
semver=semver,
)
# workaround for dev-versions
if len(m_version) != len(version):
for m, v in zip(m_version, version):
if m != v:
break
else:
# one of these have e.g. "alpha" at the end
return len(m_version) > len(version)
return m_version < version
def is_higher_version(
module_o_version: typing.Union[types.ModuleType, str, tuple[int, ...]],
version: typing.Union[tuple[int, ...], str],
/,
*,
sep: str = ".",
keep_dev_version: bool = True,
semver: bool = False,
) -> bool:
"""
MODULE_o_VERSION > VERSION
Parameters
----------
module_o_version: types.ModuleType, str, tuple[int, ...]
The module or version of which you want to check the version.
version: tuple[int, ...], str
The version to check against.
sep: str
The separator for e.g. major and minor.
keep_dev_version: bool
Whether to keep developer-versions or not.
semver: bool
Whether the return should be structured like a semantic version (https://semver.org)
Returns
-------
bool
"""
args = [
module_o_version,
version,
]
kwargs = {
"sep": sep,
"keep_dev_version": keep_dev_version,
"semver": semver,
}
lt = is_lower_version(*args, **kwargs) # noqa
eq = is_version(*args, **kwargs) # noqa
return not (lt or eq) | AlbertUnruhUtils.py | /AlbertUnruhUtils.py-2.3.0-py3-none-any.whl/AlbertUnruhUtils/utils/version.py | version.py |
__all__ = ("ServerRateLimit",)
import functools
import typing
import uuid
from aioredis import Redis
from time import time
C_IN = typing.TypeVar("C_IN")
C_OUT = typing.TypeVar("C_OUT")
class ServerRateLimit:
"""Docs 'll come soon... (If you want docs right now you can take a look into ``__init__``)"""
sections: dict[str, dict[str, int]]
retrieve_section: typing.Callable[[...], typing.Awaitable[tuple[str, str]]]
__slots__ = ("sections", "retrieve_section", "_redis")
def __init__(
self,
sections: dict[str, dict[str, int]],
retrieve_section: typing.Callable[
[...], typing.Awaitable[tuple[str, typing.Union[str, int]]]
],
*,
redis: Redis = None,
):
"""
Parameters
----------
sections: dict[str, dict[str, int]]
Parameter ``sections`` requires following structure:
```py
>>> {
... "<NAME or TYPE (e.g. user, admin etc.)>": {
... # type: int
... "amount": 10
...
... # type: int
... "interval": 60 # in seconds
...
... # type: int
... "timeout": 60 # in seconds # if a section requests to often then the timeout 'll be applied
... },
... "<second NAME or TYPE>": {
... ...
... },
... ...
... }
```
retrieve_section: typing.Callable[[...], typing.Awaitable[tuple[str, typing.Union[str, int]]]]
            This function 'll feed all its data from the original callable.
e.g. ```py
>>> @ServerRateLimit({"user": {...}, "admin": {...}}, retrieve)
... async def foo(*args, **kwargs) -> ...:
... pass
...
>>> async def retrieve(*args, **kwargs) -> (str, str):
... '''This is just an example, you have to manage yourself how you
... set it (can also be static by using a simple lambda-expression)'''
... if "admin_id" in kwargs:
... return "admin", 0
... return "user", 0
```
redis: Redis, optional
An own redis can optionally be set.
Notes
-----
The first return value from ``retrieve_section``
is the ``section``, the second is the ``id`` to
have every section separated.
"""
self.sections = sections
self.retrieve_section = retrieve_section
if redis is None:
redis = Redis(host="127.0.0.1", port=6262, db=0)
self._redis = redis
def __call__(
self,
func: typing.Callable[[C_IN], typing.Awaitable[C_OUT]],
) -> typing.Callable[
[C_IN], typing.Awaitable[tuple[tuple[bool, dict[str, int]], C_OUT]]
]:
async def decorator(
*args, **kwargs
) -> tuple[tuple[bool, dict[str, int]], C_OUT]:
"""
Returns
-------
tuple[tuple[bool, dict[str, int]], C_OUT]
"""
section, id = await self.retrieve_section(*args, **kwargs) # noqa
if section not in self.sections:
raise RuntimeError(
"Can't use key {section!r}. You have to return one of the following: {possible}".format(
section=section,
possible=", ".join(f"{k!r}" for k in self.sections),
)
)
await self._check_timeout(section, id)
timeout = await self._calculate_timeout(section, id)
remaining = await self._calculate_remaining_calls(section, id)
data = {
"request": {
"remaining": -1,
"limit": self.sections[section]["amount"],
"period": self.sections[section]["interval"],
"timeout": timeout,
}
}
if not remaining > 0 or timeout:
data["request"]["remaining"] = await self._calculate_remaining_calls(
section, id
)
return (False, data), ()
await self._record_call(section, id)
data["request"]["remaining"] = await self._calculate_remaining_calls(
section, id
)
return (True, data), await func(*args, **kwargs)
return functools.update_wrapper(decorator, func)
async def _record_call(
self,
section: str,
id: typing.Union[str, int], # noqa
) -> None:
"""
Parameters
----------
section: str
id: str, int
"""
key = f"call-{section}-{id}"
await self._redis.execute_command(
f"ZADD {key} {time()+self.sections[section]['interval']} {uuid.uuid4()}"
)
await self._redis.expire(key, self.sections[section]["interval"])
async def _calculate_remaining_calls(
self,
section: str,
id: typing.Union[str, int], # noqa
) -> int:
"""
Parameters
----------
section: str
id: str, int
Returns
-------
int
"""
key = f"call-{section}-{id}"
# cleanup
await self._redis.zremrangebyscore(key, 0, time())
return self.sections[section]["amount"] - int(
await self._redis.zcount(key, 0, 2**62) or 0
)
async def _check_timeout(
self,
section: str,
id: typing.Union[str, int], # noqa
) -> None:
"""
Parameters
----------
section: str
id: str, int
"""
key = f"cooldown-{section}-{id}"
if not await self._calculate_remaining_calls(section, id) > 0:
if not await self._redis.exists(key):
await self._redis.append(key, 1)
await self._redis.expire(key, self.sections[section]["timeout"])
async def _calculate_timeout(
self,
section: str,
id: typing.Union[str, int], # noqa
) -> int:
"""
Parameters
----------
section: str
id: str, int
Returns
-------
int
"""
key = f"cooldown-{section}-{id}"
return max(0, await self._redis.ttl(key))
if __name__ == "__main__":
r = Redis()
async def _():
return "default", 0 # noqa
@ServerRateLimit(
{"default": {"interval": 10, "amount": 10, "timeout": 20}},
_,
redis=r,
)
async def test():
return "<--test()-->"
async def main():
from asyncio import sleep
while True:
print(test.__name__, await test())
await sleep(0.5)
from asyncio import run
run(main()) | AlbertUnruhUtils.py | /AlbertUnruhUtils.py-2.3.0-py3-none-any.whl/AlbertUnruhUtils/asynchronous/ratelimit/server.py | server.py |
Albertson
=========
A library for super easy to use, reliable, and scalable counters.
Homepage: [https://github.com/FocusLab/Albertson][3]
**Albertson is in an alpha state and probably shouldn't be used in
production by anybody just yet.**

*"Worst library ever!"*
Why
---
Creating counters that handle incrementing at high levels of concurrency while
also being highly available and fault tolerant is *really* hard. Thankfully,
Amazon has solved these really hard bits with their [DynamoDB][2] service.
Albertson provides a simple, clean, and easy to use, Python interface to
DynamoDb for this specific use case.
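For example, creating and incrementing a counter only takes a few lines (a rough
sketch based on this repo's `CounterPool`; the table and counter names below are
made up):
```python
from albertson.base import CounterPool

pool = CounterPool(table_name='my-counters')   # hypothetical table name
counter = pool.get_counter('page-views')       # hypothetical counter name
counter.increment()
print(counter.count)
```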
Who
---
Albertson was created by the dev team at [FocusLab][1].
What's with the name?
---------------------
Internally at FocusLab Albertson is used for real-time, authoritative, counts
that are often used to correct less frequently updated counts throughout our
system. Accordingly, we've named the library after our favorite fictional
pedant, Comic Book Guy a.k.a. Jeff Albertson.
[1]: https://www.focuslab.io
[2]: http://aws.amazon.com/dynamodb/
[3]: https://github.com/FocusLab/Albertson
| Albertson | /Albertson-0.1.8.tar.gz/Albertson-0.1.8/README.md | README.md |
from datetime import datetime
import boto
from boto.dynamodb.exceptions import DynamoDBKeyNotFoundError
ISO_FORMAT = '%Y-%m-%dT%H:%M:%S'
class CounterPool(object):
'''
Handles schema level interactions with DynamoDB and generates individual
counters as needed.
'''
table_name = None
schema = {
'hash_key_name': 'counter_name',
'hash_key_proto_value': 'S',
}
read_units = 3
write_units = 5
    def __init__(self, aws_access_key=None, aws_secret_key=None,
                 table_name=None, schema=None, read_units=None,
                 write_units=None, auto_create_table=True):
"""
:aws_access_key:
            AWS Access Key ID with permissions to use DynamoDB
:aws_secret_key:
AWS Access Secret Key for the given Access Key ID
:table_name:
The DynamoDB table that should be used to store this pool's
counters. See http://bit.ly/DynamoDBModel for details on
DynamoDB's data model.
:schema:
The schema that will be used to create a table if one does not
already exist. See the `boto`<http://bit.ly/BotoCreateTable>_
docs for details on what's expected for a schema.
:read_units:
Read throughput to be set when a table is created. See
http://bit.ly/DynamoThoughput for details on Dynamo's provisioned
throughput system.
:write_units:
Write throughput to be set when a table is created.
:auto_create_table:
Should Albertson create a dynamodb table if the provided
`table_name` doesn't exist.
"""
self.conn = self.get_conn(aws_access_key, aws_secret_key)
self.table_name = table_name or self.table_name
self.schema = schema or self.schema
self.read_units = read_units or self.read_units
self.write_units = write_units or self.write_units
self.auto_create_table = auto_create_table
super(CounterPool, self).__init__()
def get_conn(self, aws_access_key=None, aws_secret_key=None):
'''
Hook point for overriding how the CounterPool gets its connection to
AWS.
'''
return boto.connect_dynamodb(
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key,
)
def get_table_name(self):
'''
Hook point for overriding how the CounterPool determines the table name
to use.
'''
if not self.table_name:
raise NotImplementedError(
'You must provide a table_name value or override the get_table_name method'
)
return self.table_name
def get_schema(self):
'''
Hook point for overriding how the CounterPool determines the schema
to be used when creating a missing table.
'''
if not self.schema:
raise NotImplementedError(
'You must provide a schema value or override the get_schema method'
)
return self.conn.create_schema(**self.schema)
def get_read_units(self):
'''
Hook point for overriding how the CounterPool determines the read
throughput units to set on a newly created table.
'''
return self.read_units
def get_write_units(self):
'''
Hook point for overriding how the CounterPool determines the write
throughput units to set on a newly created table.
'''
return self.write_units
def create_table(self):
'''
Hook point for overriding how the CounterPool creates a new table
in DynamooDB
'''
table = self.conn.create_table(
name=self.get_table_name(),
schema=self.get_schema(),
read_units=self.get_read_units(),
write_units=self.get_write_units(),
)
if table.status != 'ACTIVE':
table.refresh(wait_for_active=True, retry_seconds=1)
return table
def get_table(self):
'''
Hook point for overriding how the CounterPool transforms table_name
into a boto DynamoDB Table object.
'''
if hasattr(self, '_table'):
table = self._table
else:
try:
table = self.conn.get_table(self.get_table_name())
except boto.exception.DynamoDBResponseError:
if self.auto_create_table:
table = self.create_table()
else:
raise
self._table = table
return table
def create_item(self, hash_key, start=0, extra_attrs=None):
'''
        Hook point for overriding how the CounterPool creates a DynamoDB item
for a given counter when an existing item can't be found.
'''
table = self.get_table()
now = datetime.utcnow().replace(microsecond=0).isoformat()
attrs = {
'created_on': now,
'modified_on': now,
'count': start,
}
if extra_attrs:
attrs.update(extra_attrs)
item = table.new_item(
hash_key=hash_key,
attrs=attrs,
)
return item
def get_item(self, hash_key, start=0, extra_attrs=None):
'''
        Hook point for overriding how the CounterPool fetches a DynamoDB item
for a given counter.
'''
table = self.get_table()
try:
item = table.get_item(hash_key=hash_key)
except DynamoDBKeyNotFoundError:
item = None
if item is None:
item = self.create_item(
hash_key=hash_key,
start=start,
extra_attrs=extra_attrs,
)
return item
def get_counter(self, name, start=0):
'''
Gets the DynamoDB item behind a counter and ties it to a Counter
instace.
'''
item = self.get_item(hash_key=name, start=start)
counter = Counter(dynamo_item=item, pool=self)
return counter
class Counter(object):
'''
Interface to individual counters.
'''
def __init__(self, dynamo_item, pool):
self.dynamo_item = dynamo_item
self.pool = pool
@property
def name(self):
return self.dynamo_item['counter_name']
@property
def count(self):
        print(self.dynamo_item)
return self.dynamo_item['count']
@property
def created_on(self):
return datetime.strptime(self.dynamo_item['created_on'], ISO_FORMAT)
@property
def modified_on(self):
return datetime.strptime(self.dynamo_item['modified_on'], ISO_FORMAT)
def refresh(self):
self.dynamo_item = self.pool.get_item(hash_key=self.name)
def increment(self, amount=1):
item = self.dynamo_item
item.add_attribute('count', amount)
item.put_attribute(
'modified_on',
datetime.utcnow().replace(microsecond=0).isoformat()
)
result = item.save(return_values='UPDATED_NEW')
item.update(result['Attributes'])
return self.count
def decrement(self, amount=1):
return self.increment(amount * -1) | Albertson | /Albertson-0.1.8.tar.gz/Albertson-0.1.8/albertson/base.py | base.py |
# Albot.Online Python Library
A simple library for communicating with the [Albot.Online](https://Albot.Online) client.
This is great for getting you up and running fast, allowing you to focus more on the AI logic.
<br><br>
## Getting Started
This library is available at [PyPi](https://Pypi.org), so to download and install simply use the command:<br>
```
pip install Albot.Online
```
Alternatively you can download the files directly from this repository and include them in your project.
## Example
Following is a short example of the Python Library being put to use on the [Snake](https://www.albot.online/snake/) game.
For exact information of how to use the library see the [documentation Wiki](https://github.com/Albot-Online/Albot-Python-Library/wiki).
```python
from AlbotOnline.Snake.SnakeGame import SnakeGame
import random
game = SnakeGame() #Connects you to the Client
while(game.awaitNextGameState() == "ongoing"):
board = game.currentBoard
board.printBoard("Current Board")
playerMoves, enemyMoves = game.getPossibleMoves(board)
game.makeMove(random.choice(playerMoves))
```
This bot will simply connect to the client, look at what moves it currently has available and pick one at random.
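The same pattern works for the other game wrappers, for example Connect 4 (a sketch based on the `Connect4Game` class shipped in this package, not an official sample):
```python
from AlbotOnline.Connect4.Connect4Game import Connect4Game
import random

game = Connect4Game()  # connects to the client on 127.0.0.1:4000

def decideMove(board):
    return random.choice(game.getPossibleMoves(board))

game.playGame(decideMove)
```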
<br><br>
## Versioning
Python: <b>3.6.5</b> <br>
Library: <b>0.64</b>
## Authors
Fredrik Carlsson
## License
This project is licensed under the MIT License - see the [LICENSE.md](https://github.com/Albot-Online/Albot-Python-Library/blob/master/LICENSE) file for details
| Albot.Online | /Albot.Online-0.65.tar.gz/Albot.Online-0.65/README.md | README.md |
from AlbotOnline import AlbotConnection as AO
from AlbotOnline.Snake.SnakeBoard import SnakeBoard
import AlbotOnline.JsonProtocol as Prot
import json
class SnakeGame:
def __init__(self, IP='127.0.0.1', Port=4000):
self.connection = AO.AlbotConnection(bufferSize=16384, IP=IP, Port=Port, gameOverObj=self)
self._initGameVars()
def _initGameVars(self):
self.currentBoard = SnakeBoard()
self.gameOver = False
def getNextBoard(self, oldBoard=None):
if (self.connection.awaitingData == False):
self.makePassMove()
jMsg = self.connection.getNextJsonMsg()
if (oldBoard != None):
return SnakeBoard(oldBoard=oldBoard, jUpdate=jMsg)
else:
return SnakeBoard(jUpdate=jMsg)
def awaitNextGameState(self):
jMsg = self.connection.getNextJsonMsg()
self.currentBoard = SnakeBoard(jUpdate=jMsg)
self.boardState = jMsg[Prot.FIELDS.boardState]
self.gameOver = self.boardState != Prot.STATES.ongoing
return self.boardState
def restartGame(self):
self.connection.restartGame()
self._initGameVars()
def setGameOver(self, msg):
self.gameOver = True
self.currentBoard.gameOver = True
print(msg)
# makeMoves
def makePassMove(self):
self.connection.sendString(" ")
def moveUp(self):
self.connection.sendString(Prot.ACTIONS.Snake.up)
def moveDown(self):
self.connection.sendString(Prot.ACTIONS.Snake.down)
def moveLeft(self):
self.connection.sendString(Prot.ACTIONS.Snake.left)
def moveRight(self):
self.connection.sendString(Prot.ACTIONS.Snake.right)
def makeMove(self, dir):
if (dir == Prot.ACTIONS.Snake.up):
self.moveUp()
elif (dir == Prot.ACTIONS.Snake.down):
self.moveDown()
elif (dir == Prot.ACTIONS.Snake.left):
self.moveLeft()
elif (dir == Prot.ACTIONS.Snake.right):
self.moveRight()
def makeMoveInt(self, dir):
self.makeMove(self.intToMove(dir))
def intToMove(self, dir):
if (dir == 0):
return Prot.ACTIONS.Snake.right
elif (dir == 1):
return Prot.ACTIONS.Snake.up
elif (dir == 2):
return Prot.ACTIONS.Snake.left
elif (dir == 3):
return Prot.ACTIONS.Snake.down
# Raw msg handling
def getnextJsonMsg(self):
return self.connection.getNextJsonMsg()
def getNextTCPStringMsg(self):
return self.connection.getNextString()
# TCP API
def simulateMove(self, board, playerMove, enemyMove):
jCommand = {Prot.FIELDS.action: Prot.ACTIONS.Snake.simMoveDelta, Prot.FIELDS.player: board.raw2Player,
Prot.FIELDS.enemy: board.rawEnemy}
jCommand[Prot.FIELDS.Snake.playerMove] = playerMove
jCommand[Prot.FIELDS.Snake.enemyMove] = enemyMove
self.connection.sendJsonDict(jCommand)
return self.getNextBoard(oldBoard=board)
def evaluateBoard(self, board):
jCommand = {Prot.FIELDS.action: Prot.ACTIONS.evalBoard, Prot.FIELDS.board: board.getAPIBoard()}
self.connection.sendJsonDict(jCommand)
return self.connection.getNextJsonField(Prot.FIELDS.boardState)
def getPossibleMoves(self, board):
jCommand = {Prot.FIELDS.action: Prot.ACTIONS.getPossMoves, Prot.FIELDS.player: board.player.dir,
Prot.FIELDS.enemy: board.enemy.dir}
self.connection.sendJsonDict(jCommand)
jResponse = self.connection.getNextJsonMsg()
return jResponse[Prot.FIELDS.Snake.playerMoves], jResponse[Prot.FIELDS.Snake.enemyMoves]
def playGame(self, decideMoveFunc, autoRestart=False):
while (True):
if (self.awaitNextGameState() != Prot.STATES.ongoing):
if (autoRestart):
self.restartGame()
continue
else:
break
move = decideMoveFunc(self.currentBoard)
self.makeMove(move) | Albot.Online | /Albot.Online-0.65.tar.gz/Albot.Online-0.65/AlbotOnline/Snake/SnakeGame.py | SnakeGame.py |
import AlbotOnline.GridBoard as Grid
from AlbotOnline.Snake.SnakeConstants import Constants as C
import AlbotOnline.JsonProtocol as Prot
import copy
class SnakeBoard(Grid.GridBoard):
def __init__(self, oldBoard = None, jUpdate = None):
self.blocked = []
if(oldBoard == None):
Grid.GridBoard.__init__(self, width=C.width, height=C.height, numbers=False)
elif(oldBoard != None):
self._copyOldBoard(oldBoard)
if(jUpdate != None):
self._parseJsonMsg(jUpdate)
def _parseJsonMsg(self, jUpdate):
self.blocked.extend(jUpdate[Prot.FIELDS.Snake.blocked])
self._parseBlocked(jUpdate[Prot.FIELDS.Snake.blocked])
self.rawPlayer = jUpdate[Prot.FIELDS.player]
self.raw2Player = {}
self.raw2Player['x'] = self.rawPlayer['x']
self.raw2Player['y'] = self.rawPlayer['y']
self.player = self._parseSnakePlayer(self.rawPlayer, C.playerSign)
self.rawEnemy = jUpdate[Prot.FIELDS.enemy]
self.enemy = self._parseSnakePlayer(self.rawEnemy, C.enemySign)
def _parseSnakePlayer(self, snakePlayerField, gridSign):
temp = lambda: None
temp.dir = snakePlayerField[Prot.FIELDS.Snake.direction]
temp.x = snakePlayerField[Prot.FIELDS.Snake.posX]
temp.y = snakePlayerField[Prot.FIELDS.Snake.posY]
if(temp.x < C.width and temp.x >= 0 and temp.y < C.height and temp.y >= 0):
self.grid[temp.y][temp.x] = gridSign
return temp
def _parseBlocked(self, blocked):
for b in blocked:
self.grid[b[Prot.FIELDS.Snake.posY]][b[Prot.FIELDS.Snake.posX]] = C.blockedSign
def _copyOldBoard(self, oldBoard):
self.grid = oldBoard.cloneGrid()
self.height = oldBoard.height
self.width = oldBoard.width
self.blocked = copy.deepcopy(oldBoard.blocked)
def printBoard(self, title=""):
Grid.GridBoard.printBoard(self, title)
#TCP API
def getAPIBoard(self):
return {Prot.FIELDS.player: self.rawPlayer, Prot.FIELDS.enemy: self.rawEnemy, Prot.FIELDS.Snake.blocked: self.blocked}
def getAPIPlayers(self):
return {Prot.FIELDS.player: self.rawPlayer, Prot.FIELDS.enemy: self.rawEnemy} | Albot.Online | /Albot.Online-0.65.tar.gz/Albot.Online-0.65/AlbotOnline/Snake/SnakeBoard.py | SnakeBoard.py |
from AlbotOnline import AlbotConnection as AO
import AlbotOnline.JsonProtocol as Prot
import AlbotOnline.Connect4.Connect4Board as Board
class Connect4Game:
def __init__(self, IP='127.0.0.1', Port=4000):
self.connection = AO.AlbotConnection(bufferSize=1024, IP=IP, Port=Port, gameOverObj=self)
self.gameOver = False
def makeMove(self, move):
self.connection.sendString(str(move))
def setGameOver(self):
self.gameOver = True
def restartGame(self):
self.connection.restartGame()
def getNextBoard(self):
jMsg = self.connection.getNextJsonMsg()
rawBoard = jMsg[Prot.FIELDS.board]
self.currentBoard = Board.Connect4Board(rawBoard)
return self.currentBoard
# Raw msg handling
def getnextJsonMsg(self):
return self.connection.getNextJsonMsg()
def getNextTCPStringMsg(self):
return self.connection.getNextString()
# TCP-API
def getPossibleMoves(self, board):
jCommand = {Prot.FIELDS.action: Prot.ACTIONS.getPossMoves, Prot.FIELDS.board: board.grid}
self.connection.sendJsonDict(jCommand)
return self.connection.getNextJsonField(Prot.FIELDS.possibleMoves)
def evaluateBoard(self, board):
jCommand = {Prot.FIELDS.action: Prot.ACTIONS.evalBoard, Prot.FIELDS.board: board.grid}
self.connection.sendJsonDict(jCommand)
return self.connection.getNextJsonField(Prot.FIELDS.boardState)
def simulateMove(self, board, move, player):
jCommand = {Prot.FIELDS.action: Prot.ACTIONS.simMove, Prot.FIELDS.board: board.grid}
jCommand[Prot.FIELDS.move] = move
jCommand[Prot.FIELDS.player] = player
self.connection.sendJsonDict(jCommand)
rawBoard = self.connection.getNextJsonField(Prot.FIELDS.board)
return Board.Connect4Board(rawBoard)
def awaitNextGameState(self):
jMsg = self.connection.getNextJsonMsg()
rawBoard = jMsg[Prot.FIELDS.board]
self.currentBoard = Board.Connect4Board(rawBoard)
self.boardState = jMsg[Prot.FIELDS.boardState]
self.gameOver = self.boardState != Prot.STATES.ongoing
return self.boardState
def playGame(self, decideMoveFunc, autoRestart=False):
while (True):
if (self.awaitNextGameState() != Prot.STATES.ongoing):
if (autoRestart):
self.restartGame()
continue
else:
break
move = decideMoveFunc(self.currentBoard)
self.makeMove(move) | Albot.Online | /Albot.Online-0.65.tar.gz/Albot.Online-0.65/AlbotOnline/Connect4/Connect4Game.py | Connect4Game.py |
import random
import numpy as np
class Simulation:
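    """
    Minimal discrete-time simulation of a single queue with admission (flow)
    control and service control.
    On every tick the flow controller returns a control value B (added to the
    flow cost; when B lies in [0, 1] a new job is admitted with probability
    1 - B), the current queue length is added to the waiting cost, and the
    service controller returns a control value A (added to the service cost;
    when A lies in [0, 1] the head-of-line job completes with probability
    1 - A).
    """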
def __init__(self, queueSize, aMin, aMax, bMin, bMax):
self.queueSize = int(queueSize)
self.queue = 0
self.flowCost = 0
self.serviceCost = 0
self.waitCost = 0
self.completedJobs = 0
self.aMin = aMin
self.aMax = aMax
self.bMin = bMin
self.bMax = bMax
def tick(self, flowControl, serviceControl):
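        """
        Advance the simulation by one step.
        `flowControl` and `serviceControl` are callables taking
        (queue, queueSize, min, max) and returning the control values B and A
        for this step; the return value is the state vector built by
        getStateVector().
        """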
if(self._canAddMoreJobs()):
B = flowControl(self.queue, self.queueSize, self.bMin, self.bMax)
self._addFlowCost(B)
self._addJobb(B)
self._addCurrentWaitCost()
if(self._canServeJobs()):
A = serviceControl(self.queue, self.queueSize, self.aMin, self.aMax)
self._addServiceCost(A)
self._serveJob(A)
return self.getStateVector(B, A)
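# Illustrative sketch (not part of the module): driving the simulation with
# trivial controllers. The controller signatures follow tick() above; the
# constant probabilities are arbitrary assumptions.
#
#   sim = Simulation(queueSize=10, aMin=0.0, aMax=1.0, bMin=0.0, bMax=1.0)
#   flow = lambda q, size, bMin, bMax: 0.5     # reject roughly half the arrivals
#   service = lambda q, size, aMin, aMax: 0.3  # serve roughly 70% of the time
#   for _ in range(100):
#       state = sim.tick(flow, service)
#   print(state.completedJobs, state.totalWaitTime)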
def getStateVector(self, immediateFlowCost, immediateServiceCost):
vec = lambda: None
vec.totalWaitTime = self.waitCost
vec.totalFlowCost = self.flowCost
vec.totalServiceCost = self.serviceCost
vec.immWaitCost = self.currentWCost
vec.immFlowPower = immediateFlowCost
vec.immServicePower = immediateServiceCost
vec.completedJobs = self.completedJobs
vec.currentQueue = self.queue
vec.queueSize = self.queueSize
return vec
def _addCurrentWaitCost(self):
self.currentWCost = self.queue
self.waitCost += self.queue
#Flow Controller
def _canAddMoreJobs(self):
return self.queue < self.queueSize
def _addJobb(self, B):
if(random.random() >= B):
self.queue += 1
def _addFlowCost(self, B):
self.flowCost += B
#Service Controller
def _canServeJobs(self):
return self.queue > 0
def _serveJob(self, A):
if(random.random() >= A):
self.queue -= 1
self.completedJobs += 1
def _addServiceCost(self, A):
self.serviceCost += A | Albot.Online | /Albot.Online-0.65.tar.gz/Albot.Online-0.65/AlbotOnline/QueueFlowServiceControl/Simulation.py | Simulation.py |
import os
import re
import json5
import datetime
import itertools
import subprocess
from pathlib import Path
from filehash import FileHash
from argparse import ArgumentParser
from multiprocessing import Pool
class _Cons:
iDiPa: Path
oDiPa: Path
exifRules: dict
typeLogos: dict
################################################################
def __init__(self):
parser = ArgumentParser()
parser.add_argument('i', type=Path)
parser.add_argument('o', type=Path)
args = parser.parse_args()
self.iDiPa = args.i.absolute()
self.oDiPa = args.o.absolute()
assert self.iDiPa.is_dir()
assert not self.oDiPa.exists()
################################################################
self.exifRules = {
'Type': ['FileType'],
'Extension': ['FileTypeExtension'],
'Brand': [
'Make',
# 'HandlerVendorID',
],
'Model': ['Model'],
'Moment': [
"SubSecCreateDate",
# "SubSecDateTimeOriginal",
"MediaCreateDate",
"DateTimeOriginal",
"ModifyDate",
"FileModifyDate",
],
}
self.typeLogos = {
'BMP': '📸',
'DNG': '📸',
'JPEG': '📸',
'MKA': '🎤',
'MOV': '📹',
'M4A': '🎤',
'MP3': '🎤',
'MP4': '📹',
'PNG': '📸',
'WAV': '🎤',
'': '❔',
}
pass
pass
################################################################
def _genTypePart(t: str, typeLogos: dict):
if t in typeLogos:
logo = typeLogos[t]
return '|'.join([logo, t])
pass
else:
print('Unknown Type:', t)
pass
pass
def _genDevicePart(brand: str, model: str):
return '|'.join([_.replace(' ', '-') for _ in [brand, model]])
def _genMomentPart(val: str, lev: int):
val = val.replace('::', ':')
head = val[:19]
tail = val[19:]
dt = datetime.datetime.strptime(head, '%Y:%m:%d %H:%M:%S')
delta = re.search(r'[+-]\d+:\d+', tail)
if delta:
delta = delta.group(0)
sign = {'+': -1, '-': 1}[delta[0]]
h = int(delta[1:3])
m = int(delta[4:6])
delta = sign * datetime.timedelta(hours=h, minutes=m)
dt += delta
pass
ms = re.search(r'\.\d+', tail)
ms = ms.group(0)[1:4] if ms else '~~~'
__date = dt.strftime('%Y-%m-%d')
__time = dt.strftime('%H`%M`%S,') + ms
__level = chr(9312 + lev)
return '|'.join([__date, __time, __level])
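# Worked example (derived from the parsing above, for illustration only):
# _genMomentPart('2021:03:05 14:22:01.123+02:00', 0) parses the first 19
# characters as the timestamp, subtracts the +02:00 offset, keeps the
# millisecond digits and returns '2021-03-05|12`22`01,123|①'
# (① is chr(9312 + lev) for lev == 0).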
def _genFiPa(tFiPa: Path, _CONS: _Cons):
cmd = 'ExifTool -j "%s"' % tFiPa
proc = subprocess.run(cmd, stdout=subprocess.PIPE, check=False, encoding='UTF-8')
exif = json5.loads(proc.stdout)[0]
keyInfo = dict()
for field, rule in _CONS.exifRules.items():
value = ''
level = None
for lev, tag in enumerate(rule):
if tag in exif:
val = str(exif[tag])
if not val.startswith('0000'):
value = val
level = lev
break
pass
pass
keyInfo[field] = (value, level)
pass
typePart = _genTypePart(keyInfo['Type'][0], _CONS.typeLogos)
devicePart = _genDevicePart(keyInfo['Brand'][0], keyInfo['Model'][0])
momentPart = _genMomentPart(*keyInfo['Moment'])
ext = keyInfo['Extension'][0]
fn = (momentPart + '.' + ext) if ext else momentPart
destFiPa: Path = _CONS.oDiPa / typePart / devicePart / fn
return destFiPa
def _genMeta(iFiPa: Path, tFiPa: Path, _CONS: _Cons):
modify: float = os.path.getmtime(iFiPa)
iFiPa.rename(tFiPa)
destFiPa: Path = _genFiPa(tFiPa, _CONS)
tFiPa.rename(iFiPa)
hCode: str = FileHash('sha1').hash_file(iFiPa)
meta = (
hCode,
modify,
destFiPa,
iFiPa,
)
return meta
################################################################
def _analyze(metas: list[tuple[str, float, Path, Path]]):
quads = sorted(metas)
quadGroups = [tuple(_[1]) for _ in itertools.groupby(quads, lambda _: _[0])]
pairs = sorted([list(_[0])[2:] for _ in quadGroups])
pairGroups: list[tuple[Path, Path]] = [tuple(_[1]) for _ in itertools.groupby(pairs, lambda _: _[0])]
tasks = list()
for group in pairGroups:
for idx, (destFiPa, iFiPa) in enumerate(group):
oFiPa = destFiPa.with_stem('%s|%d' % (destFiPa.stem, idx + 1))
tasks.append((iFiPa, oFiPa))
pass
pass
return tasks
################################################################
def _main(_CONS):
iFiPas = [Path(_[0]) / fn for _ in os.walk(_CONS.iDiPa) for fn in _[2]]
L = len(iFiPas)
cons = [_CONS] * L
fmt = '{:0>%d}' % len(str(L - 1))
tDiPa = _CONS.oDiPa
tFiPas = [tDiPa / fmt.format(_) for _ in range(L)]
args = list(zip(iFiPas, tFiPas, cons))
tDiPa.mkdir(parents=True)
with Pool() as pool:
metas = pool.starmap(_genMeta, args)
pass
tasks: list[tuple[Path, Path]] = _analyze(metas)
for iFiPa, oFiPa in tasks:
oFiPa.parent.mkdir(parents=True, exist_ok=True)
iFiPa.rename(oFiPa)
pass
pass
def CLI():
_CONS = _Cons()
_main(_CONS)
pass
if __name__ == '__main__':
CLI()
pass | Albumize | /Albumize-1.3.tar.gz/Albumize-1.3/Albumize.py | Albumize.py |
# pylint: disable=no-member invalid-name
from socket import socket, gethostbyname
from ssl import SSLContext, SSLZeroReturnError, SSLError
from json import loads, dumps
from time import sleep
from threading import Thread
class Message():
"""Message class"""
def __init__(self, event):
if "ok" in event:
self.ok = event["ok"]
else:
self.ok = True
if "update_id" in event:
self.update_id = event["update_id"]
event = event["message"]
if "from" in event:
self.user = self.From(event["from"])
del event["from"]
if "chat" in event:
self.chat = self.Chat(event["chat"])
del event["chat"]
for i in event:
if isinstance(event[i], list):
if len(event[i]) == 1:
event[i] = event[i][0]
for j in event[i]:
# To test, dynamic class creation
# if isinstance(j, dict):
# for k in j:
# self.__dict__[k] = j[k]
# new_class = type(i, (), {
# "__init__": lambda self: for l in j: self.__dict__[l] = j[l]
# })
if isinstance(event[i][j], dict):
for k in event[i][j]:
self.__dict__[i][j][k] = event[i][j][k]
else:
self.__dict__[j] = event[i][j] # [i][j] ?
else:
self.__dict__[i] = event[i]
class From():
"""from dictionary value"""
def __init__(self, event):
for i in event:
self.__dict__[i] = event[i]
class Chat():
"""chat dictionary value"""
def __init__(self, event):
for i in event:
self.__dict__[i] = event[i]
class TBot():
"""Main Class"""
def __init__(self, TOKEN: str):
self.__url = gethostbyname("api.telegram.org")
self.__token = TOKEN
self.__commands = {}
self.__offset = 0
self.update_frequence = 1 # seconds
self.username: str
self.__process_func = [] #only 1? : function
self.__sock = None
self.__sock = self.__connect(self.__sock) #mhhhhhhh
self.__solve_me()
def __connect(self, sock):
if hasattr(sock, "closed"):
if not sock.closed:
sock.close()
sock = socket(2, 1)
sock.connect((self.__url, 443))
sock = SSLContext().wrap_socket(sock)
return sock
def __solve_me(self):
if (response:= self.__get("getMe")):
self.username = '@' + response.username
def __get(self, method, arguments=None):
request = f"GET /bot{self.__token}/{method}"
multiple = False
if arguments:
for i in arguments:
if multiple:
request += '&'
else:
request += '?'
multiple = True
request += i + '=' + str(arguments[i])
request += f" HTTP/1.1\nHost: {self.__url}\n\n"
self.__sock.send(request.encode())
try:
response = loads(self.__sock.recv(
65535 * 20).decode().split("\r\n\r\n", 1)[1])
if response["ok"]:
retval = []
response = response["result"]
if isinstance(response, list):
for i in response:
retval.append(Message(i))
return retval
return Message(response)
# return False
except IndexError:
# return False
pass
except SSLError:
self.__sock = self.__connect(self.__sock)
def _update(self):
if (response:= self.__get("getUpdates", {"offset": self.__offset})):
self.__offset = response[len(response) - 1].update_id + 1
return response
return None
def _check(self):
response = self._update()
if not response:
return
for i in response:
if hasattr(i, "type"):
if i.type == "bot_command":
match = False
if i.type == "supergroup":
i.text = i.text.replace(
self.username, '')
for j in self.__commands:
if j == i.text:
Thread(target=self.__commands[j], args=[i, ]).start()
match = True
break
if not match and '*' in self.__commands:
self.__commands['*'](i)
for k in self.__process_func:
Thread(target=k, args=[i, ]).start()
def command(self, *args):
"""Add a command handler"""
def wrapper(func):
self.__commands[args[0]] = func
return func
return wrapper
def process(self, func):
"""
Add a process handler.
Functions added here will be called every time there is an update.
"""
self.__process_func.append(func)
def run(self):
"""Start the bot"""
while 1:
try:
self._check()
except (ConnectionAbortedError, SSLZeroReturnError):#, SSLError):
self.__sock = self.__connect(self.__sock)
sleep(self.update_frequence)
def send_image_from_url(self, event, url):
    """Send an image to a user"""
    return self.__get("sendPhoto",
                      {"chat_id": event.chat.id, "photo": url}).ok
def send(self, event, message, *args):
"""Send a message"""
message = {
"chat_id": event.chat.id,
"text": message.replace("\n", "%0A")
} # Use Message()
for i in args:
message[i[0]] = i[1]
return self.__get("sendMessage", message)
def send_id(self, chat_id, message):
"""Send a message to an id"""
message = {
"chat_id": chat_id,
"text": message.replace("\n", "%0A")
} # Use Message()
return self.__get("sendMessage", message)
def get_chat(self, event):
"""Send chat informations"""
return self.__get("getChat", {"chat_id": event.chat.id})
class Scheduler():
"""Schedule function call"""
def __init__(self):
self.__funcs = []
def schedule(self, func, time, repeat=True, call=False, asynchronous=True): # args=None
"""Schedule a new function call"""
self.__funcs.append(func)
if call:
if asynchronous:
Thread(target=func).start()
else:
func()
if asynchronous:
Thread(target=self.__scheduled, args=[func, time, repeat]).start()
else:
self.__scheduled(func, time, repeat)
def remove(self, func):
"""Remove a scheduled function call"""
self.__funcs.remove(func)
def __scheduled(self, func, time, repeat):
while 1:
sleep(time)
if func in self.__funcs and repeat:
Thread(target=func).start()
else:
break
def keyboard(keys, temporary=False):
"""
Return a telegram keyboard from a string array.
keys=[[Top Items], [Middle Items], [Bottom Items]]
"""
items = [item for item in keys]
reply_markup = {"keyboard": items}
if temporary:
reply_markup["one_time_keyboard"] = True
return ["reply_markup", dumps(reply_markup)] | Alby7503TBot | /Alby7503TBot-1.0.9-py3-none-any.whl/TBot/tbot.py | tbot.py |
This software subscribes to mqtt-topics that contain raw sensor data and
publishes e.g. average values for configurable time spans.

Available algorithms are:

* ``Average`` - Vanilla average/mean implementation.
* ``WeightedAverage`` - The weighted average of all valid data points
  within the time window. The weight is the inverse time difference to
  the time\_to time stamp.
* ``Count`` - Count how many valid data points are within the given time
  window.
* ``Maximum`` - The maximum value of all valid data points within the
  time window.
* ``Minimum`` - The minimum value of all valid data points within the
  time window.
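As an illustration of the ``WeightedAverage`` idea (a minimal sketch in
Python, not the actual implementation; ``data`` is assumed to be a mapping
of timestamp to value within the time window):

::

    def weighted_average(data, time_to):
        # weight = inverse of the time difference to the time_to time stamp
        weights = {t: 1.0 / max(time_to - t, 1e-9) for t in data}
        total = sum(weights.values())
        return sum(value * weights[t] for t, value in data.items()) / total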
Alcathous [1]_ is the brother of Copreus. Both are sons of Pelops.
[`wiki <https://en.wikipedia.org/wiki/Alcathous,_son_of_Pelops>`__]
.. figure:: img/Microservice%20Overview.png
   :alt: Pelops Overview

   Pelops Overview
``Alcathous`` is part of the collection of mqtt based microservices
`pelops <https://gitlab.com/pelops>`__. An overview on the microservice
architecture and examples can be found at
(http://gitlab.com/pelops/pelops).
For Users
=========
Installation Core-Functionality
-------------------------------
Prerequisites for the core functionality are:
::
sudo apt install python3 python3-pip
Install via pip:
::
sudo pip3 install pelops alcathous
To update to the latest version add ``--upgrade`` as prefix to the
``pip3`` line above.
Install via gitlab (might need additional packages):
::
git clone [email protected]:pelops/alcathous.git
cd alcathous
sudo python3 setup.py install
This will install the following shell scripts:

* ``alcathous``

The script cli arguments are:

* '-c'/'--config' - config file (mandatory)
* '--version' - show the version number and exit
YAML-Config
-----------
A yaml [2]_ file must contain three root blocks:

* mqtt - mqtt-address, mqtt-port, and path to credentials file
  credentials-file (a file consisting of two entries: mqtt-user,
  mqtt-password)
* logger - which log level and which file to be used
* data-preparation

  * general - parameters for the manager
  * methods - mapping of algorithms, parameters and topic-pub suffix
  * datapoints - which topics should be used and which methods should be
    applied
::
mqtt:
mqtt-address: localhost
mqtt-port: 1883
credentials-file: ~/credentials.yaml
log-level: INFO
logger:
log-level: DEBUG
log-file: alcathous.log
data-preparation: # alcathous root node
no_data_behavior: last_valid # mute, last_valid, empty_message
update_cycle: 30 # new values published each ... seconds
number_worker: 2 # how many worker threads should be spawned to process task queue
methods:
- name: avg_5min # unique name for method
topic-pub-suffix: avg_5min
algorithm: avg # avg - average, wavg - weighted average, count, min, max
time_window: 5 # use the values from the last ... minutes
- name: wavg_5min # unique name for method
topic-pub-suffix: wavg_5min
algorithm: wavg # avg - average, wavg - weighted average, count, min, max
time_window: 5 # use the values from the last ... minutes
- name: count_2min # unique name for method
topic-pub-suffix: count_2min
algorithm: count # avg - average, wavg - weighted average, count, min, max
time_window: 2 # use the values from the last ... minutes
- name: min_3min # unique name for method
topic-pub-suffix: min_3min
algorithm: min # avg - average, wavg - weighted average, count, min, max
time_window: 3 # use the values from the last ... minutes
- name: max_3min # unique name for method
topic-pub-suffix: max_3min
algorithm: max # avg - average, wavg - weighted average, count, min, max
time_window: 3 # use the values from the last ... minutes
datapoints:
- topic-sub: /test/0/raw
topic-pub-prefix: /test/0/aggregated/
zero_is_valid: False # 0 is valid or rejected
methods: wavg_5min, avg_5min, count_2min, min_3min, max_3min
- topic-sub: /test/1/raw
topic-pub-prefix: /test/1/aggregated/
zero_is_valid: False # 0 is valid or rejected
methods: wavg_5min, avg_5min
systemd
-------
- add systemd example.
For Developers
==============
Getting Started
---------------
The project consists of three main modules:

* ``datapointmanager`` - loads the config and creates all ``Datapoint``
  instances. Hosts the main loop.
* ``datapoint`` - ``Datapoint`` represents one of the datapoints in the
  config. It holds all data received for the given topic and has its own
  set of method instances.
* ``algorithms`` - The configurable algorithms are used as data
  preparation methods in ``DataPoint``. Currently, five algorithms are
  implemented: Average, WeightedAverage, Count, Minimum, and Maximum.
  Average treats all values in a time window as equivalent, while
  WeightedAverage weights them with the time span between ``time_from``
  and ``time_value``.

``DataPointManager`` has two lists: references to the ``process``
functions from all instantiated methods and references to the ``purge``
functions from all instantiated ``DataPoint``\ s. The first list is
ordered by an execution cost estimation (highest value first). Both lists
are applied to worker threads (``general.number_worker``) - please adapt
the number of workers to your needs.
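In pseudo-code, the scheduling idea looks roughly like this (simplified
sketch; ``methods``, ``worker_queue`` and ``timestamp`` are illustrative
names, not the actual attributes):

::

    processes = [(m.process, m.execution_points_estimation()) for m in methods]
    processes.sort(key=lambda tup: tup[1], reverse=True)  # most expensive first

    for process, cost in processes:
        worker_queue.put((process, timestamp))
    worker_queue.join()  # wait until all workers are done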
Todos
-----
- Add better validity check for incoming values
- ...
Misc
----
The code is written for ``python3`` (and tested with python 3.5 on a
Raspberry Pi Zero with Raspbian Stretch).
`Merge requests <https://gitlab.com/pelops/alcathous/merge_requests>`__
/ `bug reports <https://gitlab.com/pelops/alcathous/issues>`__ are
always welcome.
.. [1]
The icon used for this project is in fact not Alcathous. Moreover, it
is Odysseus and resembles perfectly my failed journey to find a
fitting symbol.
.. [2]
Currently, pyyaml is yaml 1.1 compliant. In pyyaml On/Off and Yes/No
are automatically converted to True/False. This is an unwanted
behavior and deprecated in yaml 1.2. In copreus this autoconversion
is removed. Thus, On/Off and Yes/No are read from the yaml file as
strings (see module baseclasses.myconfigtools).
| Alcathous | /Alcathous-0.4.0.tar.gz/Alcathous-0.4.0/README.rst | README.rst |
def get_schema():
return {
"data-preparation": {
"description": "Root node for alcathous specific entries.",
"type": "object",
"properties": {
"no_data_behavior": {
"description": "How should the algorithm react in case of no data.",
"type": "string",
"enum": ["mute", "last_valid", "empty_message"]
},
"update_cycle": {
"type": "integer",
"minimum": 0,
"exclusiveMinimum": True,
"description": "new values published each ... seconds"
},
"number_worker": {
"type": "integer",
"minimum": 0,
"exclusiveMinimum": True,
"description": "how many worker threads should be spawned to process task queue"
},
"methods": {
"type": "array",
"items": {
"type": "object",
"properties": {
"name": {
"description": "unique name for method",
"type": "string"
},
"topic-pub-suffix": {
"type": "string",
"description": "results are publish to 'datapoints_prefix & topic-pub-suffix'."
},
"algorithm": {
"type": "string",
"enum": ["avg", "wavg", "count", "min", "max"]
},
"time_window": {
"description": "use the values from the last ... minutes",
"type": "number",
"minimum": 0,
"exclusiveMinimum": True
}
},
"required": ["time_window", "algorithm", "topic-pub-suffix"],
"additionalProperties": False
},
"additionalProperties": False
},
"datapoints": {
"type": "array",
"items": {
"type": "object",
"properties": {
"topic-sub": {
"description": "subscribe to this topic and apply the methods to it",
"type": "string"
},
"topic-pub-prefix": {
"description": "publish results to this topic",
"type": "string"
},
"zero_is_valid": {
"description": "0 is valid or rejected",
"type": "boolean"
},
"methods": {
"description": "comma separated list of one or more strings. each entry must be "
"represented by a method entry.",
"type": "string",
"pattern": "(\\d+)(,\\s*\\d+)*"
}
},
"required": ["topic-sub", "topic-pub-prefix", "zero_is_valid", "methods"],
"additionalProperties": False
},
"additionalProperties": False
}
},
"required": ["no_data_behavior", "update_cycle", "number_worker", "methods", "datapoints"],
"additionalProperties": False
}
} | Alcathous | /Alcathous-0.4.0.tar.gz/Alcathous-0.4.0/alcathous/schema.py | schema.py |
from pelops.abstractmicroservice import AbstractMicroservice
import time
import threading
from pelops.mythreading import LoggerThread
from alcathous.datapoint import DataPoint
from alcathous.nodatabehavior import NoDataBehavior
from alcathous.worker import Worker
import alcathous.schema as schema
from alcathous import version
class DataPointManager(AbstractMicroservice):
"""
Manage the data points by creating/start/stopping them and invocation of the process methods of the algorithms
regularly.
Config yaml:
mqtt:
mqtt-address: localhost
mqtt-port: 1883
credentials-file: ~/credentials.yaml
log-level: INFO
logger:
log-level: DEBUG
log-file: alcathous.log
data-preparation: # alcathous root node
no_data_behavior: last_valid # mute, last_valid, empty_message
update_cycle: 30 # new values published each ... seconds
number_worker: 2 # how many worker threads should be spawned to process task queue
methods:
  - name: avg_5min            # unique name for method
    topic-pub-suffix: avg_5min
    algorithm: avg            # avg - average, wavg - weighted average, count, min, max
    time_window: 5            # use the values from the last ... minutes
  - name: wavg_5min           # unique name for method
    topic-pub-suffix: wavg_5min
    algorithm: wavg           # avg - average, wavg - weighted average, count, min, max
    time_window: 5            # use the values from the last ... minutes
datapoints:
- topic-sub: /test/0/raw
topic-pub-prefix: /test/0/aggregated/
zero_is_valid: False # 0 is valid or rejected
methods: wavg_5min, avg_5min
"""
_version = version
_data_points = None # list of datapoint instances
_processes = None # list with references to all process functions of the algorithms of the datapoints
_purges = None # list with references to all purge functions of the datapoints
_no_data_behavior = None # defines how the algorithm should react if no data is available.
_update_cycle = None # call all processes every n seconds
_worker = None # instance of the worker manager
_do_loop_thread = None # contains the loop method thread
def __init__(self, config, pubsub_client=None, logger=None, stdout_log_level=None, no_gui=None):
"""
Constructor.
:param config: config yaml structure
:param pubsub_client: instance of an mymqttclient (optional)
:param logger: instance of a logger (optional)
:param no_gui: if False create and control a ui instance
:param stdout_log_level: if set, a logging handler with target sys.stdout will be added
"""
AbstractMicroservice.__init__(self, config, "data-preparation", pubsub_client=pubsub_client, logger=logger,
stdout_log_level=stdout_log_level, no_gui=no_gui)
self._purges = []
self._processes = []
self._data_points = []
self._update_cycle = int(self._config["update_cycle"])
self._no_data_behavior = NoDataBehavior.get_enum(str(self._config["no_data_behavior"]))
self._worker = Worker(self._config["number_worker"], self._logger)
self._do_loop_thread = LoggerThread(target=self._do_loop, name="datapointmanager", logger=self._logger)
_config_methods = {}
for m in self._config["methods"]:
key = m["name"]
if key in _config_methods:
self._logger.error("DataPointManager - method name must be unique ({}).".format(key))
raise ValueError("DataPointManager - method name must be unique ({}).".format(key))
_config_methods[key] = m
for config_data_point in self._config["datapoints"]:
dp = DataPoint(config_data_point, _config_methods, self._pubsub_client, self._logger,
self._no_data_behavior)
self._purges.append(dp.purge_old_values)
for method in dp.methods:
process = method.process
cost = method.execution_points_estimation()
self._logger.info("DataPointManager - adding process '{}' with cost '{}'.".
format(process.__name__, cost))
self._processes.append((process, cost))
self._data_points.append(dp)
self._processes.sort(key=lambda tup: tup[1], reverse=True) # sort processes by their cost most expensive first
@classmethod
def _get_description(cls):
return "This software subscribes to mqtt-topics that contain raw sensor data and publishes average values " \
"for configurable time spans."
def _loop_process(self):
"""
Call all process and purge methods.
"""
timestamp = time.time()
self._logger.info("DataPointManager - started work for timestamp '{} s'.".format(timestamp))
for p in self._processes:
self._worker.queue.put((p[0], timestamp))
self._logger.info("DataPointManager - waiting for worker to finish processing the algorithms.")
self._worker.queue.join()
for p in self._purges:
self._worker.queue.put((p, timestamp))
self._logger.info("DataPointManager - waiting for worker to purge outdated values.")
self._worker.queue.join()
def _do_loop(self):
"""
Call _loop_process every n seconds.
"""
self._logger.info("DataPointManager - start loop.")
while not self._stop_service.is_set():
start = time.time()
self._loop_process()
sleep_for = max(0, self._update_cycle - (time.time() - start))
self._logger.info("DataPointManager - wait for '{} s'.".format(sleep_for))
self._stop_service.wait(sleep_for)
self._logger.info("DataPointManager - exited loop.")
@classmethod
def _get_schema(cls):
"""
Get the sub schema to validate the yaml-config file against.
:return: json-schema dict
"""
return schema.get_schema()
def _start(self):
"""
Start all datapoints, worker and the process loop.
"""
for dp in self._data_points:
dp.start()
self._worker.start()
self._do_loop_thread.start()
def _stop(self):
"""
Stop all datapoints, worker and the process loop.
"""
for dp in self._data_points:
dp.stop()
self._do_loop_thread.join()
self._worker.stop()
def runtime_information(self):
return {}
def config_information(self):
return {}
def standalone():
"""Calls the static method DataPointManager.standalone()."""
DataPointManager.standalone()
if __name__ == "__main__":
DataPointManager.standalone() | Alcathous | /Alcathous-0.4.0.tar.gz/Alcathous-0.4.0/alcathous/datapointmanager.py | datapointmanager.py |
import collections
from threading import Lock
import time
from alcathous.algorithms.algorithmfactory import AlgorithmFactory
from pelops.logging import mylogger
class DataPoint:
"""
A DataPoint subscribes to a single topic and applies n methods/algorithms to the recorded data set.
config yaml for the datapoint:
- topic-sub: /test/0/raw
topic-pub-prefix: /test/0/aggregated/
zero_is_valid: False # 0 is valid or rejected
methods: wavg_5min, avg_5min
config yaml for the methods:
methods:
  - name: avg_5min            # unique name for method
    topic-pub-suffix: avg_5min
    algorithm: avg            # avg - average, wavg - weighted average, count, min, max
    time_window: 5            # use the values from the last ... minutes
  - name: wavg_5min           # unique name for method
    topic-pub-suffix: wavg_5min
    algorithm: wavg           # avg - average, wavg - weighted average, count, min, max
    time_window: 5            # use the values from the last ... minutes
"""
methods = None # list of instantiated methods / algorithms
_topic_sub = None # the values received through this topic subscription should be processed
_topic_pub_prefix = None # prefix for publishing the method results
_zero_is_valid = None # is 0 a valid value or should it be skipped
_max_time_window = None # each method has its own time window. this variable stores the maximum for this datapoint.
_data_set = None # ordered dict containing values and timestamps; owned by this DataPoint instance.
_lock_data_set = None # prevents datapoint class to change the dataset while an algorithm process it.
_logger = None # logger instance
_pubsub_client=None # instance of pelops.mymqttclient
def __init__(self, config_datapoint, config_methods, pubsub_client, parent_logger, no_data_behavior):
"""
Constructor.
:param config_datapoint: config yaml structure of this data point
:param config_methods: config yaml structure for all methods
:param parent_logger: logger instance from the parent. a child will be spawned
:param pubsub_client: instance of an mymqttclient
:param no_data_behavior: defines how the algorithm should react if no data is available.
"""
self._logger = mylogger.get_child(parent_logger, self.__class__.__name__)
self._logger.info("DataPoint.__init__ - initializing")
self._logger.debug("DataPoint.__init__ - config_datapoint: {}".format(config_datapoint))
self._logger.debug("DataPoint.__init__ - config_methods: {}".format(config_methods))
self._pubsub_client = pubsub_client
self._topic_sub = str(config_datapoint["topic-sub"])
self._logger.info("DataPoint.__init__ - topic_sub: {}".format(self._topic_sub))
self._topic_pub_prefix = str(config_datapoint["topic-pub-prefix"])
self._logger.info("{} - publish to '{}#'".format(self._topic_sub, self._topic_pub_prefix))
self._data_set = collections.OrderedDict()
self._lock_data_set = Lock()
self._max_time_window = 0
self._zero_is_valid = bool(config_datapoint["zero_is_valid"])
temp_methods = [x.strip() for x in config_datapoint["methods"].split(',')]
self.methods = AlgorithmFactory.get_instances(temp_methods, config_methods, self._logger, self._data_set,
self._lock_data_set, self._topic_pub_prefix, self._pubsub_client,
no_data_behavior)
for m in self.methods:
if m._time_window > self._max_time_window:
self._max_time_window = m._time_window
self._logger.info("{} - max time window for purging data: {} s.".format(self._topic_sub, self._max_time_window))
def _message_handler(self, value):
"""
Message handler - to be registered to _topic_sub in pubsub_client. Stores the received value in _data_set with
the current time stamp.
:param value: Message content from incoming mqtt message.
"""
if self._is_value_valid(value):
with self._lock_data_set:
timestamp = time.time()
self._data_set[timestamp] = float(value)
self._logger.info("{} - added {}@{}s".format(self._topic_sub, value, timestamp))
self._logger.debug(self._data_set)
def _is_value_valid(self, value):
"""
Checks for simple rules: 'is None' and '==0' if _zero_is_valid is not set.
:param value:
:return: True/False
"""
result = True
if value is None:
result = False
elif value == 0 and not self._zero_is_valid:
result = False
if not result:
self._logger.info("{} - value '{}' is not valid.".format(self._topic_sub, value))
return result
def purge_old_values(self, timestamp):
"""
Remove all entries in _data_set that have a time stamp that is older than timestamp.
:param timestamp: oldest time stamp to be kept.
"""
min_time_stamp = timestamp - self._max_time_window
self._logger.info("{} - purging values with timestamp < '{}'.".format(self._topic_sub, min_time_stamp))
count = 0
list_size = len(self._data_set)
with self._lock_data_set:
while len(self._data_set) and (next(iter(self._data_set.items())))[0] < min_time_stamp:
self._logger.debug("{} - purge item '{}'.".format(self._topic_sub, (next(iter(self._data_set.items())))))
count = count + 1
self._data_set.popitem(False)
self._logger.info("{} - purged {}/{} items.".format(self._topic_sub, count, list_size))
def start(self):
"""
Start by subscribing to _topic_sub.
"""
self._logger.info("Datapoint.start - subscribing to topic '{}'.".format(self._topic_sub))
self._pubsub_client.subscribe(self._topic_sub, self._message_handler)
def stop(self):
"""
Stop by unsubscription from _topic_sub.
"""
self._logger.info("Datapoint.stop - unsubscribing to topic '{}'.".format(self._topic_sub))
self._pubsub_client.unsubscribe(self._topic_sub, self._message_handler) | Alcathous | /Alcathous-0.4.0.tar.gz/Alcathous-0.4.0/alcathous/datapoint.py | datapoint.py |
from pelops.mythreading import LoggerThread
import queue
from pelops.logging import mylogger
class Worker:
"""
Creates n worker threads that wait for functions to be executed. Thus, a simple way of parallelizing
processing of many different tasks. A single queue is filled and the worker threads take the oldest
entry and executes them.
"""
_list = None # list containing the worker thread references.
queue = None # queue of to be processed tasks
_number = None # number of worker threads that should be created.
_logger = None # logger instance
def __init__(self, number_worker, parent_logger):
"""
Constructor.
:param number_worker: number of worker threads that should be created.
:param parent_logger: logger instance from the parent. a child will be spawned
"""
self._logger = mylogger.get_child(parent_logger, self.__class__.__name__)
self._logger.info("Worker.__init__ - initializing")
self._logger.debug("Worker.__init__ - config: number_worker={}.".format(number_worker))
self.queue = queue.Queue()
self._list = []
self._number = int(number_worker)
def _worker_thread(self):
self._logger.info("Worker._worker_thread - started worker")
while True:
item = self.queue.get()
self._logger.info("Worker._worker_thread - worker received item '{}'.".format(item))
if item is None:
self._logger.info("Worker._worker_thread - worker received stop signal")
break
func, parameter = item
func(parameter)
self.queue.task_done()
self._logger.info("Worker._worker_thread - stopped worker")
def start(self):
"""
Starts all worker threads.
"""
self._logger.info("Worker.start - starting {} worker.".format(self._number))
for i in range(self._number):
w = LoggerThread(target=self._worker_thread, name="worker_{}".format(i), logger=self._logger)
w.start()
self._list.append(w)
self._logger.info("Worker.start - {} worker started.".format(len(self._list)))
def stop(self):
"""
Stops all worker threads.
"""
self._logger.info("Worker.stop - stopping {} worker.".format(len(self._list)))
for i in range(len(self._list)):
self.queue.put(None)
for t in self._list:
t.join()
self._list = []
self._logger.info("Worker.stop - worker stopped.") | Alcathous | /Alcathous-0.4.0.tar.gz/Alcathous-0.4.0/alcathous/worker.py | worker.py |
from alcathous.nodatabehavior import NoDataBehavior
from pelops.logging import mylogger
class AbstractAlgorithm:
"""
AbstractAlgorithm provides everything that is needed for an algorithm to perform its aggregation work. It selects
the correct subset of the collected data.
config yaml:
topic-pub-suffix: avg_5min
algorithm: avg # avg - average, wavg - weighted average, count, min, max
time_window: 5 # use the values from the last ... minutes
"""
_data_set = None # ordered dict containing values and timestamps. It is owned by the parent datapoint class instance.
_lock_data_set = None # prevents datapoint class to change the dataset while an algorithm process it.
_time_window = None # time window in seconds. e.g. use the values from the last 300s and calculate their average.
_no_data_behavior = None # defines how the algorithm should react if no data is available.
_last_valid = None # stores the last valid result of the algorithm.
_topic = None # publish the result to this topic
_pubsub_client = None # instance of pelops.pubsub client
name = None # identifier for this algorithm
_config = None # stores the yaml-configuration for this algorithm
_logger = None # logger instance
def __init__(self, name, config, parent_logger, data_set, lock_data_set, topic_pub_prefix, pubsub_client,
no_data_behavior):
"""
Constructor
:param name: Name of the instance of the algorithm
:param config: yaml config structure
:param parent_logger: logger instance from the parent. a child will be spawned
:param data_set: the data the algorithm should work with
:param lock_data_set: the algorithm works with the data set only if a lock has been acquired successfully
:param topic_pub_prefix: prefix+name is the topic the result will be published to
:param pubsub_client: instance of an mymqttclient
:param no_data_behavior: defines how the algorithm should react if no data is available.
"""
self._logger = mylogger.get_child(parent_logger, self.__class__.__name__)
self.name = name
self._config = config
self._no_data_behavior = no_data_behavior
self._data_set = data_set
self._lock_data_set = lock_data_set
self._logger.info("{}.{} - __init__".format(self.__class__.__name__, name))
self._logger.debug("{}.{} - __init__ config: ".format(self.__class__.__name__, name, self._config))
self._topic = topic_pub_prefix + self._config["topic-pub-suffix"]
self._logger.info("{}.{} - publish to topic '{}'.".format(self.__class__.__name__, name, self._topic))
self._pubsub_client = pubsub_client
self._time_window = int(self._config["time_window"]) * 60
if self._time_window <= 0:
self._logger.error("Value for time_window must be a positive integer larger than 0. ('{}' not > 0)".
format(self._time_window))
raise ValueError("Value for time_window must be a positive integer larger than 0. ('{}' not > 0)".
format(self._time_window))
self._logger.info("{}.{} - time window {} s.".format(self.__class__.__name__, name, self._time_window))
def process(self, timestamp):
"""
Calls the specialized method _process and publishes the result to _topic.
:param timestamp: timestamp of the latest value to be used. valid timestamps are within timestamp and
(timestamp - self._time_window)
"""
time_from = timestamp - self._time_window
time_to = timestamp
try:
value = self._process(time_from, time_to)
self._last_valid = value
self._pubsub_client.publish(self._topic, value)
except ValueError:
self._logger.info("{} - process/ValueError. performing no data behavior '{}'.".
format(self._topic, self._no_data_behavior))
if self._no_data_behavior == NoDataBehavior.MUTE:
pass
elif self._no_data_behavior == NoDataBehavior.EMPTY_MESSAGE:
self._pubsub_client.publish(self._topic, None)
elif self._no_data_behavior == NoDataBehavior.LAST_VALID:
self._pubsub_client.publish(self._topic, self._last_valid)
else:
self._logger.error("Don't know how to handle NoDataBehavior.{}.".format(self._no_data_behavior))
raise NotImplementedError("Don't know how to handle NoDataBehavior.{}.".format(self._no_data_behavior))
def _process(self, time_from, time_to):
"""
This method must be implemented by each implementation of this abstract algorithm class. This is the core
functionality - whatever condensation of the provided data should be done must be implemented within
this method.
If no data is available (no or not enough valid values within the given time window), a ValueError should be raised.
:param time_from: earliest entry from data_set
:param time_to: latest entry from data_set
:return: the result of the condensation
:raises: ValueError if no_data occured.
"""
raise NotImplementedError()
def execution_points_estimation(self):
"""
Each implementation of this abstract algorithm class must provide an estimation of its execution cost.
:return: numeric value larger than 0
"""
raise NotImplementedError() | Alcathous | /Alcathous-0.4.0.tar.gz/Alcathous-0.4.0/alcathous/algorithms/abstractalgorithm.py | abstractalgorithm.py |
from alcathous.algorithms.average import Average
from alcathous.algorithms.weightedaverage import WeightedAverage
from alcathous.algorithms.count import Count
from alcathous.algorithms.minimum import Minimum
from alcathous.algorithms.maximum import Maximum
from pelops.logging import mylogger
class AlgorithmFactory:
"""
Create algorithm instances based on the given yaml config structure.
Note: each implementation of abstractalgorithm must be represented by a class variable. E.g. the implementation
'Average' is added with the line 'avg = Average'. 'avg' represents the identifier that must be used in the
config yaml.
"""
avg = Average # used by the line "klass = cls.__dict__[class_name]"
wavg = WeightedAverage # used by the line "klass = cls.__dict__[class_name]"
count = Count # used by the line "klass = cls.__dict__[class_name]"
min = Minimum # used by the line "klass = cls.__dict__[class_name]"
max = Maximum # used by the line "klass = cls.__dict__[class_name]"
@classmethod
def get_instance(cls, name, config, parent_logger, data_set, lock_data_set, topic_pub_prefix, pubsub_client,
no_data_behavior):
"""
Generate a method / algorithm instance.
:param name: Name of the to be created method / algorithm instance.
:param config: Configuration yaml containing config for all algorithms.
:param parent_logger: logger instance from the parent.
:param data_set: the data the algortihm should work with
:param lock_data_set: the algorithm works with the data set only if a lock has been acquired successfully
:param topic_pub_prefix: prefix+name is the topic the result will be published to
:param pubsub_client: instance of an pubsubclient
:param no_data_behavior: defines how the algorithm should react if no data is available.
:return: The corresponding method / algorithm instance
"""
_logger = mylogger.get_child(parent_logger, cls.__name__)
class_name = str(config["algorithm"]).lower()
try:
klass = cls.__dict__[class_name]
except KeyError:
raise ValueError("Unknown value for AlgorithmFactory '{}'. Expected {}.".
format(class_name, cls.__dict__.keys()))
_logger.info("AlgorithmFactory.get_instance - creating instance of '{}'.".format(klass.__name__))
instance = klass(name, config, parent_logger, data_set, lock_data_set, topic_pub_prefix, pubsub_client,
no_data_behavior)
return instance
@classmethod
def get_instances(cls, method_names, config_methods, parent_logger, data_set, lock_data_set, topic_pub_prefix,
pubsub_client, no_data_behavior):
"""
Take the list of method names and generates the corresponding algorithm instances.
:param method_names: List of algorithms that should be generated.
:param config_methods: Configuration yaml containing config for all algorithms.
:param parent_logger: logger instance from the parent.
:param data_set: the data the algorithm should work with
:param lock_data_set: the algorithm works with the data set only if a lock has been acquired successfully
:param topic_pub_prefix: prefix+name is the topic the result will be published to
:param pubsub_client: instance of an pubsubclient
:param no_data_behavior: defines how the algorithm should react if no data is available.
:return: List of methods / algorithm instances
"""
_logger = mylogger.get_child(parent_logger, cls.__name__)
_logger.info("AlgorithmFactory.get_instances - creating instances of '{}'.".format(method_names))
_logger.debug("AlgorithmFactory.get_instances - config_methods '{}'.".format(config_methods))
methods = []
for name in method_names:
try:
config = config_methods[name]
except KeyError:
raise ValueError("Unknown value for method '{}'. Expected {}.".
format(name, config_methods.keys()))
m = AlgorithmFactory.get_instance(name, config, parent_logger, data_set, lock_data_set,
topic_pub_prefix, pubsub_client, no_data_behavior)
methods.append(m)
_logger.info("AlgorithmFactory.get_instances - created {} instances.".format(len(methods)))
return methods | Alcathous | /Alcathous-0.4.0.tar.gz/Alcathous-0.4.0/alcathous/algorithms/algorithmfactory.py | algorithmfactory.py |
Alchemytools
------------
This project brings a set of useful tools to be used in any SQLAlchemy project.
The idea is to solve common problems, for example: opening/closing sessions, committing the session only at the end of the transaction, etc.
Available Tools
###############
Here are all tools available in alchemytools.
Context Managers
****************
managed
=======
This is the basic context manager and it will commit and close your session automatically, at the end of the ``with`` block.
.. code-block:: python
with managed(MySessionClass) as session:
# Do what you need with your session
# Here the session is already closed and commited
If you raise any exception inside the ``with`` block, the session will be rolled back and the exception re-raised.
To avoid having all of the function body inside the ``with`` block, ``managed`` works as a decorator as well.
.. code-block:: python
@managed(MySessionClass)
def foo(session, *args, **kwargs):
# Do what you need with your session
pass
# call as if the session didn't exist:
foo(2, a='b')
The session is opened every time the function is called and closed whenever it returns or raises an exception. Auto-commit and rollback rules work as normal.
Additional options
^^^^^^^^^^^^^^^^^^
``auto_flush``: Sets the autoflush option on the SQLAlchemy session, defaults to ``False``
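For example (a minimal sketch; it assumes ``auto_flush`` is passed as a keyword argument and ``some_object`` is a placeholder):

.. code-block:: python

    with managed(MySessionClass, auto_flush=True) as session:
        # session.autoflush is enabled for this block
        session.add(some_object)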
Executing tests
^^^^^^^^^^^^^^^
.. code-block:: shell
$ python setup.py test
| Alchemytools | /Alchemytools-0.7.0.tar.gz/Alchemytools-0.7.0/README.rst | README.rst |
# Abstraction Layer for Ephemerides (ALE)
[](https://travis-ci.org/USGS-Astrogeology/ale)
[](https://coveralls.io/github/USGS-Astrogeology/ale?branch=master)
[](https://usgs-astrogeology.github.io/ale/)
This library allows for the position, rotation, velocity and rotational velocity tracking of
multiple bodies in space, especially in relation to one another. It makes extensive use of NAIF's
SPICE data for such calculations.
## Setting up dependencies with conda (RECOMMENDED)
Install conda (either [Anaconda](https://www.anaconda.com/download/#linux) or
[Miniconda](https://conda.io/miniconda.html)) if you do not already have it. Installation
instructions may be found [here](https://conda.io/docs/user-guide/install/index.html).
### Creating an isolated conda environment
(TODO This command will need to be updated)
Run the following commands to create a self-contained dev environment for ale (type `y` to confirm creation):
```bash
conda env create -n ale -f environment.yml
```
> *For more information: [conda environments](https://conda.io/docs/user-guide/tasks/manage-environments.html)*
### Activating the environment
After creating the `ale` environment, we need to activate it. The activation command depends on your shell.
* **bash**: `source activate ale`
* **tcsh**: `conda activate ale`
> *You can add these to the end of your $HOME/.bashrc or $HOME/.cshrc if you want the `ale` environment to be active in every new terminal.*
## Building ALE
After you've set up and activated your conda environment, you may then build ale. Inside
of a cloned fork of the repository, follow these steps:
```bash
python setup.py install
cd build
cmake ..
make
```
Keep in mind that you will need to clone the repository with the `--recursive` flag in order to
retrieve the gtest submodule for testing. If you have already cloned without the `--recursive` flag,
running the following command will retrieve the gtest submodule manually:
```bash
git submodule update --init --recursive
```
## Running Tests
To run ctests to test c++ part of ale, run:
```
ctest
```
from the build directory.
| Ale | /Ale-0.8.4.tar.gz/Ale-0.8.4/README.md | README.md |
import os
from os import path
from glob import glob
from itertools import filterfalse, groupby
import warnings
import pvl
import collections
import collections.abc
from collections import OrderedDict
from itertools import chain
from datetime import datetime
import subprocess
import re
import networkx as nx
from networkx.algorithms.shortest_paths.generic import shortest_path
import spiceypy as spice
from ale import spice_root
def get_metakernels(spice_dir=spice_root, missions=set(), years=set(), versions=set()):
"""
Given a root directory, get any subdirectory containing metakernels,
assume spice directory structure.
Mostly doing filtering here, might be worth using Pandas?
Parameters
----------
spice_dir : str
Path containing Spice directories downloaded from NAIF's website
missions : set, str
Mission or set of missions to search for
years : set, str, int
year or set of years to search for
versions : set, str
version or set of versions to search for
"""
if not missions or missions == "all":
missions = set()
if not years or years == "all":
years = set()
if not versions or versions == "all":
versions = set()
if isinstance(missions, str):
missions = {missions}
if isinstance(years, str) or isinstance(years, int):
years = {str(years)}
else:
years = {str(year) for year in years}
avail = {
'count': 0,
'data': []
}
missions = [m.lower() for m in missions]
if spice_dir is not None:
mission_dirs = list(filter(path.isdir, glob(path.join(spice_dir, '*'))))
else:
warnings.warn("Unable to search mission directories without" +
"ALESPICEROOT being set. Defaulting to empty list")
mission_dirs = []
for md in mission_dirs:
# Assuming spice root has the same name as the original on NAIF website"
mission = os.path.basename(md).split('-')[0].split('_')[0]
if missions and all([m not in mission.lower() for m in missions]):
continue
metakernel_keys = ['mission', 'year', 'version', 'path']
# recursive glob to make metakernel search more robust to subtle directory structure differences
metakernel_paths = sorted(glob(os.path.join(md, '**','*.[Tt][Mm]'), recursive=True))
metakernels = []
for k in metakernel_paths:
components = path.splitext(path.basename(k))[0].split('_') + [k]
if len(components) == 3:
components.insert(1, 'N/A')
metakernels.append(dict(zip(metakernel_keys, components)))
# naive filter, do we really need anything else?
if years:
metakernels = list(filter(lambda x:x['year'] in years or x['year'] == 'N/A', metakernels))
if versions:
if versions == 'latest':
latest = []
# Panda's groupby is overrated
for k, g in groupby(metakernels, lambda x:x['year']):
items = list(g)
latest.append(max(items, key=lambda x:x['version']))
metakernels = latest
else:
metakernels = list(filter(lambda x:x['version'] in versions, metakernels))
avail['data'].extend(metakernels)
avail['count'] = len(avail['data'])
return avail
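# Example call (illustrative; assumes an ALESPICEROOT-style kernel layout on disk):
# get_metakernels(missions={'mro'}, years={2009, 2010}, versions='latest')
# returns a dict of the form
# {'count': N, 'data': [{'mission': ..., 'year': ..., 'version': ..., 'path': ...}, ...]}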
def find_latest_metakernel(path, year):
metakernel = None
mks = sorted(glob(os.path.join(path,'*.[Tt][Mm]')))
if not mks:
raise Exception(f'No metakernels found in {path}.')
for mk in mks:
if str(year) in os.path.basename(mk):
metakernel = mk
if not metakernel:
raise Exception(f'No metakernels found in {path} for {year}.')
return metakernel
def dict_merge(dct, merge_dct):
for k, v in merge_dct.items():
if (k in dct and isinstance(dct[k], dict)
and isinstance(merge_dct[k], collections.abc.Mapping)):
dict_merge(dct[k], merge_dct[k])
else:
dct[k] = merge_dct[k]
return dct
def get_isis_preferences(isis_preferences=None):
"""
Returns ISIS Preference file as a pvl object
"""
argprefs = {}
if isis_preferences:
if isinstance(isis_preferences, dict):
argprefs = isis_preferences
else:
argprefs = read_pvl(isis_preferences)
try:
homeprefs = read_pvl(os.path.join(os.path.expanduser("~"), '.Isis', 'IsisPreferences'))
except FileNotFoundError as e:
homeprefs = {}
try:
isisrootprefs_path = os.path.join(os.environ["ISISROOT"], 'IsisPreferences')
isisroot = os.environ['ISISROOT']
isisrootprefs = read_pvl(isisrootprefs_path)
except (FileNotFoundError, KeyError) as e:
isisrootprefs = {}
finalprefs = dict_merge(dict_merge(isisrootprefs, homeprefs), argprefs)
return finalprefs
def dict_to_lower(d):
return {k.lower():v if not isinstance(v, dict) else dict_to_lower(v) for k,v in d.items()}
def expandvars(path, env_dict=os.environ, default=None, case_sensative=True):
user_dict = env_dict if case_sensative else dict_to_lower(env_dict)
def replace_var(m):
group0 = m.group(0) if case_sensative else m.group(0).lower()
group1 = m.group(1) if case_sensative else m.group(1).lower()
return user_dict.get(m.group(2) or group1, group0 if default is None else default)
reVar = r'\$(\w+|\{([^}]*)\})'
return re.sub(reVar, replace_var, path)
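# Example (follows directly from the regex above):
# expandvars("$ISISDATA/base/kernels", {"ISISDATA": "/data/isis"})
# returns "/data/isis/base/kernels"; the "${ISISDATA}" form is handled the same way.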
def generate_kernels_from_cube(cube, expand=False, format_as='list'):
"""
Parses a cube label to obtain the kernels from the Kernels group.
Parameters
----------
cube : cube
Path to the cube to pull the kernels from.
expand : bool, optional
Whether or not to expand variables within kernel paths based on your IsisPreferences file.
format_as : str, optional {'list', 'dict'}
How to return the kernels: either as a one-dimensional ordered list, or as a dictionary
of kernel lists.
Returns
-------
: list
One-dimensional ordered list of all kernels from the Kernels group in the cube.
: Dictionary
Dictionary of lists of kernels with the keys being the Keywords from the Kernels group of
cube itself, and the values being the values associated with that Keyword in the cube.
"""
# enforce key order
mk_paths = OrderedDict.fromkeys(
['TargetPosition', 'InstrumentPosition',
'InstrumentPointing', 'Frame', 'TargetAttitudeShape',
'Instrument', 'InstrumentAddendum', 'LeapSecond',
'SpacecraftClock', 'Extra'])
# just work with full path
cube = os.path.abspath(cube)
cubelabel = pvl.load(cube)
try:
kernel_group = cubelabel['IsisCube']
except KeyError:
raise KeyError(f'{cubelabel}, Could not find kernels group, input cube [{cube}] may not be spiceinited')
return get_kernels_from_isis_pvl(kernel_group, expand, format_as)
def get_kernels_from_isis_pvl(kernel_group, expand=True, format_as="list"):
# enforce key order
mk_paths = OrderedDict.fromkeys(
['TargetPosition', 'InstrumentPosition',
'InstrumentPointing', 'Frame', 'TargetAttitudeShape',
'Instrument', 'InstrumentAddendum', 'LeapSecond',
'SpacecraftClock', 'Extra'])
if isinstance(kernel_group, str):
kernel_group = pvl.loads(kernel_group)
kernel_group = kernel_group["Kernels"]
def load_table_data(key):
mk_paths[key] = kernel_group.get(key, None)
if isinstance(mk_paths[key], str):
mk_paths[key] = [mk_paths[key]]
while 'Table' in mk_paths[key]: mk_paths[key].remove('Table')
while 'Nadir' in mk_paths[key]: mk_paths[key].remove('Nadir')
load_table_data('TargetPosition')
load_table_data('InstrumentPosition')
load_table_data('InstrumentPointing')
load_table_data('TargetAttitudeShape')
# the rest
mk_paths['Frame'] = [kernel_group.get('Frame', None)]
mk_paths['Instrument'] = [kernel_group.get('Instrument', None)]
mk_paths['InstrumentAddendum'] = [kernel_group.get('InstrumentAddendum', None)]
mk_paths['SpacecraftClock'] = [kernel_group.get('SpacecraftClock', None)]
mk_paths['LeapSecond'] = [kernel_group.get('LeapSecond', None)]
mk_paths['Clock'] = [kernel_group.get('Clock', None)]
mk_paths['Extra'] = [kernel_group.get('Extra', None)]
if (format_as == 'list'):
# get kernels as 1-d string list
kernels = [kernel for kernel in chain.from_iterable(mk_paths.values()) if isinstance(kernel, str)]
if expand:
isisprefs = get_isis_preferences()
kernels = [expandvars(expandvars(k, dict_to_lower(isisprefs['DataDirectory']))) for k in kernels]
return kernels
elif (format_as == 'dict'):
# return created dict
if expand:
isisprefs = get_isis_preferences()
for kern_list in mk_paths:
for index, kern in enumerate(mk_paths[kern_list]):
if kern is not None:
mk_paths[kern_list][index] = expandvars(expandvars(kern, dict_to_lower(isisprefs['DataDirectory'])))
return mk_paths
else:
raise Exception(f'{format_as} is not a valid return format')
def write_metakernel_from_cube(cube, mkpath=None):
# add ISISPREF paths as path_symbols and path_values to avoid custom expand logic
pvlprefs = get_isis_preferences()
kernels = generate_kernels_from_cube(cube)
# make sure kernels are mk strings
kernels = ["'"+k+"'" for k in kernels]
paths = OrderedDict(pvlprefs['DataDirectory'])
path_values = ["'"+os.path.expandvars(path)+"'" for path in paths.values()]
path_symbols = ["'"+symbol.lower()+"'" for symbol in paths.keys()]
body = '\n\n'.join([
'KPL/MK',
f'Metakernel Generated from an ISIS cube: {cube}',
'\\begindata',
'PATH_VALUES = (',
'\n'.join(path_values),
')',
'PATH_SYMBOLS = (',
'\n'.join(path_symbols),
')',
'KERNELS_TO_LOAD = (',
'\n'.join(kernels),
')',
'\\begintext'
])
if mkpath is not None:
with open(mkpath, 'w') as f:
f.write(body)
return body
def get_ck_frames(kernel):
"""
Get all of the reference frames defined in a kernel.
Parameters
----------
kernel : str
The path to the kernel
Returns
-------
ids : list
The set of reference frames IDs defined in the kernel
"""
ckbrief = subprocess.run(["ckbrief", "-t {}".format(kernel)],
capture_output=True,
check=True,
text=True)
ids = set()
for id in re.findall(r'^(-?[0-9]+)', ckbrief.stdout, flags=re.MULTILINE):
ids.add(int(id))
# Sort the output list for testability
return sorted(list(ids))
def create_spk_dependency_tree(kernels):
"""
construct the dependency tree for the body states in a set of kernels.
Parameters
----------
kernels : list
The list of kernels to evaluate the dependencies in. If two
kernels in this list contain the same information for the same
pair of bodies, then the later kernel in the list will be
identified in the kernel property for that edge in dep_tree.
Returns
-------
dep_tree : nx.DiGraph
The dependency tree for the kernels. Nodes are bodies. There is
an edge from one node to another if the state of the body of the
source node is defined relative to the state of the body of the
destination node. The kernel edge property identifies what kernel
the information for the edge is defined in.
"""
dep_tree = nx.DiGraph()
for kernel in kernels:
brief = subprocess.run(["brief", "-c {}".format(kernel)],
capture_output=True,
check=True,
text=True)
for body, rel_body in re.findall(r'\((.*)\).*w\.r\.t\..*\((.*)\)', brief.stdout):
dep_tree.add_edge(int(body), int(rel_body), kernel=kernel)
return dep_tree
def spkmerge_config_string(dep_tree, output_spk, bodies, lsk, start, stop):
"""
Create the contents of an spkmerge config file that will produce a spk that
completely defines the state of a list of bodies for a time range.
Parameters
----------
dep_tree : nx.DiGraph
Dependency tree from create_kernel_dependency_tree that contains
information about what the state of different bodies are relative
to and where that information is stored.
output_spk : str
The path to the SPK that will be output by spkmerge
bodies : list
The list of body ID codes that need to be defined in the kernel
created by spkmerge
lsk : str
The absolute path to the leap second kernel to use
start : str
The UTC start time for the kernel created by spkmerge
stop : str
The UTC stop time for the kernel created by spkmerge
Returns
-------
: str
The contents of an spkmerge config file that will produce a kernel that
defines the state of the input bodies for the input time range.
"""
input_kernels = set()
all_bodies = set(bodies)
for body in bodies:
# Everything is ultimately defined relative to
# SOLAR SYSTEM BARYCENTER (0) so find the path to it
dep_path = shortest_path(dep_tree, body, 0)
all_bodies.update(dep_path)
for i in range(len(dep_path) - 1):
input_kernels.add(dep_tree[dep_path[i]][dep_path[i+1]]['kernel'])
config = f"LEAPSECONDS_KERNEL = {lsk}\n"
config += f"SPK_KERNEL = {output_spk}\n"
config += f" BODIES = {', '.join([str(b) for b in all_bodies])}\n"
config += f" BEGIN_TIME = {start}\n"
config += f" END_TIME = {stop}\n"
for kernel in input_kernels:
config += f" SOURCE_SPK_KERNEL = {kernel}\n"
config += f" INCLUDE_COMMENTS = no\n"
return config
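# For illustration, spkmerge_config_string produces text of roughly this shape
# (all values hypothetical):
#   LEAPSECONDS_KERNEL = /kernels/naif0012.tls
#   SPK_KERNEL = merged.bsp
#     BODIES = -236, 1, 0
#     BEGIN_TIME = 2015 JAN 01 00:00:00
#     END_TIME = 2015 JAN 02 00:00:00
#     SOURCE_SPK_KERNEL = sc.bsp
#     INCLUDE_COMMENTS = no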
def write_metakernel_from_kernel_list(kernels):
"""
Parameters
----------
    kernels : list
        List of kernel paths
Returns
-------
: str
Returns string representation of a Naif Metakernel file
"""
kernels = [os.path.abspath(k) for k in kernels]
common_prefix = os.path.commonprefix(kernels)
kernels = ["'"+"$PREFIX"+k[len(common_prefix):]+"'" for k in kernels]
body = '\n\n'.join([
'KPL/MK',
f'Metakernel Generated from a kernel list by Ale',
'\\begindata',
'PATH_VALUES = (',
"'"+common_prefix+"'",
')',
'PATH_SYMBOLS = (',
"'PREFIX'",
')',
'KERNELS_TO_LOAD = (',
'\n'.join(kernels),
')',
'\\begintext'
])
return body
def duckpool(naifvar, start=0, length=10, default=None):
"""
Duck typing friendly version of spiceypy kernel pool functions.
Parameters
----------
naifvar : str
naif var string to query pool for
start : int
Index of first value
length : int
max number of values returned
default : obj
Default value to return if key is not found in kernel pool
Returns
-------
: obj
Spice value returned from spiceypy if found, default value otherwise
"""
for f in [spice.gdpool, spice.gcpool, spice.gipool]:
try:
val = f(naifvar, start, length)
return val[0] if len(val) == 1 else val
except:
continue
return default
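# Example use of duckpool (the keyword is illustrative):
#   duckpool("BODY499_RADII")   # -> the radii values if a loaded kernel defines them, else None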
def query_kernel_pool(matchstr="*"):
"""
Collect multiple keywords from the naif kernel pool based on a
template string
Parameters
----------
matchstr : str
matchi_c formatted str
Returns
-------
: dict
python dictionary of naif keywords in {keyword:value} format.
"""
try:
svars = spice.gnpool(matchstr, 0, 100)
except Exception as e:
warnings.warn(f"kernel search for {matchstr} failed with {e}")
svars = []
svals = [duckpool(v) for v in svars]
return dict(zip(svars, svals))
def read_pvl(path, use_jank=False):
"""
Syntax sugar, used to load a pvl object file from path
Parameters
----------
path : str
Path to Pvl file
use_jank : bool
If true, uses faster but less reliable JBFPvlParser, else uses standard PVL parser.
"""
with open(path) as f:
preftext = f.read().replace('EndGroup', 'End_Group').replace("EndObject", "End_Object")
if use_jank:
pvlprefs = JBFPvlParser(open(path).read())
else:
pvlprefs = pvl.loads(preftext)
return pvlprefs
def get_isis_mission_translations(isis3_data):
"""
Use ISIS translation files and return a lookup table.
Parameters
----------
isis3_data : str
path to $ISIS3DATA
Returns
-------
: dict
Dictionary mapping label mission strings to ISIS3 mission strings
"""
mission_translation_file = read_pvl(os.path.join(isis3_data, "base", "translations", "MissionName2DataDir.trn"))
# For some reason this file takes the form [value, key] for mission name -> data dir
lookup = [l[::-1] for l in mission_translation_file["MissionName"].getlist("Translation")]
return dict(lookup)
def JBFPvlParser(lines):
"""
Janky But Faster PVL Parser(TM)
Only really supports ISIS's Kernel DB files. This is because KernelDB files are sometimes very large for smithed kernels.
This should bring the parsing time for those DB files from minutes to seconds.
    It still needs full nested object/group support, but it should be able to read most PVL files.
Parameters
----------
lines : str
string body of PVL file.
    Returns
    -------
    : PVLModule
        Object representing the parsed PVL
    """
"""
def JBFKeywordParser(lines):
keyword = lines[0].split("=")[0]
value = lines[0].split("=")[1]+"".join(l.strip() for l in lines[1:])
if "(" in value and ")" in value:
value = value.replace("(", "").replace(")", "").split(",")
value = tuple([v.replace("\"", "") for v in value])
else:
value = value.strip()
return keyword.strip(), value
if isinstance(lines, str):
lines = lines.split("\n")
items = []
lines = [l.strip() for l in lines if l.strip()]
metadata = []
for i,l in enumerate(lines):
if "group = " in l.lower():
metadata.append([i, "group_start"])
elif "object = " in l.lower():
metadata.append([i, "object_start"])
elif "=" in l:
metadata.append([i, "keyword"])
elif "end_group" in l.lower() or "endgroup" in l.lower():
metadata.append([i, "group_end"])
elif "end_object" in l.lower() or "endobject" in l.lower():
metadata.append([i, "object_end"])
imeta = 0
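    # metadata holds [line index, element type] pairs; walk them in order,
    # emitting (keyword, value) tuples and recursing into Group/Object bodies.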
while imeta < len(metadata):
element_start_line, element_type = metadata[imeta]
if element_type == "keyword":
next_element_start = metadata[imeta+1][0] if imeta+1<len(metadata) else len(lines)+1
element_lines = lines[element_start_line:next_element_start]
items.append(JBFKeywordParser(element_lines))
imeta+=1
elif element_type == "group_start":
group_name = lines[element_start_line].split('=')[1].strip()
next_meta = [(i,m) for i,m in enumerate(metadata[imeta:]) if m[1] == "group_end"][0]
next_group_start = next_meta[1][0]
group_lines = lines[element_start_line+1:next_group_start]
items.append((group_name, JBFPvlParser(group_lines)))
imeta += next_meta[0]
elif element_type == "object_start":
# duplicate code but whatever
group_name = lines[element_start_line].split('=')[1].strip()
next_meta = [(i,m) for i,m in enumerate(metadata[imeta:]) if m[1] == "object_end"][0]
next_group_start = next_meta[1][0]
group_lines = lines[element_start_line+1:next_group_start]
items.append((group_name, JBFPvlParser(group_lines)))
imeta += next_meta[0]
elif element_type == "object_end" or element_type == "group_end":
imeta+=1
return pvl.PVLModule(items)
def search_isis_db(dbobj, labelobj, isis3_data="/usgs/cpkgs/isis3/data/"):
"""
    Given a PVL object of a KernelDB file and an ISIS label for a cube, find the best kernel
    to attach to the cube.
    The logic here is a bit gross, but it matches ISIS's implementation very closely.
Parameters
----------
dbobj : PVLModule
ISIS3 KernelDB file as a loaded PVLModule
labelobj : PVLModule
Cube label as loaded PVLModule
isis3_data : str
path to $ISIS3DATA
Returns
-------
: dict
dictionary containing kernel list and optionally the kernel type if relevant.
"""
if not dbobj:
return
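    # Rank kernel quality from lowest (predicted) to highest (smithed); the
    # index into this list is used as a comparable score below.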
quality = dict(e[::-1] for e in enumerate(["predicted", "nadir", "reconstructed", "smithed"]))
utc_start_time = labelobj["IsisCube"]["Instrument"]["StartTime"]
utc_stop_time = labelobj["IsisCube"]["Instrument"]["StopTime"]
run_time = None
dependencies = None
kernels = []
typ = None
types = []
# flag is set when a kernel is found matching the start time but not stop time
# and therefore a second pair needs to be found
partial_match = False
# Flag is set when kernels encapsulating the entire image time is found
full_match = False
for selection in dbobj.getlist("Selection"):
files = selection.getlist("File")
# selection criteria
matches = selection.getlist("Match")
times = selection.getlist("Time")
if not files:
raise Exception(f"No File found in {selection}")
files = [path.join(*file) if isinstance(file, list) else file for file in files]
for i,time in enumerate(times):
isis_time_format = '%Y %b %d %H:%M:%S.%f TDB'
other_isis_time_format = '"%Y %b %d %H:%M:%S.%f TDB"'
try:
time = (datetime.strptime(time[0].strip(), isis_time_format),
datetime.strptime(time[1].strip(), isis_time_format))
except:
time = (datetime.strptime(time[0].strip(), other_isis_time_format),
datetime.strptime(time[1].strip(), other_isis_time_format))
start_time_in_range = utc_start_time >= time[0] and utc_start_time <= time[1]
stop_time_in_range = utc_stop_time >= time[0] and utc_stop_time <= time[1]
            times[i] = start_time_in_range, stop_time_in_range
for i,match in enumerate(matches):
matches[i] = labelobj["IsisCube"][match[0].strip()][match[1].strip()].lower().strip() == match[2].lower().strip()
if any(matches if matches else [True]):
for i,f in enumerate(files):
if isinstance(f, tuple):
f = os.path.join(*[e.strip() for e in f])
full_path = os.path.join(isis3_data, f).replace("$", "").replace("\"", "")
if "{" in full_path:
start = full_path.find("{")
stop = full_path.find("}")
full_path = full_path[:start] + "?"*(stop-start-1) + full_path[stop+1:]
if '?' in full_path:
full_path = sorted(glob(full_path))[-1]
files[i] = full_path
if times:
have_start_match, have_stop_match = list(map(list, zip(*times)))
typ = selection.get("Type", None)
typ = typ.lower().strip() if typ else None
current_quality = max([quality[t.lower().strip()] for t in types if t]) if any(types) else 0
if any(have_start_match) and any(have_stop_match):
# best case, the image is fully encapsulated in the kernel
full_match = True
if quality[typ] >= current_quality:
kernels = files
types = [selection.get("Type", None)]
elif any(have_start_match):
kernels.extend(files)
types.append(selection.get("Type", None))
partial_match = True
elif any(have_stop_match):
if partial_match:
if quality[typ] >= current_quality:
kernels.extend(files)
types.append(selection.get("Type", None))
full_match = True
else:
full_match = True
kernels = files
types = [selection.get("Type", None)]
if partial_match:
    # this can only be true if a kernel matching the start time was found
# but not the end time
raise Exception("Could not find kernels encapsulating the full image time")
kernels = {"kernels" : kernels}
if any(types):
kernels["types"] = types
return kernels
def find_kernels(cube, isis3_data="/usgs/cpkgs/isis3/data/", format_as=dict):
"""
Find all kernels for a cube and return a json object with categorized kernels.
Parameters
----------
cube : str
Path to an ISIS cube
isis3_data : str
path to $ISIS3DATA
format_as : obj
What type to return the kernels as, ISIS3-like dict/PVL or flat list
Returns
-------
: obj
Container with kernels
"""
def remove_dups(listofElements):
# Create an empty list to store unique elements
uniqueList = []
# Iterate over the original list and for each element
        # add it to uniqueList, if it's not already there.
for elem in listofElements:
if elem not in uniqueList:
uniqueList.append(elem)
# Return the list of unique elements
return uniqueList
cube_label = pvl.load(cube)
mission_lookup_table = get_isis_mission_translations(isis3_data)
mission_dir = mission_lookup_table[cube_label["IsisCube"]["Instrument"]["SpacecraftName"]]
mission_dir = path.join(isis3_data, mission_dir.lower())
kernel_dir = path.join(mission_dir, "kernels")
base_kernel_dir = path.join(isis3_data, "base", "kernels")
kernel_types = [ name for name in os.listdir(kernel_dir) if os.path.isdir(os.path.join(kernel_dir, name)) ]
kernel_types.extend(name for name in os.listdir(base_kernel_dir) if os.path.isdir(os.path.join(base_kernel_dir, name)))
kernel_types = set(kernel_types)
db_files = []
for typ in kernel_types:
files = sorted(glob(path.join(kernel_dir, typ, "*.db")))
base_files = sorted(glob(path.join(base_kernel_dir, typ, "*.db")))
files = [list(it) for k,it in groupby(files, key=lambda f:os.path.basename(f).split(".")[0])]
base_files = [list(it) for k,it in groupby(base_files, key=lambda f:os.path.basename(f).split(".")[0])]
for instrument_dbs in files:
db_files.append(read_pvl(sorted(instrument_dbs)[-1], True))
for base_dbs in base_files:
db_files.append(read_pvl(sorted(base_dbs)[-1], True))
kernels = {}
for f in db_files:
#TODO: Error checking
typ = f[0][0]
kernel_search_results = search_isis_db(f[0][1], cube_label)
if not kernel_search_results:
kernels[typ] = None
else:
try:
kernels[typ]["kernels"].extend(kernel_search_results["kernels"])
if any(kernel_search_results.get("types", [None])):
kernels[typ]["types"].extend(kernel_search_results["types"])
except:
kernels[typ] = {}
kernels[typ]["kernels"] = kernel_search_results["kernels"]
if any(kernel_search_results.get("types", [None])):
kernels[typ]["types"] = kernel_search_results["types"]
for k,v in kernels.items():
if v:
kernels[k]["kernels"] = remove_dups(v["kernels"])
if format_as == dict:
return kernels
    elif format_as == list:
        kernel_list = []
        for kernel_group in kernels.values():
            if kernel_group:
                kernel_list.extend(kernel_group["kernels"])
        return kernel_list
else:
warnings.warn(f"{format_as} is not a valid format, returning as dict")
        return kernels

# (end of ale/util.py)
import numpy as np
from numpy.polynomial.polynomial import polyval, polyder
import networkx as nx
from networkx.algorithms.shortest_paths.generic import shortest_path
import spiceypy as spice
from ale.rotation import ConstantRotation, TimeDependentRotation
def create_rotations(rotation_table):
"""
Convert an ISIS rotation table into rotation objects.
Parameters
----------
rotation_table : dict
The rotation ISIS table as a dictionary
Returns
-------
: list
A list of time dependent or constant rotation objects from the table. This
list will always have either 1 or 2 elements. The first rotation will be
time dependent and the second rotation will be constant. The rotations will
be ordered such that the reference frame the first rotation rotates to is
the reference frame the second rotation rotates from.
"""
rotations = []
root_frame = rotation_table['TimeDependentFrames'][-1]
last_time_dep_frame = rotation_table['TimeDependentFrames'][0]
# Case 1: It's a table of quaternions and times
if 'J2000Q0' in rotation_table:
# SPICE quaternions are (W, X, Y, Z) and ALE uses (X, Y, Z, W).
quats = np.array([rotation_table['J2000Q1'],
rotation_table['J2000Q2'],
rotation_table['J2000Q3'],
rotation_table['J2000Q0']]).T
if 'AV1' in rotation_table:
av = np.array([rotation_table['AV1'],
rotation_table['AV2'],
rotation_table['AV3']]).T
else:
av = None
time_dep_rot = TimeDependentRotation(quats,
rotation_table['ET'],
root_frame,
last_time_dep_frame,
av=av)
rotations.append(time_dep_rot)
# Case 2: It's a table of Euler angle coefficients
elif 'J2000Ang1' in rotation_table:
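        # ISIS stores each angle as polynomial coefficients, with the base time
        # appended to J2000Ang1 and the time scale appended to J2000Ang2, so
        # evaluate the polynomials over the scaled CK table times.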
ephemeris_times = np.linspace(rotation_table['CkTableStartTime'],
rotation_table['CkTableEndTime'],
rotation_table['CkTableOriginalSize'])
base_time = rotation_table['J2000Ang1'][-1]
time_scale = rotation_table['J2000Ang2'][-1]
scaled_times = (ephemeris_times - base_time) / time_scale
coeffs = np.array([rotation_table['J2000Ang1'][:-1],
rotation_table['J2000Ang2'][:-1],
rotation_table['J2000Ang3'][:-1]]).T
angles = polyval(scaled_times, coeffs).T
# ISIS is hard coded to ZXZ (313) Euler angle axis order.
# SPICE also interprets Euler angle rotations as negative rotations,
# so negate them before passing to scipy.
time_dep_rot = TimeDependentRotation.from_euler('zxz',
-angles,
ephemeris_times,
root_frame,
last_time_dep_frame)
rotations.append(time_dep_rot)
if 'ConstantRotation' in rotation_table:
last_constant_frame = rotation_table['ConstantFrames'][0]
rot_mat = np.reshape(np.array(rotation_table['ConstantRotation']), (3, 3))
constant_rot = ConstantRotation.from_matrix(rot_mat,
last_time_dep_frame,
last_constant_frame)
rotations.append(constant_rot)
return rotations
class FrameChain(nx.DiGraph):
"""
This class is responsible for handling rotations between reference frames.
    Every node is a reference frame and every edge represents the rotation
    between those two nodes. Each edge is directional, where the source --> destination
is one rotation and destination --> source is the inverse of that rotation.
Attributes
__________
frame_changes : list
A list of tuples that represent the rotation from one frame
to another. These tuples should all be NAIF codes for
reference frames
ephemeris_time : list
        A list of ephemeris times that need to be rotated for each set
of frame rotations in the frame chain
"""
@classmethod
def from_spice(cls, sensor_frame, target_frame, center_ephemeris_time, ephemeris_times=[], nadir=False):
frame_chain = cls()
times = np.array(ephemeris_times)
sensor_time_dependent_frames, sensor_constant_frames = cls.frame_trace(sensor_frame, center_ephemeris_time, nadir)
target_time_dependent_frames, target_constant_frames = cls.frame_trace(target_frame, center_ephemeris_time)
time_dependent_frames = list(zip(sensor_time_dependent_frames[:-1], sensor_time_dependent_frames[1:]))
constant_frames = list(zip(sensor_constant_frames[:-1], sensor_constant_frames[1:]))
target_time_dependent_frames = list(zip(target_time_dependent_frames[:-1], target_time_dependent_frames[1:]))
target_constant_frames = list(zip(target_constant_frames[:-1], target_constant_frames[1:]))
time_dependent_frames.extend(target_time_dependent_frames)
constant_frames.extend(target_constant_frames)
for s, d in time_dependent_frames:
quats = np.zeros((len(times), 4))
avs = np.zeros((len(times), 3))
for j, time in enumerate(times):
state_matrix = spice.sxform(spice.frmnam(s), spice.frmnam(d), time)
rotation_matrix, avs[j] = spice.xf2rav(state_matrix)
quat_from_rotation = spice.m2q(rotation_matrix)
quats[j,:3] = quat_from_rotation[1:]
quats[j,3] = quat_from_rotation[0]
rotation = TimeDependentRotation(quats, times, s, d, av=avs)
frame_chain.add_edge(rotation=rotation)
for s, d in constant_frames:
quats = np.zeros(4)
rotation_matrix = spice.pxform(spice.frmnam(s), spice.frmnam(d), times[0])
quat_from_rotation = spice.m2q(rotation_matrix)
quats[:3] = quat_from_rotation[1:]
quats[3] = quat_from_rotation[0]
rotation = ConstantRotation(quats, s, d)
frame_chain.add_edge(rotation=rotation)
return frame_chain
@staticmethod
def frame_trace(reference_frame, ephemeris_time, nadir=False):
frame_codes = [reference_frame]
_, frame_type, _ = spice.frinfo(frame_codes[-1])
frame_types = [frame_type]
if nadir:
return [], []
while(frame_codes[-1] != 1):
try:
center, frame_type, frame_type_id = spice.frinfo(frame_codes[-1])
except Exception as e:
print(e)
break
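            # SPICE frame classes: 1 = inertial, 2 = PCK body-fixed, 3 = CK,
            # 4 = fixed-offset (TK), 5 = dynamic. Inertial and PCK frames are
            # taken as defined directly relative to J2000 (frame code 1).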
            if frame_type == 1 or frame_type == 2:
                frame_code = 1
            elif frame_type == 3:
try:
matrix, frame_code = spice.ckfrot(frame_type_id, ephemeris_time)
except:
raise Exception(f"The ck rotation from frame {frame_codes[-1]} can not \
be found due to no pointing available at requested time \
or a problem with the frame")
            elif frame_type == 4:
try:
matrix, frame_code = spice.tkfram(frame_type_id)
except:
raise Exception(f"The tk rotation from frame {frame_codes[-1]} can not \
be found")
            elif frame_type == 5:
matrix, frame_code = spice.zzdynrot(frame_type_id, center, ephemeris_time)
else:
raise Exception(f"The frame {frame_codes[-1]} has a type {frame_type_id} \
not supported by your version of Naif Spicelib. \
You need to update.")
frame_codes.append(frame_code)
frame_types.append(frame_type)
constant_frames = []
while frame_codes:
if frame_types[0] == 4:
constant_frames.append(frame_codes.pop(0))
frame_types.pop(0)
else:
break
time_dependent_frames = []
if len(constant_frames) != 0:
time_dependent_frames.append(constant_frames[-1])
while frame_codes:
time_dependent_frames.append(frame_codes.pop(0))
return time_dependent_frames, constant_frames
@classmethod
def from_isis_tables(cls, *args, inst_pointing={}, body_orientation={}, **kwargs):
frame_chain = cls()
for rotation in create_rotations(inst_pointing):
frame_chain.add_edge(rotation=rotation)
for rotation in create_rotations(body_orientation):
frame_chain.add_edge(rotation=rotation)
return frame_chain
def add_edge(self, rotation, **kwargs):
super(FrameChain, self).add_edge(rotation.source, rotation.dest, rotation=rotation, **kwargs)
rotation = rotation.inverse()
super(FrameChain, self).add_edge(rotation.source, rotation.dest, rotation=rotation, **kwargs)
def compute_rotation(self, source, destination):
"""
Returns the rotation to another node. Returns the identity rotation
if the other node is this node.
Parameters
----------
source : int
Integer id for the source node to rotate from
destination : int
Integer id for the node to rotate into from the source node
Returns
-------
rotation : Object
Returns either a TimeDependentRotation object or ConstantRotation
object depending on the number of rotations being multiplied
together
"""
if source == destination:
return ConstantRotation(np.array([0, 0, 0, 1]), source, destination)
path = shortest_path(self, source, destination)
rotations = [self.edges[path[i], path[i+1]]['rotation'] for i in range(len(path) - 1)]
rotation = rotations[0]
for next_rotation in rotations[1:]:
rotation = next_rotation * rotation
return rotation
def last_time_dependent_frame_between(self, source, destination):
"""
Find the last time dependent frame between the source frame and the
destination frame.
Parameters
----------
source : int
Integer id of the source node
destination : int
Integer of the destination node
Returns
-------
: tuple, None
Returns the source node id, destination node id, and edge dictionary
which contains the rotation from source to destination.
"""
path = shortest_path(self, source, destination)
# Reverse the path to search bottom up to find the last time dependent
# frame between the source and destination
path.reverse()
for i in range(len(path) - 1):
edge = self.edges[path[i+1], path[i]]
if isinstance(edge['rotation'], TimeDependentRotation):
return path[i+1], path[i], edge
        return None

# (end of ale/transformation.py)
from scipy.interpolate import interp1d
from scipy.spatial.transform import Rotation
import numpy as np
class ConstantRotation:
"""
A constant rotation between two 3D reference frames.
Attributes
__________
source : int
The NAIF ID code for the source frame
dest : int
The NAIF ID code for the destination frame
"""
    @staticmethod
    def from_matrix(mat, source, dest):
"""
Create a constant rotation from a directed cosine matrix
Parameters
----------
mat : 2darray
The rotation matrix
source : int
The NAIF ID code for the source frame
dest : int
The NAIF ID code for the destination frame
See Also
--------
scipy.spatial.transform.Rotation.from_dcm
"""
rot = Rotation.from_dcm(mat)
return ConstantRotation(rot.as_quat(), source, dest)
def __init__(self, quat, source, dest):
"""
Construct a constant rotation
Parameters
----------
quat : array
The quaternion representation of the rotation as a numpy array.
The quaternion must be in scalar last format (x, y, z, w).
source : int
The NAIF ID code for the source frame
dest : int
The NAIF ID code for the destination frame
"""
self.source = source
self.dest = dest
self.quat = np.asarray(quat)
def __repr__(self):
return f'ConstantRotation Source: {self.source}, Destination: {self.dest}, Quat: {self.quat}'
@property
def quat(self):
"""
The quaternion that rotates from the source reference frame to
the destination reference frame. The quaternion is in scalar last
format (x, y, z, w).
"""
return self._rot.as_quat()
@quat.setter
def quat(self, new_quat):
"""
Change the rotation to a different quaternion
Parameters
----------
new_quat : array
The new quaternion as an array.
The quaternion must be in scalar last format (x, y, z, w).
"""
self._rot = Rotation.from_quat(np.asarray(new_quat))
def rotation_matrix(self):
"""
The rotation matrix representation of the constant rotation
"""
return self._rot.as_dcm()
def inverse(self):
"""
Get the inverse rotation, that is the rotation from the destination
reference frame to the source reference frame.
"""
return ConstantRotation(self._rot.inv().as_quat(), self.dest, self.source)
def __mul__(self, other):
"""
Compose this rotation with another rotation.
The destination frame of the right rotation (other) and the source
frame of the left rotation (self) must be the same. I.E. if A and B are
rotations, then for A*B to be valid, A.source must equal B.dest.
Parameters
----------
other : Rotation
Another rotation object, it can be constant or time dependent.
"""
if self.source != other.dest:
raise ValueError("Destination frame of first rotation {} is not the same as source frame of second rotation {}.".format(other.dest, self.source))
if isinstance(other, ConstantRotation):
new_rot = self._rot * other._rot
return ConstantRotation(new_rot.as_quat(), other.source, self.dest)
elif isinstance(other, TimeDependentRotation):
return TimeDependentRotation((self._rot * other._rots).as_quat(), other.times, other.source, self.dest, av=other.av)
else:
raise TypeError("Rotations can only be composed with other rotations.")
class TimeDependentRotation:
"""
A time dependent rotation between two 3D reference frames.
Attributes
__________
source : int
The NAIF ID code for the source frame
dest : int
The NAIF ID code for the destination frame
"""
    @staticmethod
    def from_euler(sequence, euler, times, source, dest, degrees=False):
"""
Create a time dependent rotation from a set of Euler angles.
Parameters
----------
sequence : string
The axis sequence that the Euler angles are applied in. I.E. 'XYZ'
or 'ZXZ'.
euler : 2darray
2D numpy array of the euler angle rotations in radians.
times : array
The time for each rotation in euler. This array must be sorted
in ascending order.
source : int
The NAIF ID code for the source frame
dest : int
The NAIF ID code for the destination frame
        degrees : bool
            If True, the angles are in degrees. Otherwise, the angles are
            assumed to be in radians. Defaults to False.
See Also
--------
scipy.spatial.transform.Rotation.from_euler
"""
rot = Rotation.from_euler(sequence, np.asarray(euler), degrees=degrees)
return TimeDependentRotation(rot.as_quat(), times, source, dest)
def __init__(self, quats, times, source, dest, av=None):
"""
Construct a time dependent rotation
Parameters
----------
quats : 2darray
The quaternion representations of the rotation as a 2d numpy array.
Each inner array represents the rotation at the time at the same index
in the times argument. The quaternions must be in scalar last format
(x, y, z, w).
times : array
The time for each rotation in quats. This array must be sorted
in ascending order.
source : int
The NAIF ID code for the source frame
dest : int
The NAIF ID code for the destination frame
av : 2darray
The angular velocity of the rotation at each time as a 2d numpy array.
If not entered, then angular velocity will be computed by assuming constant
angular velocity between times.
"""
self.source = source
self.dest = dest
self.quats = quats
self.times = np.atleast_1d(times)
if av is not None:
self.av = np.asarray(av)
else:
self.av = av
def __repr__(self):
return f'Time Dependent Rotation Source: {self.source}, Destination: {self.dest}, Quats: {self.quats}, AV: {self.av}, Times: {self.times}'
@property
def quats(self):
"""
The quaternions that rotates from the source reference frame to
the destination reference frame. The quaternions are in scalar
last format (x, y, z, w).
"""
return self._rots.as_quat()
@quats.setter
def quats(self, new_quats):
"""
Change the rotations to interpolate over
Parameters
----------
new_quats : 2darray
The new quaternions as a 2d array. The quaternions must be
in scalar last format (x, y, z, w).
"""
self._rots = Rotation.from_quat(new_quats)
def inverse(self):
"""
Get the inverse rotation, that is the rotation from the destination
reference frame to the source reference frame.
"""
if self.av is not None:
new_av = -self._rots.apply(self.av)
else:
new_av = None
return TimeDependentRotation(self._rots.inv().as_quat(), self.times, self.dest, self.source, av=new_av)
def _slerp(self, times):
"""
Using SLERP interpolate the rotation and angular velocity at
specific times.
Times outside of the range covered by this rotation are extrapolated
assuming constant angular velocity. If the rotation has angular velocities
stored, then the first and last angular velocity are used for extrapolation.
Otherwise, the angular velocities from the first and last interpolation
interval are used for extrapolation.
Parameters
----------
times : 1darray or float
The new times to interpolate at.
Returns
-------
: Rotation
The new rotations at the input times
: 2darray
The angular velocity vectors
"""
# Convert non-vector input to vector and check input
vec_times = np.atleast_1d(times)
if vec_times.ndim > 1:
raise ValueError('Input times must be either a float or a 1d iterable of floats')
# Compute constant angular velocity for interpolation intervals
avs = np.zeros((len(self.times) + 1, 3))
if len(self.times) > 1:
steps = self.times[1:] - self.times[:-1]
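            # The rotation vector of rots[i+1] * rots[i]^-1 has magnitude equal
            # to the angle swept between samples, so dividing by the time step
            # below gives a constant angular velocity for each interval.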
rotvecs = (self._rots[1:] * self._rots[:-1].inv()).as_rotvec()
avs[1:-1] = rotvecs / steps[:, None]
# If available use actual angular velocity for extrapolation
# Otherwise use the adjacent interpolation interval
if self.av is not None:
avs[0] = self.av[0]
avs[-1] = self.av[-1]
else:
avs[0] = avs[1]
avs[-1] = avs[-2]
# Determine interpolation intervals for input times
av_idx = np.searchsorted(self.times, vec_times)
rot_idx = av_idx - 1
rot_idx[rot_idx < 0] = 0
# Interpolate/extrapolate rotations
time_diffs = vec_times - self.times[rot_idx]
interp_av = avs[av_idx]
interp_rots = Rotation.from_rotvec(interp_av * time_diffs[:, None]) * self._rots[rot_idx]
# If actual angular velocities are available, linearly interpolate them
if self.av is not None:
av_diff = np.zeros((len(self.times), 3))
if len(self.times) > 1:
av_diff[:-1] = self.av[1:] - self.av[:-1]
av_diff[-1] = av_diff[-2]
interp_av = self.av[rot_idx] + (av_diff[rot_idx] * time_diffs[:, None])
return interp_rots, interp_av
def reinterpolate(self, times):
"""
Reinterpolate the rotation at a given set of times.
Parameters
----------
times : 1darray or float
The new times to interpolate at.
Returns
-------
: TimeDependentRotation
The new rotation at the input times
"""
new_rots, av = self._slerp(times)
return TimeDependentRotation(new_rots.as_quat(), times, self.source, self.dest, av=av)
def __mul__(self, other):
"""
Compose this rotation with another rotation.
The destination frame of the right rotation (other) and the source
frame of the left rotation (self) must be the same. I.E. if A and B are
rotations, then for A*B to be valid, A.source must equal B.dest.
If the other rotation is a time dependent rotation, then the time range
for the resultant rotation will be the time covered by both rotations.
I.E. if A covers 0 to 2 and B covers 1 to 4, then A*B will cover 1 to 2.
Parameters
----------
other : Rotation
Another rotation object, it can be constant or time dependent.
"""
if self.source != other.dest:
raise ValueError("Destination frame of first rotation {} is not the same as source frame of second rotation {}.".format(other.dest, self.source))
if isinstance(other, ConstantRotation):
if self.av is not None:
other_inverse = other._rot.inv()
new_av = np.asarray([other_inverse.apply(av) for av in self.av])
else:
new_av = None
return TimeDependentRotation((self._rots * other._rot).as_quat(), self.times, other.source, self.dest, av=new_av)
elif isinstance(other, TimeDependentRotation):
merged_times = np.union1d(np.asarray(self.times), np.asarray(other.times))
reinterp_self = self.reinterpolate(merged_times)
reinterp_other = other.reinterpolate(merged_times)
new_quats = (reinterp_self._rots * reinterp_other._rots).as_quat()
new_av = reinterp_other._rots.inv().apply(reinterp_self.av) + reinterp_other.av
return TimeDependentRotation(new_quats, merged_times, other.source, self.dest, av=new_av)
else:
raise TypeError("Rotations can only be composed with other rotations.")
def apply_at(self, vec, et):
"""
Apply the rotation to a position at a specific time
"""
return self.reinterpolate(et)._rots.apply(vec)
def rotate_velocity_at(self, pos, vel, et):
"""
Apply the rotation to a velocity at a specific time
See:
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/req/rotation.html#State%20transformations
For an explanation of why a separate method is required to rotate velocities.
"""
vec_pos = np.asarray(pos)
vec_vel = np.asarray(vel)
if vec_pos.ndim < 1:
vec_pos = np.asarray([pos])
if vec_vel.ndim < 1:
vec_vel = np.asarray([vel])
if vec_pos.shape != vec_vel.shape:
raise ValueError('Input velocities and positions must have the same shape')
rots, avs = self._slerp(et)
rotated_vel = np.zeros(vec_vel.shape)
for indx in range(vec_pos.shape[0]):
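            # dR/dt = skew(omega) * R, so build the time derivative of the
            # rotation from the angular velocity's skew-symmetric matrix and
            # apply it to the position to pick up the frame's own motion.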
skew = np.array([[0, -avs[indx, 2], avs[indx, 1]],
[avs[indx, 2], 0, -avs[indx, 0]],
[-avs[indx, 1], avs[indx, 0], 0]])
rot_deriv = np.dot(skew, rots[indx].as_dcm().T).T
rotated_vel[indx] = rots[indx].apply(vec_vel[indx])
rotated_vel[indx] += np.dot(rot_deriv, vec_pos[indx])
        return rotated_vel

# (end of ale/rotation.py)
import json
import numpy as np
from scipy.interpolate import interp1d, BPoly
from networkx.algorithms.shortest_paths.generic import shortest_path
from ale.transformation import FrameChain
from ale.base.type_sensor import LineScanner, Framer
from ale.rotation import ConstantRotation, TimeDependentRotation
def to_isd(driver):
"""
Formatter to create sensor model meta data from a driver.
Parameters
----------
driver : Driver
Concrete driver for the image that meta data is being generated for.
Returns
-------
string
The ISIS compatible meta data as a JSON encoded string.
"""
meta_data = {}
meta_data['isis_camera_version'] = driver.sensor_model_version
    # interior orientation
meta_data['naif_keywords'] = driver.naif_keywords
meta_data['detector_sample_summing'] = driver.sample_summing
meta_data['detector_line_summing'] = driver.line_summing
meta_data['focal_length_model'] = {
'focal_length' : driver.focal_length
}
meta_data['detector_center'] = {
'line' : driver.detector_center_line,
'sample' : driver.detector_center_sample
}
meta_data['starting_detector_line'] = driver.detector_start_line
meta_data['starting_detector_sample'] = driver.detector_start_sample
meta_data['focal2pixel_lines'] = driver.focal2pixel_lines
meta_data['focal2pixel_samples'] = driver.focal2pixel_samples
meta_data['optical_distortion'] = driver.usgscsm_distortion_model
# general information
meta_data['image_lines'] = driver.image_lines
meta_data['image_samples'] = driver.image_samples
meta_data['name_platform'] = driver.platform_name
meta_data['name_sensor'] = driver.sensor_name
meta_data['reference_height'] = {
"maxheight": 1000,
"minheight": -1000,
"unit": "m"
}
# line scan sensor model specifics
if isinstance(driver, LineScanner):
meta_data['name_model'] = 'USGS_ASTRO_LINE_SCANNER_SENSOR_MODEL'
meta_data['interpolation_method'] = 'lagrange'
start_lines, start_times, scan_rates = driver.line_scan_rate
center_time = driver.center_ephemeris_time
meta_data['line_scan_rate'] = [[line, time, rate] for line, time, rate in zip(start_lines, start_times, scan_rates)]
meta_data['starting_ephemeris_time'] = driver.ephemeris_start_time
meta_data['center_ephemeris_time'] = center_time
# frame sensor model specifics
if isinstance(driver, Framer):
meta_data['name_model'] = 'USGS_ASTRO_FRAME_SENSOR_MODEL'
meta_data['center_ephemeris_time'] = driver.center_ephemeris_time
frame_chain = driver.frame_chain
sensor_frame = driver.sensor_frame_id
target_frame = driver.target_frame_id
body_radii = driver.target_body_radii
meta_data['radii'] = {
'semimajor' : body_radii[0],
'semiminor' : body_radii[2],
'unit' : 'km'
}
instrument_pointing = {}
source_frame, destination_frame, time_dependent_sensor_frame = frame_chain.last_time_dependent_frame_between(1, sensor_frame)
# Reverse the frame order because ISIS orders frames as
# (destination, intermediate, ..., intermediate, source)
instrument_pointing['time_dependent_frames'] = shortest_path(frame_chain, destination_frame, 1)
time_dependent_rotation = frame_chain.compute_rotation(1, destination_frame)
instrument_pointing['ck_table_start_time'] = time_dependent_rotation.times[0]
instrument_pointing['ck_table_end_time'] = time_dependent_rotation.times[-1]
instrument_pointing['ck_table_original_size'] = len(time_dependent_rotation.times)
instrument_pointing['ephemeris_times'] = time_dependent_rotation.times
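    # ALE/SciPy quaternions are scalar-last (x, y, z, w); ISIS expects scalar
    # first, so reorder to (w, x, y, z).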
instrument_pointing['quaternions'] = time_dependent_rotation.quats[:, [3, 0, 1, 2]]
instrument_pointing['angular_velocities'] = time_dependent_rotation.av
# reference frame should be the last frame in the chain
instrument_pointing["reference_frame"] = instrument_pointing['time_dependent_frames'][-1]
# Reverse the frame order because ISIS orders frames as
# (destination, intermediate, ..., intermediate, source)
instrument_pointing['constant_frames'] = shortest_path(frame_chain, sensor_frame, destination_frame)
constant_rotation = frame_chain.compute_rotation(destination_frame, sensor_frame)
instrument_pointing['constant_rotation'] = constant_rotation.rotation_matrix().flatten()
meta_data['instrument_pointing'] = instrument_pointing
body_rotation = {}
source_frame, destination_frame, time_dependent_target_frame = frame_chain.last_time_dependent_frame_between(target_frame, 1)
if source_frame != 1:
# Reverse the frame order because ISIS orders frames as
# (destination, intermediate, ..., intermediate, source)
body_rotation['time_dependent_frames'] = shortest_path(frame_chain, source_frame, 1)
time_dependent_rotation = frame_chain.compute_rotation(1, source_frame)
body_rotation['ck_table_start_time'] = time_dependent_rotation.times[0]
body_rotation['ck_table_end_time'] = time_dependent_rotation.times[-1]
body_rotation['ck_table_original_size'] = len(time_dependent_rotation.times)
body_rotation['ephemeris_times'] = time_dependent_rotation.times
body_rotation['quaternions'] = time_dependent_rotation.quats[:, [3, 0, 1, 2]]
body_rotation['angular_velocities'] = time_dependent_rotation.av
if source_frame != target_frame:
# Reverse the frame order because ISIS orders frames as
# (destination, intermediate, ..., intermediate, source)
body_rotation['constant_frames'] = shortest_path(frame_chain, target_frame, source_frame)
constant_rotation = frame_chain.compute_rotation(source_frame, target_frame)
body_rotation['constant_rotation'] = constant_rotation.rotation_matrix().flatten()
body_rotation["reference_frame"] = destination_frame
meta_data['body_rotation'] = body_rotation
j2000_rotation = frame_chain.compute_rotation(target_frame, 1)
instrument_position = {}
positions, velocities, times = driver.sensor_position
instrument_position['spk_table_start_time'] = times[0]
instrument_position['spk_table_end_time'] = times[-1]
instrument_position['spk_table_original_size'] = len(times)
instrument_position['ephemeris_times'] = times
# Rotate positions and velocities into J2000 then scale into kilometers
velocities = j2000_rotation.rotate_velocity_at(positions, velocities, times)/1000
positions = j2000_rotation.apply_at(positions, times)/1000
instrument_position['positions'] = positions
instrument_position['velocities'] = velocities
instrument_position["reference_frame"] = destination_frame
meta_data['instrument_position'] = instrument_position
sun_position = {}
positions, velocities, times = driver.sun_position
sun_position['spk_table_start_time'] = times[0]
sun_position['spk_table_end_time'] = times[-1]
sun_position['spk_table_original_size'] = len(times)
sun_position['ephemeris_times'] = times
# Rotate positions and velocities into J2000 then scale into kilometers
velocities = j2000_rotation.rotate_velocity_at(positions, velocities, times)/1000
positions = j2000_rotation.apply_at(positions, times)/1000
sun_position['positions'] = positions
sun_position['velocities'] = velocities
sun_position["reference_frame"] = destination_frame
meta_data['sun_position'] = sun_position
# check that there is a valid sensor model name
if 'name_model' not in meta_data:
raise Exception('No CSM sensor model name found!')
    return meta_data

# (end of ale/formatters/formatter.py)
import os
import re
import struct
from glob import glob
import numpy as np
from numpy.polynomial.polynomial import polyval, polyder
from dateutil import parser
import pvl
import spiceypy as spice
from ale.rotation import ConstantRotation, TimeDependentRotation
from ale.transformation import FrameChain
from scipy.interpolate import interp1d, BPoly
from ale.base.label_isis import IsisLabel
def read_table_data(table_label, cube):
"""
Helper function to read all of the binary table data
Parameters
----------
table_label : PVLModule
The ISIS table label
cube : file
The ISIS cube file
Returns
-------
bytes :
The binary portion of the table data
"""
cubehandle = open(cube, "rb")
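    # ISIS StartByte values are 1-based, hence the -1 when seeking.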
cubehandle.seek(table_label['StartByte'] - 1)
return cubehandle.read(table_label['Bytes'])
def parse_table(table_label, data):
"""
Parse an ISIS table into a dictionary.
Parameters
----------
table_label : PVLModule
The ISIS table label
data : bytes
The binary component of the ISIS table
Returns
-------
dict :
The table as a dictionary with the keywords from the label and the
binary data
"""
data_sizes = {'Integer' : 4,
'Double' : 8,
'Real' : 4,
'Text' : 1}
data_formats = {'Integer' : 'i',
'Double' : 'd',
'Real' : 'f'}
# Parse the binary data
fields = table_label.getlist('Field')
results = {field['Name']:[] for field in fields}
offset = 0
for record in range(table_label['Records']):
for field in fields:
if field['Type'] == 'Text':
field_data = data[offset:offset+field['Size']].decode(encoding='latin_1')
else:
data_format = data_formats[field['Type']] * field['Size']
field_data = struct.unpack_from(data_format, data[offset:])
if len(field_data) == 1:
field_data = field_data[0]
results[field['Name']].append(field_data)
offset += data_sizes[field['Type']] * field['Size']
# Parse the keywords from the label
results.update({key : value for key, value in table_label.items() if not isinstance(value, pvl._collections.PVLGroup)})
return results
def rotate_state(table, rotation):
"""
Rotate the positions and velocities in an ISIS position Table.
If the table stores states as a function, then it will re compute them
based on the original size of the table.
Parameters
----------
table : dict
The position table as a dictionary
rotation : TimeDependentRotation
The rotation to rotate the positions by
Returns
-------
: 2darray
Array of rotated positions
: 2darray
Array of rotated velocities. Returns None if no velocities are in the table.
: array
Array of times for the states
"""
positions = None
velocities = None
ephemeris_times = None
# Case 1, the table has states at discrete times
if 'J2000X' in table:
ephemeris_times = table['ET']
positions = 1000 * np.array([table['J2000X'],
table['J2000Y'],
table['J2000Z']]).T
if 'J2000XV' in table:
ephemeris_times = table['ET']
velocities = 1000 * np.array([table['J2000XV'],
table['J2000YV'],
table['J2000ZV']]).T
# Case 2, the table has coefficients of polynomials for the states
elif 'J2000SVX' in table:
ephemeris_times = np.linspace(table['SpkTableStartTime'],
table['SpkTableEndTime'],
table['SpkTableOriginalSize'])
base_time = table['J2000SVX'][-1]
time_scale = table['J2000SVY'][-1]
scaled_times = (ephemeris_times - base_time) / time_scale
coeffs = np.array([table['J2000SVX'][:-1],
table['J2000SVY'][:-1],
table['J2000SVZ'][:-1]])
positions = 1000 * polyval(scaled_times, coeffs.T).T
scaled_vel = 1000 * polyval(scaled_times, polyder(coeffs,axis=1).T).T
# We took a derivative in scaled time, so we have to multiply by our
# scale in order to get the derivative in real time
velocities = scaled_vel / time_scale
else:
raise ValueError('No positions are available in the input table.')
rotated_pos = rotation.apply_at(positions, ephemeris_times)
if velocities is not None:
rotated_vel = rotation.rotate_velocity_at(positions, velocities, ephemeris_times)
else:
rotated_vel = None
return rotated_pos, rotated_vel, ephemeris_times
class IsisSpice():
"""Mixin class for reading from an ISIS cube that has been spiceinit'd
Attributes
----------
_label : PVLModule
Dict-like object with PVL keys
_inst_pointing_table : dict
Dictionary that contains information about the
rotation from J2000 to the sensor reference frame.
All of the values for each property, such as angular
velocity, are stored in a list or numpy array where
each entry is the property at a different time.
_body_orientation_table : dict
Dictionary that contains information about the
rotation from J2000 to the body fixed reference
frame. All of the values for each property, such
as angular velocity, are stored in a list or
numpy array where each entry is the property at a
different time.
_inst_position_table : dict
Dictionary that contains information about the
location of the sensor relative to the center of the
target body. All of the values for each property,
such as velocity, are stored in a list or numpy
array where each entry is the property at a
different time.
_sun_position_table : dict
Dictionary that contains information about the
location of the sun relative to the center of the
target body. All of the values for each property,
such as velocity, are stored in a list or numpy
array where each entry is the property at a
different time.
"""
@property
def inst_pointing_table(self):
"""
ISIS Table containing the rotation between the J2000 reference frame
and the instrument reference frame.
Returns
-------
: dict
Instrument pointing table
"""
if not hasattr(self, "_inst_pointing_table"):
for table in self.label.getlist('Table'):
if table['Name'] == 'InstrumentPointing':
binary_data = read_table_data(table, self._file)
self._inst_pointing_table = parse_table(table, binary_data)
return self._inst_pointing_table
raise ValueError(f'Could not find InstrumentPointing table on file {self._file}')
return self._inst_pointing_table
@property
def body_orientation_table(self):
"""
ISIS Table containing the rotation between the J2000 reference frame
and the target body reference frame.
Returns
-------
: dict
Body orientation table
"""
if not hasattr(self, "_body_orientation_table"):
for table in self.label.getlist('Table'):
if table['Name'] == 'BodyRotation':
binary_data = read_table_data(table, self._file)
self._body_orientation_table = parse_table(table, binary_data)
return self._body_orientation_table
raise ValueError(f'Could not find BodyRotation table on file {self._file}')
return self._body_orientation_table
@property
def inst_position_table(self):
"""
ISIS Table containing the location of the instrument relative to the
target body in the J2000 reference frame.
Returns
-------
: dict
Instrument position table
"""
if not hasattr(self, "_inst_position_table"):
for table in self.label.getlist('Table'):
if table['Name'] == 'InstrumentPosition':
binary_data = read_table_data(table, self._file)
self._inst_position_table = parse_table(table, binary_data)
return self._inst_position_table
raise ValueError(f'Could not find InstrumentPosition table on file {self._file}')
return self._inst_position_table
@property
def sun_position_table(self):
"""
ISIS Table containing the location of the sun relative to the
target body in the J2000 reference frame.
Returns
-------
: dict
Sun position table
"""
if not hasattr(self, "_sun_position_table"):
for table in self.label.getlist('Table'):
if table['Name'] == 'SunPosition':
binary_data = read_table_data(table, self._file)
self._sun_position_table = parse_table(table, binary_data)
return self._sun_position_table
raise ValueError(f'Could not find SunPosition table on file {self._file}')
return self._sun_position_table
def __enter__(self):
"""
Stub method to conform with how other driver mixins
are used.
"""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Called when the context goes out of scope. This is
implemented to conform to the context manager paradigm
used by other data mix ins.
"""
pass
@property
def _sclock_hex_string(self):
"""
The hex encoded image start time computed from the
spacecraft clock count
Expects naif_keywords to be defined. This should be a dict containing
        Naif keywords from the label.
Returns
-------
str :
The hex string representation of the image
start time as a double
"""
regex = re.compile('CLOCK_ET_.*_COMPUTED')
for key in self.naif_keywords:
if re.match(regex, key[0]):
# If the hex string is only numbers and contains leading 0s,
# the PVL library strips them off (ie. 0000000000002040 becomes
# 2040). Pad to 16 in case this happens.
return str(key[1]).zfill(16)
raise ValueError("No computed spacecraft clock time found in NaifKeywords.")
@property
def ephemeris_start_time(self):
"""
The image start time in ephemeris time
Expects sclock_hex_string to be defined. This should be a string
containing the hex start time of the image
Returns
-------
float :
The image start ephemeris time
"""
return struct.unpack('d', bytes.fromhex(self._sclock_hex_string))[0]
@property
def detector_center_sample(self):
"""
The center of the CCD in detector pixels
        Expects ikid to be defined. This should be the integer Naif ID code for
the instrument.
Returns
-------
list :
The center of the CCD formatted as line, sample
"""
return self.naif_keywords.get('INS{}_BORESIGHT_SAMPLE'.format(self.ikid), None)
@property
def detector_center_line(self):
"""
The center of the CCD in detector pixels
        Expects ikid to be defined. This should be the integer Naif ID code for
the instrument.
Returns
-------
list :
The center of the CCD formatted as line, sample
"""
return self.naif_keywords.get('INS{}_BORESIGHT_LINE'.format(self.ikid), None)
@property
def _cube_label(self):
"""
The ISIS cube label portion of the file label
Returns
-------
PVLModule :
The ISIS cube label
"""
if 'IsisCube' not in self.label:
raise ValueError("Could not find ISIS cube label.")
return self.label['IsisCube']
@property
def _kernels_group(self):
"""
The Kernels group from the ISIS cube label.
This is where the original SPICE kernels are listed.
Expects cube_label to be defined. This should be a PVLModule containing
the ISIS cube label.
Returns
-------
PVLModule :
The kernels group
"""
if 'Kernels' not in self._cube_label:
raise ValueError("Could not find Kernels group in ISIS cube label.")
return self._cube_label['Kernels']
@property
def ikid(self):
"""
The NAIF id for the instrument
Expects kernels_group to be defined. This should be a PVLModule
containing the kernels group.
Returns
-------
int :
The instrument id
"""
if 'NaifIkCode' not in self._kernels_group:
if 'NaifFrameCode' not in self._kernels_group:
raise ValueError("Could not find Instrument NAIF ID in Kernels group.")
return self._kernels_group['NaifFrameCode']
return self._kernels_group['NaifIkCode']
@property
def focal2pixel_lines(self):
"""
The line component of the affine transformation
from focal plane coordinates to centered ccd pixels
Expects naif_keywords to be defined. This should be a dict containing
        Naif keywords from the label.
Expects ikid to be defined. This should be the integer Naif ID code
for the instrument.
Returns
-------
list :
The coefficients of the affine transformation
formatted as constant, x, y
"""
return self.naif_keywords.get('INS{}_ITRANSL'.format(self.ikid), None)
@property
def focal2pixel_samples(self):
"""
The sample component of the affine transformation
from focal plane coordinates to centered ccd pixels
Expects naif_keywords to be defined. This should be a dict containing
        Naif keywords from the label.
Expects ikid to be defined. This should be the integer Naif ID code
for the instrument.
Returns
-------
list :
The coefficients of the affine transformation
formatted as constant, x, y
"""
return self.naif_keywords.get('INS{}_ITRANSS'.format(self.ikid), None)
@property
def pixel2focal_x(self):
"""
Expects ikid to be defined. This must be the integer Naif id code of the instrument
Returns
-------
: list<double>
detector to focal plane x
"""
return self.naif_keywords.get('INS{}_TRANSX'.format(self.ikid), None)
@property
def pixel2focal_y(self):
"""
Expects ikid to be defined. This must be the integer Naif id code of the instrument
Returns
-------
: list<double>
detector to focal plane y
"""
return self.naif_keywords.get('INS{}_TRANSY'.format(self.ikid), None)
@property
def focal_length(self):
"""
The focal length of the instrument
Expects naif_keywords to be defined. This should be a dict containing
        Naif keywords from the label.
Expects ikid to be defined. This should be the integer Naif ID code
for the instrument.
Returns
-------
float :
The focal length in millimeters
"""
return self.naif_keywords.get('INS{}_FOCAL_LENGTH'.format(self.ikid), None)
@property
def target_body_radii(self):
"""
The triaxial radii of the target body
Expects naif_keywords to be defined. This should be a dict containing
        Naif keywords from the label.
Returns
-------
list :
The body radii in kilometers. For most bodies,
this is formatted as semimajor, semimajor,
semiminor
"""
regex = re.compile(r'BODY-?\d*_RADII')
for key in self.naif_keywords:
if re.match(regex, key[0]):
return self.naif_keywords[key[0]]
@property
def frame_chain(self):
"""
Return the root node of the rotation frame tree/chain.
The root node is the J2000 reference frame. The other nodes in the
tree can be accessed via the methods in the FrameNode class.
Returns
-------
FrameNode
The root node of the frame tree. This will always be the J2000 reference frame.
"""
if not hasattr(self, '_frame_chain'):
self._frame_chain = FrameChain.from_isis_tables(
inst_pointing = self.inst_pointing_table,
body_orientation = self.body_orientation_table)
return self._frame_chain
@property
def sun_position(self):
"""
The sun position
Expects sun_position_table to be defined. This should be a
dictionary that contains information about the location of the sun
relative to the center of the target body.
Returns
-------
array :
The sun position vectors relative to the center
of the target body in the J2000 reference frame
as a tuple of numpy arrays.
"""
j2000_to_target = self.frame_chain.compute_rotation(1, self.target_frame_id)
positions, velocities, times = rotate_state(self.sun_position_table, j2000_to_target)
return positions, velocities, times
@property
def sensor_position(self):
"""
Sensor position
Expects inst_position_table to be defined. This should be a
dictionary that contains information about the location of the
sensor relative to the center of the target body.
Expects number_of_ephemerides to be defined. This should be an integer
containing the number of instrument position states.
Returns
-------
: (positions, velocities, times)
a tuple containing a list of positions, a list of velocities, and a list of times
"""
j2000_to_target = self.frame_chain.compute_rotation(1, self.target_frame_id)
positions, velocities, times = rotate_state(self.inst_position_table, j2000_to_target)
return positions, velocities, times
@property
def naif_keywords(self):
"""
The NaifKeywords group from the file label that
contains stored values from the original SPICE
kernels
Returns
-------
PVLModule :
The stored NAIF keyword values
"""
if 'NaifKeywords' not in self.label:
raise ValueError("Could not find NaifKeywords in label.")
return self.label['NaifKeywords']
@property
def odtk(self):
"""
Returns optical distortion coefficients
Expects ikid to be defined. This should be the integer Naif ID code
for the instrument
Returns
-------
: list
optical distortion coefficients
"""
return self.naif_keywords["INS{}_OD_K".format(self.ikid)]
@property
def sensor_frame_id(self):
if 'ConstantFrames' in self.inst_pointing_table:
return self.inst_pointing_table['ConstantFrames'][0]
else:
return self.inst_pointing_table['TimeDependentFrames'][0]
@property
def target_frame_id(self):
if 'ConstantFrames' in self.body_orientation_table:
return self.body_orientation_table['ConstantFrames'][0]
else:
            return self.body_orientation_table['TimeDependentFrames'][0]

# (end of ale/base/data_isis.py)
import pvl
class Pds3Label():
"""
Mix-in for parsing PDS3 PVL labels.
"""
@property
def label(self):
if not hasattr(self, "_label"):
if isinstance(self._file, pvl.PVLModule):
self._label = self._file
else:
                try:
                    self._label = pvl.loads(self._file, strict=False)
                except Exception:
                    try:
                        self._label = pvl.load(self._file)
                    except Exception:
                        raise ValueError("{} is not a valid label".format(self._file))
return self._label
@property
def instrument_id(self):
"""
Returns the short name of the instrument
Returns
-------
: str
Short name of the instrument
"""
return self.label['INSTRUMENT_ID']
@property
def instrument_name(self):
"""
Returns the full name of the instrument
Returns
-------
: str
Full name of the instrument
"""
return self.label['INSTRUMENT_NAME']
@property
def sensor_name(self):
"""
Returns the name of the instrument
Returns
-------
: str
Name of the sensor
"""
return self.instrument_name
@property
def instrument_host_id(self):
"""
Returns the short name of the instrument host
Returns
-------
: str
Short name of the instrument host
"""
return self.label['INSTRUMENT_HOST_ID']
@property
def instrument_host_name(self):
"""
Returns the full name of the instrument host
Returns
-------
: str
Full name of the instrument host
"""
return self.label['INSTRUMENT_HOST_NAME']
@property
def platform_name(self):
"""
Returns the name of the platform which the instrument is mounted on
Returns
-------
: str
platform name
"""
return self.instrument_host_name
@property
def spacecraft_name(self):
"""
Returns the name of the spacecraft
Returns
-------
: str
Full name of the spacecraft
"""
return self.label['SPACECRAFT_NAME']
@property
def utc_start_time(self):
"""
Returns the start time of the image as a UTC string
Returns
-------
: str
Start time of the image in UTC YYYY-MM-DDThh:mm:ss[.fff]
"""
return self.label['START_TIME']
@property
def utc_stop_time(self):
"""
Returns the stop time of the image as a UTC string
Returns
-------
: str
Stop time of the image in UTC YYYY-MM-DDThh:mm:ss[.fff]
"""
return self.label['STOP_TIME']
@property
def image_lines(self):
"""
Returns
-------
: int
Number of lines in the image
"""
return self.label['IMAGE']['LINES']
@property
def image_samples(self):
"""
Returns
-------
: int
Number of samples in the image
"""
return self.label['IMAGE']['LINE_SAMPLES']
@property
def target_name(self):
"""
        Returns a target name uniquely identifying what an observation was capturing.
        This is most often a body name (e.g., Mars, Moon, Europa). This value is often
        used to acquire ephemeris data from SPICE files; therefore it should be the same
        name spicelib expects in bodvrd calls.
Returns
-------
: str
Target name
"""
return self.label['TARGET_NAME']
@property
def sampling_factor(self):
"""
Returns the summing factor from the PDS3 label. For example a return value of 2
indicates that 2 lines and 2 samples (4 pixels) were summed and divided by 4
to produce the output pixel value.
Returns
-------
: int
Number of samples and lines combined from the original data to produce a single pixel in this image
"""
return self.label.get('SAMPLING_FACTOR', 1)
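    # Illustrative example (label values are hypothetical): a label with
    # SAMPLING_FACTOR = 4 makes line_summing == sample_summing == 4, i.e. each image
    # pixel is the average of a 4x4 block of detector pixels; when the keyword is
    # absent, the default of 1 is returned.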
@property
def line_summing(self):
"""
Expects sampling_factor to be defined. This must be an integer
containing the number of samples and lines combined from the original data
Returns
-------
: int
Number of detector lines summed to produce each image line
"""
return self.sampling_factor
@property
def sample_summing(self):
"""
Expects sampling_factor to be defined. This must be an integer
containing the number of samples and lines combined from the original data
Returns
-------
: int
          Number of detector samples summed to produce each image sample
"""
return self.sampling_factor
@property
def downtrack_summing(self):
"""
Returns the number of detector pixels (normally in the line direction) that
have been averaged to produce the output pixel
Returns
-------
: int
Number of downtrack pixels summed together
"""
return self.label.get('DOWNTRACK_SUMMING', 1)
@property
def crosstrack_summing(self):
"""
Returns the number of detector pixels (normally in the sample direction) that
have been averaged to produce the output pixel
Returns
-------
: int
Number of crosstrack pixels summed together
"""
return self.label.get('CROSSTRACK_SUMMING', 1)
@property
def spacecraft_clock_start_count(self):
"""
Returns
-------
: str
Returns the start clock count string from the PDS3 label.
"""
return self.label['SPACECRAFT_CLOCK_START_COUNT']
@property
def spacecraft_clock_stop_count(self):
"""
Returns
-------
: str
Returns the stop clock count string from the PDS3 label.
"""
count = self.label['SPACECRAFT_CLOCK_STOP_COUNT']
if count == 'N/A':
count = None
return count
@property
def exposure_duration(self):
"""
Returns the exposure duration converted to seconds. If the exposure duration
is not present in the PDS3 label, then this property returns the
line exposure duration. Expects line_exposure_duration to be defined. This
should be a floating point number containing the line exposure duration.
Returns
-------
: float
Returns the exposure duration in seconds from the PDS3 label.
"""
# The EXPOSURE_DURATION may either be stored as a (value, unit) or just a value
if 'EXPOSURE_DURATION' in self.label:
try:
unit = self.label['EXPOSURE_DURATION'].units
unit = unit.lower()
if unit == "ms" or unit == "msec" or unit == "millisecond":
return self.label['EXPOSURE_DURATION'].value * 0.001
else:
return self.label['EXPOSURE_DURATION'].value
# With no units, assume milliseconds
except:
return self.label['EXPOSURE_DURATION'] * 0.001
else:
return self.line_exposure_duration
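    # Illustrative label forms (values are hypothetical): a quantity such as
    # EXPOSURE_DURATION = 1.0 <ms> is returned as 0.001 seconds, a unitless
    # EXPOSURE_DURATION = 1.0 is assumed to already be in milliseconds and also
    # yields 0.001 seconds, and EXPOSURE_DURATION = 1.0 <s> is returned as 1.0 seconds.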
# Consider expanding this to handle units
@property
def line_exposure_duration(self):
"""
Line exposure duration returns the time between the exposures for
subsequent lines.
Returns
-------
: float
Returns the line exposure duration in seconds from the PDS3 label.
"""
return self.label['LINE_EXPOSURE_DURATION'].value * 0.001 # Scale to seconds
@property
def filter_number(self):
"""
Returns
-------
: int
Returns the filter number from the PDS3 label
"""
return self.label['FILTER_NUMBER'] | Ale | /Ale-0.8.4.tar.gz/Ale-0.8.4/ale/base/label_pds3.py | label_pds3.py |
import pvl
import json
class Driver():
"""
Base class for all Drivers.
Attributes
----------
_file : str
Reference to file path to be used by mixins for opening.
"""
def __init__(self, file, num_ephem=909, num_quats=909, props={}):
"""
        Parameters
        ----------
        file : str
            path to file to be parsed
        num_ephem : int
            number of ephemeris states to generate
        num_quats : int
            number of quaternions to generate
        props : dict or str
            additional driver properties, either as a dict or a JSON encoded string
        """
if not props:
self._props = {}
elif isinstance(props, dict):
self._props = props
elif isinstance(props, str):
self._props = json.loads(props)
else:
raise Exception(f'Invalid props arg: {props}')
self._num_quaternions = num_quats
self._num_ephem = num_ephem
self._file = file
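    # Illustrative usage (file and kernel names are hypothetical): concrete drivers
    # defined elsewhere in this package, e.g. MroCtxPds3LabelNaifSpiceDriver, take the
    # same constructor arguments, so props may be given as a dict or a JSON string:
    #
    #   driver = MroCtxPds3LabelNaifSpiceDriver('image.IMG',
    #                                           props={'kernels': ['ctx.tm']})
    #   driver = MroCtxPds3LabelNaifSpiceDriver('image.IMG',
    #                                           props='{"kernels": ["ctx.tm"]}')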
@property
def image_lines(self):
"""
Returns
-------
: int
Number of lines in image
"""
raise NotImplementedError
@property
def image_samples(self):
"""
Returns
-------
: int
Number of samples in image
"""
raise NotImplementedError
@property
def usgscsm_distortion_model(self):
"""
Returns
-------
: dict
A dict containing the information about the distortion model for the usgscsm
"""
raise NotImplementedError
@property
def detector_start_line(self):
"""
Returns
-------
: int
Zero based Detector line corresponding to the first image line
"""
return 0
@property
def detector_start_sample(self):
"""
Returns
-------
: int
Zero based Detector sample corresponding to the first image sample
"""
return 0
@property
def sample_summing(self):
"""
Returns
-------
: int
Sample summing
"""
raise NotImplementedError
@property
def line_summing(self):
"""
Returns
-------
: int
Line summing
"""
raise NotImplementedError
@property
def platform_name(self):
"""
Returns
-------
: str
Name of the platform that the sensor is on
"""
raise NotImplementedError
@property
def sensor_name(self):
"""
Returns
-------
: str
Name of the sensor
"""
raise NotImplementedError
@property
def target_body_radii(self):
"""
Returns
-------
: list
          target body radii; the first element is the semimajor (equatorial) axis and the last is the semiminor (polar) axis.
"""
raise NotImplementedError
@property
def focal_length(self):
"""
Returns
-------
: float
focal length
"""
raise NotImplementedError
@property
def detector_center_line(self):
"""
Returns
-------
: int
          The detector line of the principal point
"""
raise NotImplementedError
@property
def detector_center_sample(self):
"""
Returns
-------
: int
          The detector sample of the principal point
"""
raise NotImplementedError
@property
def sensor_position(self):
"""
Returns
-------
: (positions, velocities, times)
a tuple containing a list of positions, a list of velocities, and a list of times
"""
raise NotImplementedError
@property
def frame_chain(self):
"""
Returns
-------
        FrameChain
            The root node of the frame tree. This will always be the J2000 reference frame.
"""
raise NotImplementedError
@property
def sun_position(self):
"""
Returns
-------
        : (sun_positions, sun_velocities, times)
          a tuple containing a list of sun positions, a list of sun velocities, and a list of times
        """
        raise NotImplementedError
@property
def target_name(self):
"""
Returns
-------
        : str
          Name of the target body
"""
raise NotImplementedError
@property
def target_frame_id(self):
"""
Returns
-------
: int
NAIF ID associated with the target body
"""
raise NotImplementedError
@property
def sensor_frame_id(self):
"""
Returns
-------
: int
NAIF ID associated with the sensor frame
"""
raise NotImplementedError
@property
def naif_keywords(self):
"""
Returns
-------
: dict
dictionary containing the keys : values needed by Isis for the NaifKeywords group
"""
raise NotImplementedError
@property
def sensor_model_version(self):
"""
Returns
-------
: int
version of the sensor model
"""
raise NotImplementedError
@property
def focal2pixel_lines(self):
"""
Returns
-------
: list
          3 element list containing affine transformation coefficients.
          The elements are as follows: constant, x coefficient, y coefficient
"""
raise NotImplementedError
@property
def focal2pixel_samples(self):
"""
Returns
-------
: list
3 element list containing affine transformation coefficients.
          The elements are as follows: constant, x coefficient, y coefficient
"""
raise NotImplementedError
@property
def pixel2focal_x(self):
"""
Returns
-------
: list
          3 element list containing coefficients for the pixels to focal plane
          transformation. The elements are as follows: constant, sample, line
"""
raise NotImplementedError
@property
def pixel2focal_y(self):
"""
Returns
-------
        : list
          3 element list containing coefficients for the pixels to focal plane
          transformation. The elements are as follows: constant, sample, line
"""
raise NotImplementedError
@property
def ephemeris_start_time(self):
"""
Returns
-------
: double
The start time of the image in ephemeris seconds past the J2000 epoch.
"""
raise NotImplementedError
@property
def ephemeris_stop_time(self):
"""
Returns
-------
: double
The stop time of the image in ephemeris seconds past the J2000 epoch.
"""
raise NotImplementedError
@property
def center_ephemeris_time(self):
"""
Returns the average of the start and stop ephemeris times.
Returns
-------
: double
Center ephemeris time for an image
"""
return (self.ephemeris_start_time + self.ephemeris_stop_time) / 2
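        # Worked example (assumed values): with ephemeris_start_time=100.0 and
        # ephemeris_stop_time=104.0, the center ephemeris time is (100.0 + 104.0) / 2 == 102.0.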
@property
def short_mission_name(self):
return self.__module__.split('.')[-1].split('_')[0] | Ale | /Ale-0.8.4.tar.gz/Ale-0.8.4/ale/base/base.py | base.py |
import numpy as np
class LineScanner():
"""
Mix-in for line scan sensors.
"""
@property
def name_model(self):
"""
Returns Key used to define the sensor type. Primarily
used for generating camera models.
Returns
-------
: str
          USGS line scan model
"""
return "USGS_ASTRO_LINE_SCANNER_SENSOR_MODEL"
@property
def line_scan_rate(self):
"""
Expects ephemeris_start_time to be defined. This should be a float
containing the start time of the image.
Expects center_ephemeris_time to be defined. This should be a float
containing the average of the start and end ephemeris times.
Returns
-------
: list
Start lines
: list
Line times
: list
Exposure durations
"""
t0_ephemeris = self.ephemeris_start_time - self.center_ephemeris_time
return [0.5], [t0_ephemeris], [self.exposure_duration]
@property
def ephemeris_time(self):
"""
Returns an array of times between the start/stop ephemeris times
based on the number of lines in the image.
Expects ephemeris start/stop times to be defined. These should be
floating point numbers containing the start and stop times of the
images.
Expects image_lines to be defined. This should be an integer containing
the number of lines in the image.
Returns
-------
: ndarray
ephemeris times split based on image lines
"""
return np.linspace(self.ephemeris_start_time, self.ephemeris_stop_time, max(2, int(self.image_lines/64)))
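        # Worked example (assumed values): for an image with image_lines=5000, this
        # returns np.linspace over max(2, int(5000/64)) == 78 evenly spaced times
        # between the start and stop ephemeris times.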
@property
def ephemeris_stop_time(self):
"""
Returns the sum of the starting ephemeris time and the number of lines
times the exposure duration. Expects ephemeris start time, exposure duration
and image lines to be defined. These should be double precision numbers
containing the ephemeris start, exposure duration and number of lines of
the image.
Returns
-------
: double
          Ephemeris stop time for an image
"""
return self.ephemeris_start_time + (self.image_lines * self.exposure_duration)
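        # Worked example (assumed values): with ephemeris_start_time=600000000.0,
        # image_lines=1024 and exposure_duration=0.001875, the stop time is
        # 600000000.0 + 1024 * 0.001875 == 600000001.92.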
class Framer():
"""
Mix-in for framing sensors.
"""
@property
def name_model(self):
"""
Returns Key used to define the sensor type. Primarily
used for generating camera models.
Returns
-------
: str
USGS Frame model
"""
return "USGS_ASTRO_FRAME_SENSOR_MODEL"
@property
def ephemeris_time(self):
"""
Returns the center ephemeris time for the image which is start time plus
half of the exposure duration.
Expects center_ephemeris_time to be defined. This should be a double
containing the average of the start and stop ephemeris times.
Returns
-------
        : list
          List containing the center ephemeris time for the image
"""
return [self.center_ephemeris_time]
@property
def ephemeris_stop_time(self):
"""
Returns the sum of the starting ephemeris time and the exposure duration.
Expects ephemeris start time and exposure duration to be defined. These
should be double precision numbers containing the ephemeris start and
exposure duration of the image.
Returns
-------
: double
Ephemeris stop time for an image
"""
return self.ephemeris_start_time + self.exposure_duration
class Radar():
"""
Mix-in for synthetic aperture radar sensors.
"""
@property
def name_model(self):
"""
Returns Key used to define the sensor type. Primarily
used for generating camera models.
Returns
-------
: str
USGS SAR (synthetic aperture radar) model
"""
return "USGS_ASTRO_SAR_MODEL"
@property
def ephemeris_time(self):
"""
Returns an array of times between the start/stop ephemeris times
        using a time step of 0.25 seconds.
Expects ephemeris start/stop times to be defined. These should be
floating point numbers containing the start and stop times of the
images.
Returns
-------
: ndarray
          evenly spaced ephemeris times between the start and stop times
"""
# 0.25 is the delta used by minirf, used as a default.
num_states = int((self.ephemeris_stop_time - self.ephemeris_start_time)/0.25) + 1
return np.linspace(self.ephemeris_start_time, self.ephemeris_stop_time, num_states)
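        # Worked example (assumed values): with ephemeris_start_time=0.0 and
        # ephemeris_stop_time=1.0, num_states = int((1.0 - 0.0)/0.25) + 1 == 5, so the
        # returned times are [0.0, 0.25, 0.5, 0.75, 1.0].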
@property
def wavelength(self):
"""
        Returns the wavelength used for image acquisition.
Returns
-------
: double
Wavelength used to create an image in meters
"""
raise NotImplementedError
@property
def line_exposure_duration(self):
"""
Returns the exposure duration for each line.
Returns
-------
: double
Exposure duration for a line
"""
        raise NotImplementedError
@property
def scaled_pixel_width(self):
"""
Returns the scaled pixel width
Returns
-------
: double
Scaled pixel width
"""
raise NotImplementedError
@property
def range_conversion_coefficients(self):
"""
Returns the range conversion coefficients
Returns
-------
: list
Coefficients needed for range conversion
"""
raise NotImplementedError
@property
def range_conversion_times(self):
"""
Returns the times associated with the range conversion coefficients
Returns
-------
: list
Times for the range conversion coefficients
"""
raise NotImplementedError
@property
def look_direction(self):
"""
Direction of the look (left or right)
Returns
-------
: string
left or right
"""
raise NotImplementedError | Ale | /Ale-0.8.4.tar.gz/Ale-0.8.4/ale/base/type_sensor.py | type_sensor.py |
import spiceypy as spice
import numpy as np
import scipy.constants
import ale
from ale.base.type_sensor import Framer
from ale.transformation import FrameChain
from ale.rotation import TimeDependentRotation
from ale import util
class NaifSpice():
"""
Mix-in for reading data from NAIF SPICE Kernels.
"""
def __enter__(self):
"""
Called when the context is created. This is used
to get the kernels furnished.
"""
if self.kernels:
[spice.furnsh(k) for k in self.kernels]
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Called when the context goes out of scope. Once
this is done, the object is out of scope and the
kernels can be unloaded.
"""
if self.kernels:
[spice.unload(k) for k in self.kernels]
@property
def kernels(self):
"""
Get the NAIF SPICE Kernels to furnish
There are two ways to specify which kernels a driver will use:
1. Passing the 'kernels' property into load(s) or at instantiation.
This can be either a straight iterable or a dictionary that specifies
the kernels in ISIS style ('TargetPosition', 'InstrumentPosition', etc).
2. Set the ALESPICEROOT environment variable. This variable should be
the path to a directory that contains directories whose naming
convention matches the PDS Kernel Archives format,
`shortMissionName-versionInfo`. The directory corresponding to the
           driver's mission will be searched for the appropriate meta kernel to
load.
See Also
--------
ale.util.get_kernels_from_isis_pvl : Function used to parse ISIS style dict
ale.util.get_metakernels : Function that searches ALESPICEROOT for meta kernels
ale.util.generate_kernels_from_cube : Helper function to get an ISIS style dict
from an ISIS cube that has been through
spiceinit
"""
if not hasattr(self, '_kernels'):
if 'kernels' in self._props.keys():
try:
self._kernels = util.get_kernels_from_isis_pvl(self._props['kernels'])
except Exception as e:
self._kernels = self._props['kernels']
else:
if not ale.spice_root:
raise EnvironmentError(f'ale.spice_root is not set, cannot search for metakernels. ale.spice_root = "{ale.spice_root}"')
search_results = util.get_metakernels(ale.spice_root, missions=self.short_mission_name, years=self.utc_start_time.year, versions='latest')
if search_results['count'] == 0:
raise ValueError(f'Failed to find metakernels. mission: {self.short_mission_name}, year:{self.utc_start_time.year}, versions="latest" spice root = "{ale.spice_root}"')
self._kernels = [search_results['data'][0]['path']]
return self._kernels
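    # Illustrative usage (image and kernel names are hypothetical): kernels can be
    # passed directly through props when generating an ISD, bypassing the
    # ALESPICEROOT search:
    #
    #   import ale
    #   isd = ale.loads('image.IMG', props={'kernels': ['mro_ctx_v11.tm']})
    #
    # Alternatively, set the ALESPICEROOT environment variable so that
    # util.get_metakernels can locate the mission's latest metakernel automatically.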
@property
def light_time_correction(self):
"""
        Returns the type of light time correction and aberration correction to
        use in NAIF calls. Expects ikid to be defined. This must be the integer
Naif id code of the instrument.
This searches for the value of the NAIF keyword INS<ikid>_LIGHTTIME_CORRECTION.
If the keyword is not defined, then this defaults to light time
        correction and aberration correction (LT+S).
Returns
-------
: str
          The light time and aberration correction string for use in NAIF calls.
See https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/req/abcorr.html
for the different options available.
"""
try:
return spice.gcpool('INS{}_LIGHTTIME_CORRECTION'.format(self.ikid), 0, 1)[0]
except:
return 'LT+S'
@property
def odtx(self):
"""
        Returns the x coefficients for the optical distortion model
Expects ikid to be defined. This must be the integer Naif id code of the instrument
Returns
-------
: list
Optical distortion x coefficients
"""
return spice.gdpool('INS{}_OD_T_X'.format(self.ikid),0, 10).tolist()
@property
def odty(self):
"""
        Returns the y coefficients for the optical distortion model.
Expects ikid to be defined. This must be the integer Naif id code of the instrument
Returns
-------
: list
Optical distortion y coefficients
"""
return spice.gdpool('INS{}_OD_T_Y'.format(self.ikid), 0, 10).tolist()
@property
def odtk(self):
"""
The coefficients for the radial distortion model
Expects ikid to be defined. This must be the integer Naif id code of the instrument
Returns
-------
: list
Radial distortion coefficients
"""
return spice.gdpool('INS{}_OD_K'.format(self.ikid),0, 3).tolist()
@property
def ikid(self):
"""
Returns the Naif ID code for the instrument
Expects the instrument_id to be defined. This must be a string containing
the short name of the instrument.
Returns
-------
: int
          Naif ID used for identifying the instrument in Spice kernels
"""
return spice.bods2c(self.instrument_id)
@property
def spacecraft_id(self):
"""
Returns the Naif ID code for the spacecraft
Expects the spacecraft_name to be defined. This must be a string containing
the name of the spacecraft.
Returns
-------
: int
Naif ID code for the spacecraft
"""
return spice.bods2c(self.spacecraft_name)
@property
def target_id(self):
"""
Returns the Naif ID code for the target body
        Expects target_name to be defined. This must be a string containing the name
of the target body.
Returns
-------
: int
Naif ID code for the target body
"""
return spice.bods2c(self.target_name)
@property
def target_frame_id(self):
"""
Returns the Naif ID code for the target reference frame
Expects the target_id to be defined. This must be the integer Naif ID code
for the target body.
Returns
-------
: int
Naif ID code for the target frame
"""
frame_info = spice.cidfrm(self.target_id)
return frame_info[0]
@property
def sensor_frame_id(self):
"""
Returns the Naif ID code for the sensor reference frame
Expects ikid to be defined. This must be the integer Naif id code of the instrument
Returns
-------
: int
Naif ID code for the sensor frame
"""
return self.ikid
@property
def focal2pixel_lines(self):
"""
Expects ikid to be defined. This must be the integer Naif id code of the instrument
Returns
-------
: list<double>
focal plane to detector lines
"""
return list(spice.gdpool('INS{}_ITRANSL'.format(self.ikid), 0, 3))
@property
def focal2pixel_samples(self):
"""
Expects ikid to be defined. This must be the integer Naif id code of the instrument
Returns
-------
: list<double>
focal plane to detector samples
"""
return list(spice.gdpool('INS{}_ITRANSS'.format(self.ikid), 0, 3))
@property
def pixel2focal_x(self):
"""
Expects ikid to be defined. This must be the integer Naif id code of the instrument
Returns
-------
: list<double>
detector to focal plane x
"""
return list(spice.gdpool('INS{}_TRANSX'.format(self.ikid), 0, 3))
@property
def pixel2focal_y(self):
"""
Expects ikid to be defined. This must be the integer Naif id code of the instrument
Returns
-------
: list<double>
detector to focal plane y
"""
return list(spice.gdpool('INS{}_TRANSY'.format(self.ikid), 0, 3))
@property
def focal_length(self):
"""
Returns the focal length of the sensor
Expects ikid to be defined. This must be the integer Naif id code of the instrument
Returns
-------
: float
focal length
"""
return float(spice.gdpool('INS{}_FOCAL_LENGTH'.format(self.ikid), 0, 1)[0])
@property
def pixel_size(self):
"""
Expects ikid to be defined. This must be the integer Naif id code of the instrument
Returns
-------
        : float
          pixel size
"""
return spice.gdpool('INS{}_PIXEL_SIZE'.format(self.ikid), 0, 1)[0] * 0.001
@property
def target_body_radii(self):
"""
Returns a list containing the radii of the target body
Expects target_name to be defined. This must be a string containing the name
of the target body
Returns
-------
: list<double>
          Radii of all three axes of the target body
"""
rad = spice.bodvrd(self.target_name, 'RADII', 3)
return rad[1]
@property
def reference_frame(self):
"""
Returns a string containing the name of the target reference frame
Expects target_name to be defined. This must be a string containing the name
of the target body
Returns
-------
: str
String name of the target reference frame
"""
try:
return spice.cidfrm(spice.bodn2c(self.target_name))[1]
except:
return 'IAU_{}'.format(self.target_name)
@property
def sun_position(self):
"""
Returns a tuple with information detailing the sun position at the time
of the image. Expects center_ephemeris_time to be defined. This must be
a floating point number containing the average of the start and end ephemeris time.
        Expects reference frame to be defined. This must be a string containing the name of
the target reference frame. Expects target_name to be defined. This must be
a string containing the name of the target body.
Returns
-------
        : (sun_positions, sun_velocities, times)
          a tuple containing a list of sun positions, a list of sun velocities, and a list of times
"""
times = [self.center_ephemeris_time]
positions = []
velocities = []
for time in times:
sun_state, _ = spice.spkezr("SUN",
time,
self.reference_frame,
'LT+S',
self.target_name)
positions.append(sun_state[:3])
velocities.append(sun_state[3:6])
positions = 1000 * np.asarray(positions)
velocities = 1000 * np.asarray(velocities)
return positions, velocities, times
@property
def sensor_position(self):
"""
Returns a tuple with information detailing the position of the sensor at the time
of the image. Expects ephemeris_time to be defined. This must be a floating point number
containing the ephemeris time. Expects spacecraft_name to be defined. This must be a
string containing the name of the spacecraft containing the sensor. Expects
        reference_frame to be defined. This must be a string containing the name of
the target reference frame. Expects target_name to be defined. This must be
a string containing the name of the target body.
Returns
-------
: (positions, velocities, times)
a tuple containing a list of positions, a list of velocities, and a list of times
"""
if not hasattr(self, '_position'):
ephem = self.ephemeris_time
pos = []
vel = []
target = self.spacecraft_name
observer = self.target_name
# Check for ISIS flag to fix target and observer swapping
if self.swap_observer_target:
target = self.target_name
observer = self.spacecraft_name
for time in ephem:
# spkezr returns a vector from the observer's location to the aberration-corrected
# location of the target. For more information, see:
# https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/FORTRAN/spicelib/spkezr.html
if self.correct_lt_to_surface and self.light_time_correction.upper() == 'LT+S':
obs_tar_state, obs_tar_lt = spice.spkezr(target,
time,
'J2000',
self.light_time_correction,
observer)
# ssb to spacecraft
ssb_obs_state, ssb_obs_lt = spice.spkezr(observer,
time,
'J2000',
'NONE',
'SSB')
radius_lt = (self.target_body_radii[2] + self.target_body_radii[0]) / 2 / (scipy.constants.c/1000.0)
adjusted_time = time - obs_tar_lt + radius_lt
ssb_tar_state, ssb_tar_lt = spice.spkezr(target,
adjusted_time,
'J2000',
'NONE',
'SSB')
state = ssb_tar_state - ssb_obs_state
matrix = spice.sxform("J2000", self.reference_frame, time)
                    state = spice.mxvg(matrix, state, 6, 6)
else:
state, _ = spice.spkezr(target,
time,
self.reference_frame,
self.light_time_correction,
observer)
if self.swap_observer_target:
pos.append(-state[:3])
vel.append(-state[3:])
else:
pos.append(state[:3])
vel.append(state[3:])
# By default, SPICE works in km, so convert to m
self._position = [p * 1000 for p in pos]
self._velocity = [v * 1000 for v in vel]
return self._position, self._velocity, self.ephemeris_time
@property
def frame_chain(self):
if not hasattr(self, '_frame_chain'):
nadir = self._props.get('nadir', False)
self._frame_chain = FrameChain.from_spice(sensor_frame=self.sensor_frame_id,
target_frame=self.target_frame_id,
center_ephemeris_time=self.center_ephemeris_time,
ephemeris_times=self.ephemeris_time,
nadir=nadir)
if nadir:
# Logic for nadir calculation was taken from ISIS3
# SpiceRotation::setEphemerisTimeNadir
rotation = self._frame_chain.compute_rotation(self.target_frame_id, 1)
p_vec, v_vec, times = self.sensor_position
rotated_positions = rotation.apply_at(p_vec, times)
rotated_velocities = rotation.rotate_velocity_at(p_vec, v_vec, times)
p_vec = rotated_positions
v_vec = rotated_velocities
velocity_axis = 2
# Get the default line translation with no potential flipping
# from the driver
trans_x = np.array(list(spice.gdpool('INS{}_ITRANSL'.format(self.ikid), 0, 3)))
if (trans_x[0] < trans_x[1]):
velocity_axis = 1
quats = [spice.m2q(spice.twovec(-p_vec[i], 3, v_vec[i], velocity_axis)) for i, time in enumerate(times)]
quats = np.array(quats)[:,[1,2,3,0]]
rotation = TimeDependentRotation(quats, times, 1, self.sensor_frame_id)
self._frame_chain.add_edge(rotation)
return self._frame_chain
@property
def sensor_orientation(self):
"""
Returns quaternions describing the sensor orientation. Expects ephemeris_time
to be defined. This must be a floating point number containing the
ephemeris time. Expects instrument_id to be defined. This must be a string
containing the short name of the instrument. Expects reference frame to be defined.
This must be a string containing the name of the target reference frame.
Returns
-------
: list
Quaternions describing the orientation of the sensor
"""
if not hasattr(self, '_orientation'):
self._orientation = self.frame_chain.compute_rotation(self.sensor_frame_id, self.target_frame_id).quats
return self._orientation.tolist()
@property
def ephemeris_start_time(self):
"""
Returns the starting ephemeris time of the image. Expects spacecraft_id to
be defined. This must be the integer Naif Id code for the spacecraft. Expects
spacecraft_clock_start_count to be defined. This must be a string
containing the start clock count of the spacecraft
Returns
-------
: double
Starting ephemeris time of the image
"""
return spice.scs2e(self.spacecraft_id, self.spacecraft_clock_start_count)
@property
def ephemeris_stop_time(self):
"""
Returns the ephemeris stop time of the image. Expects spacecraft_id to
be defined. This must be the integer Naif Id code for the spacecraft.
Expects spacecraft_clock_stop_count to be defined. This must be a string
containing the stop clock count of the spacecraft
Returns
-------
: double
Ephemeris stop time of the image
"""
return spice.scs2e(self.spacecraft_id, self.spacecraft_clock_stop_count)
@property
def detector_center_sample(self):
"""
Returns the center detector sample. Expects ikid to be defined. This should
be an integer containing the Naif Id code of the instrument.
Returns
-------
: float
Detector sample of the principal point
"""
return float(spice.gdpool('INS{}_BORESIGHT_SAMPLE'.format(self.ikid), 0, 1)[0])
@property
def detector_center_line(self):
"""
Returns the center detector line. Expects ikid to be defined. This should
be an integer containing the Naif Id code of the instrument.
Returns
-------
: float
Detector line of the principal point
"""
return float(spice.gdpool('INS{}_BORESIGHT_LINE'.format(self.ikid), 0, 1)[0])
@property
def swap_observer_target(self):
"""
Returns if the observer and target should be swapped when determining the
sensor state relative to the target. This is defined by a keyword in
ISIS IAKs. If the keyword is not defined in any loaded kernels then False
is returned.
Expects ikid to be defined. This should be an integer containing the
Naif Id code of the instrument.
"""
try:
swap = spice.gcpool('INS{}_SWAP_OBSERVER_TARGET'.format(self.ikid), 0, 1)[0]
return swap.upper() == "TRUE"
except:
return False
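    # Illustrative IAK entry (the NAIF ID is hypothetical): a loaded kernel containing
    #   INS-12345_SWAP_OBSERVER_TARGET = 'TRUE'
    # makes this property return True for the instrument whose ikid is -12345.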
@property
def correct_lt_to_surface(self):
"""
Returns if light time correction should be made to the surface instead of
to the center of the body. This is defined by a keyword in ISIS IAKs.
If the keyword is not defined in any loaded kernels then False is returned.
Expects ikid to be defined. This should be an integer containing the
Naif Id code of the instrument.
"""
try:
surface_correct = spice.gcpool('INS{}_LT_SURFACE_CORRECT'.format(self.ikid), 0, 1)[0]
return surface_correct.upper() == "TRUE"
except:
return False
@property
def naif_keywords(self):
"""
Returns
-------
: dict
Dictionary of keywords and values that ISIS creates and attaches to the label
"""
if not hasattr(self, "_naif_keywords"):
self._naif_keywords = dict()
self._naif_keywords['BODY{}_RADII'.format(self.target_id)] = self.target_body_radii
self._naif_keywords['BODY_FRAME_CODE'] = self.target_frame_id
self._naif_keywords['BODY_CODE'] = self.target_id
self._naif_keywords = {**self._naif_keywords, **util.query_kernel_pool(f"*{self.ikid}*"), **util.query_kernel_pool(f"*{self.target_id}*")}
try:
self._naif_keywords = {**self._naif_keywords, **util.query_kernel_pool(f"*{self.fikid}*")}
except AttributeError as error:
pass
return self._naif_keywords | Ale | /Ale-0.8.4.tar.gz/Ale-0.8.4/ale/base/data_naif.py | data_naif.py |
import pvl
class IsisLabel():
"""
Mix-in for parsing ISIS Cube labels.
"""
@property
def label(self):
if not hasattr(self, "_label"):
if isinstance(self._file, pvl.PVLModule):
self._label = self._file
            else:
                try:
                    self._label = pvl.loads(self._file)
                except Exception:
                    try:
                        self._label = pvl.load(self._file)
                    except Exception:
                        raise ValueError("{} is not a valid label".format(self._file))
return self._label
@property
def instrument_id(self):
"""
Returns the short name of the instrument
Returns
-------
: str
instrument id
"""
return self.label['IsisCube']['Instrument']['InstrumentId']
@property
def platform_name(self):
"""
Returns the name of the platform containing the sensor. This is usually
the spacecraft name.
Returns
-------
: str
Name of the platform which the sensor is mounted on
"""
return self.label['IsisCube']['Instrument']['SpacecraftName']
@property
def spacecraft_name(self):
"""
Returns the name of the spacecraft
Returns
-------
: str
Full name of the spacecraft
"""
return self.platform_name
@property
def sensor_name(self):
"""
Returns the name of the instrument
Returns
-------
: str
Name of the sensor
"""
return self.label['IsisCube']['Instrument']['InstrumentName']
@property
def sensor_model_version(self):
"""
Returns the ISIS camera version
Returns
-------
: int
Camera version number
"""
return self.label["IsisCube"]["Kernels"]["CameraVersion"]
@property
def image_lines(self):
"""
Returns an integer containing the number of lines in the image
Returns
-------
: int
Number of lines in image
"""
return self.label['IsisCube']['Core']['Dimensions']['Lines']
@property
def image_samples(self):
"""
Returns an integer containing the number of samples in the image
Returns
-------
: int
Number of samples in image
"""
return self.label['IsisCube']['Core']['Dimensions']['Samples']
@property
def sampling_factor(self):
"""
        Returns the summing factor from the ISIS label. For example, a return value of 2
indicates that 2 lines and 2 samples (4 pixels) were summed and divided by 4
to produce the output pixel value.
Returns
-------
: int
Number of samples and lines combined from the original data to produce a single pixel in this image
"""
try:
summing = self.label['IsisCube']['Instrument']['SummingMode']
except:
summing = 1
return summing
@property
def sample_summing(self):
"""
Returns the number of detector samples summed to produce each image sample
Returns
-------
: int
Sample summing
"""
return self.sampling_factor
@property
def line_summing(self):
"""
        The number of detector lines summed to produce each image line
Returns
-------
: int
Line summing
"""
return self.sampling_factor
@property
def target_name(self):
"""
Target body name used in various Spice calls to acquire
target specific ephemeris data.
Returns
-------
: str
Target name
"""
return self.label['IsisCube']['Instrument']['TargetName']
@property
def spacecraft_clock_start_count(self):
"""
The spacecraft clock start count, frequently used to determine the start time
of the image.
Returns
-------
: str
Spacecraft clock start count
"""
if 'SpacecraftClockStartCount' in self.label['IsisCube']['Instrument']:
return str(self.label['IsisCube']['Instrument']['SpacecraftClockStartCount'])
elif 'SpacecraftClockCount' in self.label['IsisCube']['Instrument']:
return str(self.label['IsisCube']['Instrument']['SpacecraftClockCount'])
elif 'SpacecraftClockStartCount' in self.label['IsisCube']['Archive']:
return str(self.label['IsisCube']['Archive']['SpacecraftClockStartCount'])
else:
return None
@property
def spacecraft_clock_stop_count(self):
"""
The spacecraft clock stop count, frequently used to determine the stop time
of the image.
Returns
-------
: str
Spacecraft clock stop count
"""
if 'SpacecraftClockStopCount' in self.label['IsisCube']['Instrument']:
return self.label['IsisCube']['Instrument']['SpacecraftClockStopCount']
elif 'SpacecraftClockStopCount' in self.label['IsisCube']['Archive']:
return self.label['IsisCube']['Archive']['SpacecraftClockStopCount']
else:
return None
@property
def utc_start_time(self):
"""
The UTC start time of the image.
This is generally less accurate than converting the spacecraft start
clock count using the spacecraft clock kernel (SCLK).
Returns
-------
: datetime
Start time of the image in UTC
"""
return self.label['IsisCube']['Instrument']['StartTime']
@property
def utc_stop_time(self):
"""
The UTC stop time of the image.
This is generally less accurate than converting the spacecraft stop
clock count using the spacecraft clock kernel (SCLK).
Returns
-------
: datetime
Stop time of the image in UTC
"""
return self.label['IsisCube']['Instrument']['StopTime']
@property
def exposure_duration(self):
"""
The exposure duration of the image, in seconds
Returns
-------
: float
Exposure duration in seconds
"""
if 'ExposureDuration' in self.label['IsisCube']['Instrument']:
exposure_duration = self.label['IsisCube']['Instrument']['ExposureDuration']
# Check for units on the PVL keyword
if isinstance(exposure_duration, pvl._collections.Units):
units = exposure_duration.units
if "ms" in units.lower() or 'milliseconds' in units.lower():
exposure_duration = exposure_duration.value * 0.001
else:
# if not milliseconds, the units are probably seconds
exposure_duration = exposure_duration.value
else:
# if no units are available, assume the exposure duration is given in milliseconds
exposure_duration = exposure_duration * 0.001
return exposure_duration
else:
return self.line_exposure_duration
@property
def line_exposure_duration(self):
"""
The line exposure duration of the image, in seconds
Returns
-------
: float
Line exposure duration in seconds
"""
line_exposure_duration = self.label['IsisCube']['Instrument']['LineExposureDuration']
if isinstance(line_exposure_duration, pvl._collections.Units):
units = line_exposure_duration.units
if "ms" in units.lower():
line_exposure_duration = line_exposure_duration.value * 0.001
else:
# if not milliseconds, the units are probably seconds
line_exposure_duration = line_exposure_duration.value
else:
# if no units are available, assume the exposure duration is given in milliseconds
line_exposure_duration = line_exposure_duration * 0.001
return line_exposure_duration | Ale | /Ale-0.8.4.tar.gz/Ale-0.8.4/ale/base/label_isis.py | label_isis.py |
import spiceypy as spice
from ale.base import Driver
from ale.base.data_naif import NaifSpice
from ale.base.data_isis import IsisSpice
from ale.base.label_pds3 import Pds3Label
from ale.base.label_isis import IsisLabel
from ale.base.type_distortion import RadialDistortion
from ale.base.type_sensor import LineScanner
class MroCtxIsisLabelIsisSpiceDriver(LineScanner, IsisLabel, IsisSpice, RadialDistortion, Driver):
@property
def instrument_id(self):
"""
        Returns an instrument id for uniquely identifying the instrument, but often
        also used to be piped into Spice Kernels to acquire IKIDs. Therefore they use
        the same ID that Spice expects in bods2c calls.
Returns
-------
: str
instrument id
"""
id_lookup = {
"CTX" : "MRO_CTX"
}
return id_lookup[super().instrument_id]
@property
def spacecraft_id(self):
"""
Returns
-------
: int
Naif ID code for the spacecraft
"""
return "-74"
@property
def sensor_name(self):
"""
        ISIS doesn't propagate this to the ingested cube label, so hard-code it.
"""
return "CONTEXT CAMERA"
@property
def detector_center_sample(self):
"""
The center of the CCD in detector pixels
ISIS uses 0.5 based CCD samples, so we need to convert to 0 based.
Returns
-------
float :
The center sample of the CCD
"""
return super().detector_center_sample - 0.5
class MroCtxIsisLabelNaifSpiceDriver(LineScanner, IsisLabel, NaifSpice, RadialDistortion, Driver):
"""
Driver for reading CTX ISIS labels.
"""
@property
def instrument_id(self):
"""
        Returns an instrument id for uniquely identifying the instrument, but often
        also used to be piped into Spice Kernels to acquire IKIDs. Therefore they use
        the same ID that Spice expects in bods2c calls.
Expects instrument_id to be defined in the IsisLabel mixin. This should be
a string of the form 'CTX'
Returns
-------
: str
instrument id
"""
id_lookup = {
"CTX" : "MRO_CTX"
}
return id_lookup[super().instrument_id]
@property
def sensor_name(self):
"""
        ISIS doesn't propagate this to the ingested cube label, so hard-code it.
"""
return "CONTEXT CAMERA"
@property
def ephemeris_start_time(self):
"""
Returns the ephemeris start time of the image.
Expects spacecraft_id to be defined. This should be the integer
Naif ID code for the spacecraft.
Returns
-------
: float
ephemeris start time of the image
"""
if not hasattr(self, '_ephemeris_start_time'):
sclock = self.label['IsisCube']['Instrument']['SpacecraftClockCount']
self._ephemeris_start_time = spice.scs2e(self.spacecraft_id, sclock)
return self._ephemeris_start_time
@property
def ephemeris_stop_time(self):
"""
ISIS doesn't preserve the spacecraft stop count that we can use to get
        the ephemeris stop time of the image, so compute the ephemeris stop time
from the start time and the exposure duration.
"""
return self.ephemeris_start_time + self.exposure_duration * self.image_lines
@property
def spacecraft_name(self):
"""
Returns the spacecraft name used in various Spice calls to acquire
ephemeris data.
Expects the platform_name to be defined. This should be a string of
the form 'Mars_Reconnaissance_Orbiter'
Returns
-------
: str
spacecraft name
"""
name_lookup = {
'Mars_Reconnaissance_Orbiter': 'MRO'
}
return name_lookup[super().platform_name]
@property
def detector_start_sample(self):
"""
Returns
-------
: int
The starting detector sample of the image
"""
return self.label['IsisCube']['Instrument']['SampleFirstPixel']
@property
def detector_center_sample(self):
"""
The center of the CCD in detector pixels
ISIS uses 0.5 based CCD samples, so we need to convert to 0 based.
Returns
-------
float :
The center sample of the CCD
"""
return super().detector_center_sample - 0.5
@property
def sensor_model_version(self):
"""
Returns
-------
: int
ISIS sensor model version
"""
return 1
class MroCtxPds3LabelNaifSpiceDriver(LineScanner, Pds3Label, NaifSpice, RadialDistortion, Driver):
"""
Driver for reading CTX PDS3 labels. Requires a Spice mixin to acquire addtional
ephemeris and instrument data located exclusively in spice kernels.
"""
@property
def instrument_id(self):
"""
Returns an instrument id for uniquely identifying the instrument, but often
also used to be piped into Spice Kernels to acquire IKIDs. Therefore they
the same ID the Spice expects in bods2c calls.
Expects instrument_id to be defined in the Pds3Label mixin. This should
be a string of the form 'CONTEXT CAMERA' or 'CTX'
Returns
-------
: str
instrument id
"""
id_lookup = {
'CONTEXT CAMERA':'MRO_CTX',
'CTX':'MRO_CTX'
}
return id_lookup[super().instrument_id]
@property
def spacecraft_name(self):
"""
Returns the spacecraft name used in various Spice calls to acquire
ephemeris data.
Expects spacecraft_name to be defined. This should be a string of the form
'MARS_RECONNAISSANCE_ORBITER'
Returns
-------
: str
spacecraft name
"""
name_lookup = {
'MARS_RECONNAISSANCE_ORBITER': 'MRO'
}
return name_lookup[super().spacecraft_name]
@property
def detector_start_sample(self):
"""
Returns
-------
: int
Starting detector sample for the image
"""
return self.label.get('SAMPLE_FIRST_PIXEL', 0)
@property
def detector_center_sample(self):
"""
The center of the CCD in detector pixels
ISIS uses 0.5 based CCD samples, so we need to convert to 0 based.
Returns
-------
float :
The center sample of the CCD
"""
return super().detector_center_sample - 0.5
@property
def sensor_model_version(self):
"""
Returns
-------
: int
ISIS sensor model version
"""
return 1
@property
def platform_name(self):
"""
Returns the name of the platform which the instrument is mounted on
Returns
-------
: str
platform name
"""
return self.label['SPACECRAFT_NAME'] | Ale | /Ale-0.8.4.tar.gz/Ale-0.8.4/ale/drivers/mro_drivers.py | mro_drivers.py |
import os
from glob import glob
import numpy as np
import pvl
import spiceypy as spice
from ale.base import Driver
from ale.base.data_naif import NaifSpice
from ale.base.label_pds3 import Pds3Label
from ale.base.type_distortion import RadialDistortion
from ale.base.type_sensor import Framer
from ale.rotation import ConstantRotation
from ale.transformation import FrameChain
from scipy.spatial.transform import Rotation
class CassiniIssPds3LabelNaifSpiceDriver(Framer, Pds3Label, NaifSpice, RadialDistortion, Driver):
"""
Cassini mixin class for defining Spice calls.
"""
id_lookup = {
"ISSNA" : "CASSINI_ISS_NAC",
"ISSWA" : "CASSINI_ISS_WAC"
}
nac_filter_to_focal_length = {
("P0","BL2"):2002.19,
("P0","CB1"):2002.30,
("P0","GRN"):2002.38,
("P0","IR1"):2002.35,
("P0","MT1"):2002.40,
("P0","UV3"):2002.71,
("P60","BL2"):2002.13,
("P60","CB1"):2002.18,
("P60","GRN"):2002.28,
("P60","IR1"):2002.36,
("P60","MT1"):2002.34,
("P60","UV3"):2002.51,
("RED","GRN"):2002.61,
("RED","IR1"):2002.48,
("UV1","CL2"):2003.03,
("UV2","CL2"):2002.91,
("UV2","UV3"):2002.90,
("RED","CL2"):2002.69,
("CL1","IR3"):2002.65,
("CL1","BL2"):2002.37,
("CL1","CB1"):2002.66,
("CL1","CB2"):2002.66,
("CL1","CB3"):2002.68,
("CL1","MT1"):2002.88,
("CL1","MT2"):2002.91,
("CL1","MT3"):2002.87,
("CL1","UV3"):2003.09,
("HAL","CL2"):2002.94,
("IR2","CL2"):2002.71,
("IR2","IR1"):2002.56,
("IR2","IR3"):2002.55,
("IR4","CL2"):2002.89,
("IR4","IR3"):2002.81,
("BL1","CL2"):2002.79,
("CL1","CL2"):2002.88,
("CL1","GRN"):2002.75,
("CL1","IR1"):2002.74,
("IRP0","CB2"):2002.48,
("IRP0","CB3"):2002.74,
("IRP0","IR1"):2002.60,
("IRP0","IR3"):2002.48,
("IRP0","MT2"):2002.72,
("IRP0","MT3"):2002.72,
("P120","BL2"):2002.11,
("P120","CB1"):002.28,
("P120","GRN"):2002.38,
("P120","IR1"):2002.39,
("P120","MT1"):2002.54,
("P120","UV3"):2002.71
}
wac_filter_to_focal_length = {
("B2","CL2"):200.85,
("B2","IRP90"):200.83,
("B2","IRP0"):200.82,
("B3","CL2"):201.22,
("B3","IRP90"):201.12,
("B3","IRP0"):201.11,
("L1","BL1"):200.86,
("L1","CL2"):200.77,
("L1","GRN"):200.71,
("L1","HAL"):200.74,
("L1","IR1"):200.80,
("L1","RED"):200.74,
("L1","VIO"):201.09,
("R2","CL2"):200.97,
("R2","IR"):200.95,
("R2","IRP90"):200.95,
("R3","CL2"):201.04,
("R3","IRP90"):201.03,
("R3","IRP0"):201.04,
("R4","CL2"):201.22,
("R4","IRP90"):201.16,
("R4","IRP0"):201.15,
("T2","CL2"):200.82,
("T2","IRP0"):200.81,
("T2","IRP90"):200.82,
("T3","CL2"):201.04,
("T3","IRP0"):201.06,
("T3","IRP90"):201.07
}
@property
def instrument_id(self):
"""
        Returns an instrument id for uniquely identifying the instrument, but often
        also used to be piped into Spice Kernels to acquire instrument kernel (IK) NAIF IDs.
        Therefore they use the same NAIF ID as in bods2c calls. Expects instrument_id to be
defined from a mixin class. This should return a string containing either 'ISSNA' or
'ISSWA'
Returns
-------
: str
instrument id
"""
return self.id_lookup[super().instrument_id]
@property
def focal_epsilon(self):
"""
Expects ikid to be defined. This should be an integer containing the Naif
ID code of the instrument
Returns
-------
: float
focal epsilon
"""
return float(spice.gdpool('INS{}_FL_UNCERTAINTY'.format(self.ikid), 0, 1)[0])
@property
def spacecraft_name(self):
"""
Spacecraft name used in various Spice calls to acquire
ephemeris data.
Returns
-------
: str
Name of the spacecraft
"""
return 'CASSINI'
@property
def focal2pixel_samples(self):
"""
Expects ikid to be defined. This should be an integer containing the Naif
ID code of the instrument
Returns
-------
: list<double>
focal plane to detector samples
"""
# Microns to mm
pixel_size = spice.gdpool('INS{}_PIXEL_SIZE'.format(self.ikid), 0, 1)[0] * 0.001
return [0.0, 1/pixel_size, 0.0]
@property
def focal2pixel_lines(self):
"""
Expects ikid to be defined. This should be an integer containing the Naif
ID code of the instrument
Returns
-------
: list<double>
focal plane to detector lines
"""
pixel_size = spice.gdpool('INS{}_PIXEL_SIZE'.format(self.ikid), 0, 1)[0] * 0.001
return [0.0, 0.0, 1/pixel_size]
@property
def odtk(self):
"""
        The radial distortion coefficients are not defined in the IK kernels; instead
        they are defined in the ISS Data User Guide (Knowles). Therefore, we
        manually specify the coefficients here.
Expects instrument_id to be defined. This should be a string containing either
        CASSINI_ISS_WAC or CASSINI_ISS_NAC
Returns
-------
: list<float>
radial distortion coefficients
"""
if self.instrument_id == 'CASSINI_ISS_WAC':
# WAC
return [0, float('-6.2e-5'), 0]
elif self.instrument_id == 'CASSINI_ISS_NAC':
# NAC
return [0, float('-8e-6'), 0]
@property
# FOV_CENTER_PIXEL doesn't specify which coordinate is sample or line, but they are the same
# number, so the order doesn't matter
def detector_center_line(self):
"""
        Detector center based on ISIS's corrected values.
Returns
-------
: int
          The detector line of the principal point
"""
return 512
@property
# FOV_CENTER_PIXEL doesn't specify which coordinate is sample or line, but they are the same
# number, so the order doesn't matter
def detector_center_sample(self):
"""
        Detector center based on ISIS's corrected values.
Returns
-------
: int
          The detector sample of the principal point
"""
return 512
@property
def sensor_model_version(self):
"""
Returns instrument model version
Returns
-------
: int
ISIS sensor model version
"""
return 1
@property
def focal_length(self):
"""
NAC uses multiple filter pairs, each filter combination has a different focal length.
        NAIF's Cassini kernels do not contain focal lengths for NAC filters, so
        we acquired updated NAC filter data from ISIS's IAK kernel.
"""
# default focal defined by IK kernel
try:
default_focal_len = super(CassiniIssPds3LabelNaifSpiceDriver, self).focal_length
except:
default_focal_len = float(spice.gdpool('INS{}_FOV_CENTER_PIXEL'.format(self.ikid), 0, 2)[0])
filters = tuple(self.label['FILTER_NAME'])
if self.instrument_id == "CASSINI_ISS_NAC":
return self.nac_filter_to_focal_length.get(filters, default_focal_len)
elif self.instrument_id == "CASSINI_ISS_WAC":
return self.wac_filter_to_focal_length.get(filters, default_focal_len)
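    # Illustrative lookup (label values are hypothetical): for a NAC image whose label
    # has FILTER_NAME = ("P0", "BL2"), filters == ("P0", "BL2") and the returned focal
    # length is nac_filter_to_focal_length[("P0", "BL2")] == 2002.19. An unknown filter
    # pair falls back to the default focal length from the IK kernel.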
@property
def _original_naif_sensor_frame_id(self):
"""
        Original sensor frame ID as defined in Cassini's IK kernel. This
        is the frame ID you want to default to for WAC. For NAC, this frame ID
        sits between J2000 and an extra 180 degree rotation since NAC was mounted
        upside down.
Returns
-------
: int
sensor frame code from NAIF's IK kernel
"""
return self.ikid
@property
def sensor_frame_id(self):
"""
Overwrite sensor frame id to return fake frame ID for NAC representing a
mounting point with a 180 degree rotation. ID was taken from ISIS's IAK
kernel for Cassini. This is because NAC requires an extra rotation not
        in NAIF's Cassini kernels. WAC does not require an extra rotation so
        we simply return the original sensor frame id for WAC.
Returns
-------
: int
          NAIF's WAC sensor frame ID, or ALE's NAC sensor frame ID
"""
if self.instrument_id == "CASSINI_ISS_NAC":
return 14082360
elif self.instrument_id == "CASSINI_ISS_WAC":
return 14082361
@property
def frame_chain(self):
"""
Construct the initial frame chain using the original sensor_frame_id
obtained from the ikid. Then tack on the ISIS iak rotation.
Returns
-------
: Object
Custom Cassini ALE Frame Chain object for rotation computation and application
"""
if not hasattr(self, '_frame_chain'):
try:
# Call frinfo to check if the ISIS iak has been loaded with the
# additional reference frame. Otherwise, Fail and add it manually
spice.frinfo(self.sensor_frame_id)
self._frame_chain = super().frame_chain
except spice.utils.exceptions.NotFoundError as e:
self._frame_chain = FrameChain.from_spice(sensor_frame=self._original_naif_sensor_frame_id,
target_frame=self.target_frame_id,
center_ephemeris_time=self.center_ephemeris_time,
ephemeris_times=self.ephemeris_time,)
rotation = ConstantRotation([[0, 0, 1, 0]], self.sensor_frame_id, self._original_naif_sensor_frame_id)
self._frame_chain.add_edge(rotation=rotation)
return self._frame_chain | Ale | /Ale-0.8.4.tar.gz/Ale-0.8.4/ale/drivers/co_drivers.py | co_drivers.py |
import pvl
import spiceypy as spice
import os
from glob import glob
import ale
from ale.base import Driver
from ale.base.data_naif import NaifSpice
from ale.base.label_pds3 import Pds3Label
from ale.base.type_sensor import Framer
ID_LOOKUP = {
"FC1" : "DAWN_FC1",
"FC2" : "DAWN_FC2"
}
class DawnFcPds3NaifSpiceDriver(Framer, Pds3Label, NaifSpice, Driver):
"""
Dawn driver for generating an ISD from a Dawn PDS3 image.
"""
@property
def instrument_id(self):
"""
        Returns an instrument id for uniquely identifying the instrument, but often
        also used to be piped into Spice Kernels to acquire IKIDs. Therefore it uses
        the same ID that Spice expects in bods2c calls. Expects instrument_id to be
defined from the PDS3Label mixin. This should be a string containing the short
name of the instrument. Expects filter_number to be defined. This should be an
integer containing the filter number from the PDS3 Label.
Returns
-------
: str
instrument id
"""
instrument_id = super().instrument_id
filter_number = self.filter_number
return "{}_FILTER_{}".format(ID_LOOKUP[instrument_id], filter_number)
@property
def label(self):
"""
        Loads a PVL from the _file attribute and
parses the binary table data.
Returns
-------
PVLModule :
Dict-like object with PVL keys
"""
class PvlDecoder(pvl.decoder.PVLDecoder):
def unescape_next_char(self, stream):
esc = stream.read(1)
string = '\{}'.format(esc.decode('utf-8')).encode('utf-8')
return string
if not hasattr(self, "_label"):
if isinstance(self._file, pvl.PVLModule):
self._label = self._file
            else:
                try:
                    self._label = pvl.loads(self._file, PvlDecoder)
                except Exception:
                    try:
                        # PvlDecoder ignores all escape sequences when getting the label
                        self._label = pvl.load(self._file, PvlDecoder)
                    except Exception:
                        raise ValueError("{} is not a valid label".format(self._file))
return self._label
@property
def spacecraft_name(self):
"""
Spacecraft name used in various Spice calls to acquire
ephemeris data. Dawn does not have a SPACECRAFT_NAME keyword, therefore
we are overwriting this method using the instrument_host_id keyword instead.
Expects instrument_host_id to be defined. This should be a string containing
the name of the spacecraft that the instrument is mounted on.
Returns
-------
: str
Spacecraft name
"""
return self.instrument_host_id
@property
def target_name(self):
"""
        Returns a target name for uniquely identifying the target body, often
        piped into Spice Kernels to acquire ephemeris data from Spice. Therefore it uses
        the same name that Spice expects in bodvrd calls. In this case, Vesta images
        have a number in front of them, like "4 VESTA", which needs to be simplified
        to "VESTA" for Spice. Expects target_name to be defined in the Pds3Label mixin.
        This should be a string containing the name of the target body.
Returns
-------
: str
target name
"""
target = super().target_name
target = target.split(' ')[-1]
return target
@property
def ephemeris_start_time(self):
"""
Compute the center ephemeris time for a Dawn Frame camera. This is done
via a spice call but 193 ms needs to be added to
account for the CCD being discharged or cleared.
"""
if not hasattr(self, '_ephemeris_start_time'):
sclock = self.spacecraft_clock_start_count
self._ephemeris_start_time = spice.scs2e(self.spacecraft_id, sclock)
self._ephemeris_start_time += 193.0 / 1000.0
return self._ephemeris_start_time
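        # Worked example (assumed values): if scs2e converts the clock count to
        # 400000000.000 ephemeris seconds, the returned start time is 400000000.193,
        # i.e. the SPICE-derived time plus the 193 ms CCD clear offset.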
@property
def usgscsm_distortion_model(self):
"""
The Dawn framing camera uses a unique radial distortion model so we need
to overwrite the method packing the distortion model into the ISD.
Expects odtk to be defined. This should be a list containing the radial
distortion coefficients
Returns
-------
: dict
Dictionary containing the distortion model
"""
return {
"dawnfc": {
"coefficients" : self.odtk
}
}
@property
def odtk(self):
"""
The coefficients for the distortion model
Expects ikid to be defined. This should be an integer containing the
Naif ID code for the instrument.
Returns
-------
: list
Radial distortion coefficients
"""
return spice.gdpool('INS{}_RAD_DIST_COEFF'.format(self.ikid),0, 1).tolist()
# TODO: Update focal2pixel samples and lines to reflect the rectangular
# nature of dawn pixels
@property
def focal2pixel_samples(self):
"""
Expects ikid to be defined. This should be an integer containing the
Naif ID code for the instrument.
Returns
-------
: list<double>
focal plane to detector samples
"""
# Microns to mm
pixel_size = spice.gdpool('INS{}_PIXEL_SIZE'.format(self.ikid), 0, 1)[0] * 0.001
return [0.0, 1/pixel_size, 0.0]
@property
def focal2pixel_lines(self):
"""
Expects ikid to be defined. This should be an integer containing the
Naif ID code for the instrument.
Returns
-------
: list<double>
focal plane to detector lines
"""
# Microns to mm
pixel_size = spice.gdpool('INS{}_PIXEL_SIZE'.format(self.ikid), 0, 1)[0] * 0.001
return [0.0, 0.0, 1/pixel_size]
@property
def sensor_model_version(self):
"""
Returns instrument model version
Returns
-------
: int
ISIS sensor model version
"""
return 2
@property
def detector_center_sample(self):
"""
Returns center detector sample acquired from Spice Kernels.
        Expects ikid to be defined. This should be the integer Naif ID code for
the instrument.
We have to add 0.5 to the CCD Center because the Dawn IK defines the
detector pixels as 0.0 being the center of the first pixel so they are
-0.5 based.
Returns
-------
: float
center detector sample
"""
return float(spice.gdpool('INS{}_CCD_CENTER'.format(self.ikid), 0, 2)[0]) + 0.5
@property
def detector_center_line(self):
"""
Returns center detector line acquired from Spice Kernels.
        Expects ikid to be defined. This should be the integer Naif ID code for
the instrument.
We have to add 0.5 to the CCD Center because the Dawn IK defines the
detector pixels as 0.0 being the center of the first pixel so they are
-0.5 based.
Returns
-------
: float
center detector line
"""
return float(spice.gdpool('INS{}_CCD_CENTER'.format(self.ikid), 0, 2)[1]) + 0.5 | Ale | /Ale-0.8.4.tar.gz/Ale-0.8.4/ale/drivers/dawn_drivers.py | dawn_drivers.py |
from ale.base.data_isis import IsisSpice
from ale.base.label_isis import IsisLabel
from ale.base import Driver
from ale.base.type_sensor import LineScanner
from ale.base.type_distortion import NoDistortion
class IdealLsIsisLabelIsisSpiceDriver(LineScanner, IsisSpice, IsisLabel, NoDistortion, Driver):
@property
def sensor_name(self):
"""
Returns the name of the instrument
Returns
-------
: str
name of the instrument.
"""
return self.instrument_id
@property
def instrument_id(self):
instrument_id = super().instrument_id
if instrument_id != "IdealCamera":
raise Exception(f"Instrument ID is {instrument_id} when it should be \"IdealCamera\"")
return instrument_id
@property
def ephemeris_start_time(self):
"""
The image start time in ephemeris time.
Returns
-------
float :
The image start ephemeris time
"""
return self.label.get('IsisCube').get('Instrument').get("EphemerisTime").value
@property
def ephemeris_stop_time(self):
"""
Returns the sum of the starting ephemeris time and the number of lines
times the exposure duration. Expects ephemeris start time, exposure duration
and image lines to be defined. These should be double precision numbers
containing the ephemeris start, exposure duration, and number of lines of
the image.
Returns
-------
: double
Center ephemeris time for an image
"""
return super().ephemeris_stop_time
@property
def spacecraft_name(self):
"""
Returns the spacecraft name used in various Spice calls to acquire
ephemeris data.
Expects the platform_name to be defined. This should be a string of
the form 'Mars_Reconnaissance_Orbiter'
Returns
-------
: str
spacecraft name
"""
return super().platform_name
@property
def detector_start_line(self):
"""
Returns the starting detector line for the image.
Returns
-------
: int
Starting detector line for the image
"""
return 0
@property
def detector_start_sample(self):
"""
Returns the starting detector sample for the image.
Returns
-------
: int
Starting detector sample for the image
"""
return 0
@property
def sensor_model_version(self):
"""
Returns the ISIS sensor model version.
Returns
-------
: int
ISIS sensor model version
"""
return 1
@property
def pixel2focal_x(self):
"""
Returns detector to focal plane x.
Returns
-------
: list<double>
detector to focal plane x
"""
return self.naif_keywords.get('IDEAL_TRANSX')
@property
def pixel2focal_y(self):
"""
Returns detector to focal plane y.
Returns
-------
: list<double>
detector to focal plane y
"""
return self.naif_keywords.get('IDEAL_TRANSY')
@property
def focal2pixel_lines(self):
"""
Returns focal plane to detector lines.
Returns
-------
: list<double>
focal plane to detector lines
"""
return self.naif_keywords.get('IDEAL_TRANSL')
@property
def focal2pixel_samples(self):
"""
Returns focal plane to detector samples.
Returns
-------
: list<double>
focal plane to detector samples
"""
return self.naif_keywords.get('IDEAL_TRANSS')
@property
def focal_length(self):
"""
The focal length of the instrument
Expects naif_keywords to be defined. This should be a dict containing
        Naif keywords from the label.
Returns
-------
float :
The focal length in millimeters
"""
return self.naif_keywords.get('IDEAL_FOCAL_LENGTH', None)
@property
def detector_center_sample(self):
"""
The center sample of the CCD in detector pixels
Returns
-------
float :
The center sample of the CCD
"""
return self.label['IsisCube']['Instrument']['SampleDetectors'] / 2.0
@property
def detector_center_line(self):
"""
The center line of the CCD in detector pixels
Returns
-------
float :
            The center line of the CCD
"""
        return 0.0

# End of ale/drivers/isis_ideal_drivers.py
import os
from glob import glob
import numpy as np
import pvl
import struct
import spiceypy as spice
import warnings
import codecs
from ale.base import Driver
from ale.base.data_isis import read_table_data
from ale.base.data_isis import parse_table
from ale.base.data_naif import NaifSpice
from ale.base.label_pds3 import Pds3Label
from ale.base.label_isis import IsisLabel
from ale.base.type_sensor import LineScanner
from ale.base.type_sensor import Framer
from ale.base.type_distortion import RadialDistortion
FILTER_SPECIFIC_LOOKUP = {
# This table contains the filter specific information from the ISIS iak kernel. The format is as follows:
#
# fikid: [focal_length, ITRANSX, ITRANSY, ITRANSS, ITRANSL]
-41211: [174.80,
[-0.026155499841886, -0.006999999957684, 0.0000007696901985785],
[-59.9926971240526, 0.0000007696901985785, 0.006999999957684],
[-2.7941368739538, -142.857141993552, 0.015707963236297],
[8570.3856624766, 0.015707963236297, 142.857141993552]],
-41212: [174.61,
[-0.049776299960244, -0.006999999994409, -0.0000002797762790202],
[-49.7678019894611, -0.0000002797762790202, 0.006999999994409],
[-7.39506020202098, -142.85714274304, -0.005709719980004],
[7109.68570860705, -0.005709719980004, 142.85714274304]],
-41213: [174.74,
[-0.022301998711020, -0.006999999595423, -0.000002379930922169],
[-39.5483075824599, -0.000002379930922169, 0.006999999595423],
[-5.10686167529011, -142.857134600479, -0.048570018819771],
[5649.75681632013, -0.048570018819771, 142.857134600479]],
-41214: [175.01,
[0.066552498797889, -0.006999999873562, 0.000001330464480785],
[-10.2417126493911, 0.000001330464480785, 0.006999999873562],
[9.78558608311961, -142.85714027677, 0.027152336342545],
[1463.0999735726, 0.027152336342545, 142.85714027677]],
-41215: [175.01,
[0.033863199938965, -0.006999999987383, -0.0000004202752836277],
[-0.005497966876288, -0.0000004202752836277, 0.006999999987383],
[4.83755282624351, -142.857142599663, -0.008577046604648],
[0.785714284298145, -0.008577046604648, 142.857142599663]],
-41216: [175.23,
[0.044402379964146, -0.006999996841365, 0.000006649877982807],
[10.212957818494, 0.000006649877982807, 0.006999996841365],
[4.95717543186894, -142.857078395208, 0.135711795567498],
[-1458.99934165026, 0.135711795567498, 142.857078395208]],
-41217: [174.80,
[0.032494699283744, -0.006999999845704, 0.000001469741752306],
[39.5530931773118, 0.000001469741752306, 0.006999999845704],
[3.45571545912004, -142.857139708249, 0.029994729638890],
[-5650.44273259436, 0.029994729638890, 142.857139708249]],
-41218: [174.82,
[0.016461898406507, -0.006999999322408, 0.000003079982431615],
[49.7917927568053, 0.000003079982431615, 0.006999999322408],
[-0.778052433438109, -142.857129028729, 0.062856784318668],
[-7113.11359717265, 0.062856784318668, 142.857129028729]],
-41219: [174.87,
[0.024021897233075, -0.006999999193716, 0.000003359758681093],
[59.9868884703161, 0.000003359758681093, 0.006999999193716],
[-0.681392000547864, -142.857126402363, 0.068566503695773],
[-8569.5561557859, 0.068566503695773, 142.857126402363]],
}
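# Usage sketch for the table above (the key -41212 is just one of the NAIF codes
# listed in the table): FILTER_SPECIFIC_LOOKUP[-41212][0] is that sensor's focal
# length in mm, and indices 1-4 hold ITRANSX, ITRANSY, ITRANSS, and ITRANSL, which
# the focal_length, pixel2focal_*, and focal2pixel_* properties below return directly.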
class MexHrscPds3NaifSpiceDriver(LineScanner, Pds3Label, NaifSpice, RadialDistortion, Driver):
"""
Driver for a PDS3 Mars Express (Mex) High Resolution Stereo Camera (HRSC) images.
NOTES
-----
    * HRSC has 9 different filters. Each has its own instrument id, as well as
      the main/"HEAD" camera composing those filters. There is also another
      "SRC" instrument, making up a total of 11 distinct sensors. It is very
      important to understand which code is needed when/where.
    * HRSC is a variable line scanner, and so does not maintain one exposure
      duration, but rather differing exposure durations per line. This
      information is stored within the individual records in the image data
      itself, with the first 8 bytes making up the double precision
      ephemeris time at which the line exposure was started, and the next 4 bytes
      making up the float containing that line's exposure duration.
"""
@property
def odtk(self):
"""
The coefficients for the distortion model
Returns
-------
: list
            Radial distortion coefficients. All zeroes as no distortion model is applied.
"""
return [0.0, 0.0, 0.0]
@property
def ikid(self):
"""
Returns the Naif ID code for the HRSC head instrument
This would be the Naif ID code for the base (or "head") instrument.
Returns
-------
: int
            Naif ID used for identifying the instrument in SPICE kernels
"""
return spice.bods2c("MEX_HRSC_HEAD")
@property
def fikid(self):
"""
Naif ID code of the filter dependent instrument codes.
Expects filter_number to be defined. This should be an integer containing
the filter number from the pds3 label.
Expects ikid to be defined. This should be the integer Naid ID code for
the instrument.
Returns
-------
: int
Naif ID code used in calculating focal length
"""
return spice.bods2c(self.instrument_id)
# TODO Since HRSC has different frames based on filters, need to check that
# this is returning the value needed for all calculations from the base
    # class and therefore does not need to be reimplemented.
# @property
# def sensor_frame_id(self):
# """
# Returns the Naif ID code for the sensor reference frame
#
#
    # This is the frame of the HRSC instrument itself, and is not dependent on filter.
#
# Returns
# -------
# : int
# Naif ID code for the sensor frame
# """
# return -41210
@property
def instrument_id(self):
"""
Returns the short name of the instrument
MEX HRSC has nine different filters each with their own name.
Returns
-------
: str
Short name of the instrument
"""
if(super().instrument_id != "HRSC"):
raise Exception ("Instrument ID is wrong.")
return self.label['DETECTOR_ID']
@property
def spacecraft_name(self):
"""
Spacecraft name used in various SPICE calls to acquire
        ephemeris data. MEX HRSC img PDS3 labels do not have the SPACECRAFT_NAME
keyword, so we override it here to use the label_pds3 property for
instrument_host_id
Returns
-------
: str
Spacecraft name
"""
return self.instrument_host_id
@property
def focal_length(self):
"""
Returns the focal length of the filter-specific sensor
Expects fikid to be defined. This must be the integer Naif id code of
the filter-specific instrument.
NOTE: These values are pulled from ISIS iak kernels.
Returns
-------
: float
focal length
"""
return FILTER_SPECIFIC_LOOKUP[self.fikid][0]
@property
def focal2pixel_lines(self):
"""
Expects fikid to be defined. This must be the integer Naif id code of
        the filter-specific instrument.
NOTE: These values are pulled from ISIS iak kernels.
Returns
-------
: list<double>
focal plane to detector lines
"""
return FILTER_SPECIFIC_LOOKUP[self.fikid][4]
@property
def focal2pixel_samples(self):
"""
Expects fikid to be defined. This must be the integer Naif id code of
        the filter-specific instrument.
NOTE: These values are pulled from ISIS iak kernels.
Returns
-------
: list<double>
focal plane to detector samples
"""
return FILTER_SPECIFIC_LOOKUP[self.fikid][3]
@property
def pixel2focal_x(self):
"""
Expects fikid to be defined. This must be the integer Naif id code of
the filter-specific instrument.
NOTE: These values are pulled from ISIS iak kernels.
Returns
-------
: list<double>
detector to focal plane x
"""
return FILTER_SPECIFIC_LOOKUP[self.fikid][1]
@property
def pixel2focal_y(self):
"""
Expects fikid to be defined. This must be the integer Naif id code of
the filter-specific instrument.
NOTE: These values are pulled from ISIS iak kernels.
Returns
-------
: list<double>
detector to focal plane y
"""
return FILTER_SPECIFIC_LOOKUP[self.fikid][2]
@property
def detector_center_line(self):
"""
Returns the center detector line.
For HRSC, we are dealing with a single line, so center line will be 0.
Returns
-------
: float
Detector line of the principal point
"""
return 0.0
@property
def detector_center_sample(self):
"""
Returns the center detector sample.
For HRSC, center sample is consistent regardless of filter. This is
different from ISIS's center sample because ISIS line scan sensors use
0.5 based detector samples.
Returns
-------
: float
Detector sample of the principal point
"""
return 2592.0
@property
def line_scan_rate(self):
"""
Returns a 2D array of line scan rates.
For HRSC, the ephemeris times and exposure durations are
stored in the image data.
In the image, every line has an entry. This method goes through
        and removes consecutive lines with the same exposure duration.
There are also potentially missing lines in the image which this
method accounts for.
Returns
-------
: list
Line scan rates
"""
relative_times = [time - self.center_ephemeris_time for time in self.binary_ephemeris_times]
start_lines = [self.binary_lines[0]]
start_times = [relative_times[0]]
exposure_durations = [self.binary_exposure_durations[0]]
for line, start_time, exposure_duration in zip(self.binary_lines, relative_times, self.binary_exposure_durations):
# Check for lines missing from the PDS image
#
# If more exposures fit into the time since the last entry than
# there are lines since the last entry, then there are missing lines.
#
# If line are missing, add an extra entry for the line immediately
# following them.
skipped_lines = int( (start_time - start_times[-1]) / exposure_durations[-1] - (line - start_lines[-1]) + 0.5 ) # add 0.5 to round up
if exposure_duration != exposure_durations[-1] or skipped_lines > 0:
start_lines.append(line)
start_times.append(start_time)
exposure_durations.append(exposure_duration)
return (start_lines, start_times, exposure_durations)
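    # Worked example of the skipped-line check above (hypothetical numbers): if the
    # previous entry starts line 1 at t = 0.000 s with a 0.012 s exposure and the
    # current line is 5 at t = 0.060 s, then
    # skipped_lines = int(0.060/0.012 - (5 - 1) + 0.5) = int(1.5) = 1,
    # so one line is missing and line 5 gets its own (line, time, duration) entry.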
@property
def binary_exposure_durations(self):
"""
Returns the exposure durations taken from the binary image data.
        For HRSC, the exposure durations are embedded in the binary data of the image.
        The exposure durations start at the 9th byte of the line/record and are 4 bytes long.
Returns
-------
: list
Exposure durations
"""
if not hasattr(self, '_binary_exposure_durations'):
self.read_image_data()
return self._binary_exposure_durations
@property
def binary_ephemeris_times(self):
"""
Returns the ephemeris times taken from the binary image data.
        For HRSC, the ephemeris times are embedded in the binary data of the image.
The ephemeris times start at the first byte of the line/records and are 8 bytes long.
Returns
-------
: list
Ephemeris times
"""
if not hasattr(self, '_binary_ephemeris_times'):
self.read_image_data()
return self._binary_ephemeris_times
@property
def binary_lines(self):
"""
Returns the lines of the binary image data.
For example, the first entry would be the first line of the image.
Returns
-------
: list
Image lines
"""
if not hasattr(self, '_binary_lines'):
self.read_image_data()
return self._binary_lines
def read_image_data(self):
"""
Reads data off of image and stores in binary_exposure_durations, binary_lines,
and binary_ephemeris_times.
        For HRSC, the exposure durations and ephemeris times are embedded in the binary
        data of the image itself. Each line is stored in what is referred to as a
        "record" within the image. The label gives the size of each record, the total
        number of records in the file, and the record at which the image data begins,
        so the starting byte of the binary data can be calculated.
        For each line/record of the binary data, the first 8 bytes make up the
        double precision value of the ephemeris time, with the next 4 bytes
        making up the float value of the line exposure duration for the
        associated line. NOTE: The prefix data is always LSB, regardless
        of the overall file format.
"""
lines = []
times = []
durations = []
bytes_per_record = self.label['RECORD_BYTES']
num_records = self.label['FILE_RECORDS']
img_start_record = self.label['^IMAGE']
img_start_byte = bytes_per_record * (img_start_record - 1) # Offset by one for zero-based records
num_img_records = num_records - img_start_record
if os.path.isfile(self._file):
with open(self._file, 'rb') as image_file:
image_file.seek(img_start_byte)
data_blob = b''
for record in range(num_img_records):
initial_read = image_file.read(12)
image_file.read(bytes_per_record - 12)
data_blob += initial_read
else:
data_blob = codecs.decode(self._props['preprocess_data'].encode('utf-8'), 'base64')
for record in range(num_img_records):
read_record = record * 12
eph_time = struct.unpack('<d', data_blob[read_record:read_record+8])[0]
exp_dur = struct.unpack('<f', data_blob[read_record+8:read_record+12])[0] / 1000
            # Offset for zero-based corrections, and then offset for ISIS pixel definition
lines.append(record+1-0.5)
times.append(eph_time)
durations.append(exp_dur)
self._binary_exposure_durations = durations
self._binary_lines = lines
self._binary_ephemeris_times = times
@property
def ephemeris_stop_time(self):
"""
Returns the ephemeris stop time.
For HRSC, the ephemeris stop time is calculated from the binary image data.
Returns
-------
: float
Ephemeris stop time
"""
return self.binary_ephemeris_times[-1] + self.binary_exposure_durations[-1]
# TODO We need to confirm that returning nothing here does not affect
# calculations elsewhere in code. Or is there possibly just a better way of
# doing this?
@property
def line_exposure_duration(self):
"""
Line exposure duration returns the time between the exposures for
subsequent lines.
Since HRSC is a variable line scan camera, it does not make sense to
have one exposure duration value.
Returns
-------
: float
Returns the line exposure duration in seconds from the PDS3 label.
"""
return
@property
def sensor_model_version(self):
"""
Returns
-------
: int
ISIS sensor model version
"""
return 1
class MexHrscIsisLabelNaifSpiceDriver(LineScanner, IsisLabel, NaifSpice, RadialDistortion, Driver):
@property
def instrument_id(self):
"""
Returns the name of the instrument
Returns
-------
: str
Name of the instrument
"""
if(super().instrument_id != "HRSC"):
raise Exception ("Instrument ID is wrong.")
return self.label['IsisCube']['Archive']['DetectorId']
@property
def sensor_name(self):
"""
Returns the name of the instrument. Need to over-ride isis_label because
        InstrumentName is not defined in the ISIS label for MEX HRSC cubes.
Returns
-------
: str
Name of the sensor
"""
return self.instrument_id
@property
def sensor_model_version(self):
"""
Returns
-------
: int
ISIS sensor model version
"""
return 1
@property
def times_table(self):
"""
        Returns EphemerisTime, ExposureTime, and LineStart information which was stored as
binary information in the ISIS cube.
Returns
-------
: dict
Dictionary with EphemerisTime, ExposureTime, and LineStart.
"""
isis_bytes = read_table_data(self.label['Table'], self._file)
return parse_table(self.label['Table'], isis_bytes)
@property
def line_scan_rate(self):
"""
Returns
-------
: tuple
list of lines, list of ephemeris times, and list of exposure
times
"""
return self.times_table['LineStart'], self.times_table['EphemerisTime'], self.times_table['ExposureTime']
@property
def ephemeris_start_time(self):
"""
Returns
-------
: float
starting ephemeris time
"""
return self.times_table['EphemerisTime'][0]
@property
def ephemeris_stop_time(self):
"""
Returns
-------
: float
ephemeris stop time
"""
last_line = self.times_table['LineStart'][-1]
return self.times_table['EphemerisTime'][-1] + ((self.image_lines - last_line + 1) * self.times_table['ExposureTime'][-1])
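    # Worked example for the property above (hypothetical values): if the table's last
    # entry starts line 9980 at t = 100.0 s with a 0.012 s exposure and the image has
    # 10000 lines, the stop time is 100.0 + (10000 - 9980 + 1) * 0.012 = 100.252 s.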
@property
def ikid(self):
"""
Returns the Naif ID code for the HRSC head instrument
This would be the Naif ID code for the base (or "head") instrument.
Returns
-------
: int
            Naif ID used for identifying the instrument in SPICE kernels
"""
return spice.bods2c("MEX_HRSC_HEAD")
@property
def fikid(self):
"""
Naif ID code of the filter dependent instrument codes.
Expects filter_number to be defined. This should be an integer containing
the filter number from the pds3 label.
Expects ikid to be defined. This should be the integer Naif ID code for
the instrument.
Returns
-------
: int
Naif ID code used in calculating focal length
"""
return spice.bods2c(self.instrument_id)
class MexSrcPds3NaifSpiceDriver(Framer, Pds3Label, NaifSpice, RadialDistortion, Driver):
"""
Driver for a PDS3 Mars Express (Mex) High Resolution Stereo Camera (HRSC) - Super Resolution
Channel (SRC) image.
"""
@property
def odtk(self):
"""
The coefficients for the distortion model. No distortion model, so pass in all zeroes.
Returns
-------
: list
Radial distortion coefficients.
"""
return [0.0, 0.0, 0.0]
@property
def ikid(self):
"""
Returns the Naif ID code for HRSC SRC.
Returns
-------
: int
            Naif ID used for identifying the instrument in SPICE kernels
"""
return spice.bods2c("MEX_HRSC_SRC")
@property
def instrument_id(self):
"""
Returns the short name of the instrument
MEX HRSC has nine different filters each with their own name.
Returns
-------
: str
Short name of the instrument
"""
if(super().instrument_id != "HRSC"):
raise Exception ("Instrument ID is wrong.")
return self.label['DETECTOR_ID']
@property
def spacecraft_name(self):
"""
Spacecraft name used in various SPICE calls to acquire
        ephemeris data. MEX HRSC img PDS3 labels do not have the SPACECRAFT_NAME
keyword, so we override it here to use the label_pds3 property for
instrument_host_id
Returns
-------
: str
Spacecraft name
"""
return self.instrument_host_id
@property
def focal2pixel_lines(self):
"""
NOTE: These values are pulled from ISIS iak kernels.
Returns
-------
: list<double>
focal plane to detector lines
"""
return [0.0, 0.0, 111.111111111111]
@property
def focal2pixel_samples(self):
"""
NOTE: These values are pulled from ISIS iak kernels.
Returns
-------
: list<double>
focal plane to detector samples
"""
return [0.0, 111.111111111111, 0.0]
@property
def detector_center_line(self):
"""
Returns the center detector line.
Returns
-------
: float
Detector line of the principal point
"""
return 512.0
@property
def detector_center_sample(self):
"""
Returns the center detector sample.
This is
different from ISIS's center sample because ISIS uses
0.5-based samples.
Returns
-------
: float
Detector sample of the principal point
"""
return 512.0
@property
def sensor_model_version(self):
"""
Returns
-------
: int
ISIS sensor model version
"""
        return 1

# End of ale/drivers/mex_drivers.py
import os
import numpy as np
import pvl
import spiceypy as spice
from glob import glob
from ale.util import get_metakernels
from ale.base import Driver
from ale.base.data_naif import NaifSpice
from ale.base.label_pds3 import Pds3Label
from ale.base.label_isis import IsisLabel
from ale.base.type_sensor import LineScanner, Radar
class LroLrocPds3LabelNaifSpiceDriver(LineScanner, NaifSpice, Pds3Label, Driver):
"""
Driver for reading LROC NACL, NACR (not WAC, it is a push frame) labels. Requires a Spice mixin to
    acquire additional ephemeris and instrument data located exclusively in SPICE kernels, a PDS3 label,
and the LineScanner and Driver bases.
"""
@property
def instrument_id(self):
"""
The short text name for the instrument
Returns an instrument id uniquely identifying the instrument. Used to acquire
        instrument codes from the SPICE bods2c routine.
Returns
-------
str
The short text name for the instrument
"""
instrument = super().instrument_id
frame_id = self.label.get("FRAME_ID")
if instrument == "LROC" and frame_id == "LEFT":
return "LRO_LROCNACL"
elif instrument == "LROC" and frame_id == "RIGHT":
return "LRO_LROCNACR"
@property
def spacecraft_name(self):
"""
Spacecraft name used in various SPICE calls to acquire
        ephemeris data. LROC NAC img PDS3 labels do not have the SPACECRAFT_NAME keyword, so we
override it here to use the label_pds3 property for instrument_host_id
Returns
-------
: str
Spacecraft name
"""
return self.instrument_host_id
@property
def sensor_model_version(self):
"""
Returns ISIS instrument sensor model version number
Returns
-------
: int
ISIS sensor model version
"""
return 2
@property
def usgscsm_distortion_model(self):
"""
The distortion model name with its coefficients
LRO LROC NAC does not use the default distortion model so we need to overwrite the
method packing the distortion model into the ISD.
Returns
-------
: dict
Returns a dict with the model name : dict of the coefficients
"""
return {"lrolrocnac":
{"coefficients": self.odtk}}
@property
def odtk(self):
"""
The coefficients for the distortion model
Returns
-------
: list
Radial distortion coefficients. There is only one coefficient for LROC NAC l/r
"""
return spice.gdpool('INS{}_OD_K'.format(self.ikid), 0, 1).tolist()
@property
def light_time_correction(self):
"""
Returns the type of light time correciton and abberation correction to
use in NAIF calls.
LROC is specifically set to not use light time correction because it is
so close to the surface of the moon that light time correction to the
center of the body is incorrect.
Returns
-------
: str
            The light time and aberration correction string for use in NAIF calls.
See https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/req/abcorr.html
for the different options available.
"""
return 'NONE'
@property
def detector_center_sample(self):
"""
The center of the CCD in detector pixels
ISIS uses 0.5 based CCD samples, so we need to convert to 0 based.
Returns
-------
float :
The center sample of the CCD
"""
return super().detector_center_sample - 0.5
@property
def focal2pixel_lines(self):
"""
Expects ikid to be defined. This must be the integer Naif id code of
the instrument. For LROC NAC this is flipped depending on the spacecraft
direction.
Returns
-------
: list<double>
focal plane to detector lines
"""
focal2pixel_lines = np.array(list(spice.gdpool('INS{}_ITRANSL'.format(self.ikid), 0, 3))) / self.sampling_factor
if self.spacecraft_direction < 0:
return -focal2pixel_lines
else:
return focal2pixel_lines
@property
def ephemeris_start_time(self):
"""
The starting ephemeris time for LRO is computed by taking the
LRO:SPACECRAFT_CLOCK_PREROLL_COUNT, as defined in the label, and
adding offsets that were taken from an IAK.
Returns
-------
: double
Starting ephemeris time of the image
"""
start_time = spice.scs2e(self.spacecraft_id, self.label['LRO:SPACECRAFT_CLOCK_PREROLL_COUNT'])
return start_time + self.constant_time_offset + self.additional_preroll * self.exposure_duration
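    # Worked example for the property above (hypothetical values): with a clock-derived
    # start of 350000000.0 s, a constant offset of 0.0 s and a corrected exposure of
    # 0.003375 s, the start time is 350000000.0 + 0.0 + 1024 * 0.003375 = 350000003.456 s,
    # i.e. the 1024 preroll lines are skipped before the first image line.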
@property
def exposure_duration(self):
"""
Takes the exposure_duration defined in a parent class and adds
offsets taken from an IAK.
Returns
-------
: float
Returns the exposure duration in seconds.
"""
return super().exposure_duration * (1 + self.multiplicative_line_error) + self.additive_line_error
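    # Worked example for the property above (hypothetical label exposure of 0.003375 s):
    # 0.003375 * (1 + 0.0045) + 0.0 = 0.0033901875 s after the IAK corrections.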
@property
def multiplicative_line_error(self):
"""
Returns the multiplicative line error defined in an IAK.
Returns
-------
: float
Returns the multiplicative line error.
"""
return 0.0045
@property
def additive_line_error(self):
"""
Returns the additive line error defined in an IAK.
Returns
-------
: float
Returns the additive line error.
"""
return 0.0
@property
def constant_time_offset(self):
"""
Returns the constant time offset defined in an IAK.
Returns
-------
: float
Returns the constant time offset.
"""
return 0.0
@property
def additional_preroll(self):
"""
Returns the addition preroll defined in an IAK.
Returns
-------
: float
            Returns the additional preroll.
"""
return 1024.0
@property
def mission_name(self):
return self.label['MISSION_NAME']
@property
def sampling_factor(self):
"""
Returns the summing factor from the PDS3 label that is defined by the CROSSTRACK_SUMMING.
For example a return value of 2 indicates that 2 lines and 2 samples (4 pixels)
were summed and divided by 4 to produce the output pixel value.
Returns
-------
: int
Number of samples and lines combined from the original data to produce a single pixel in this image
"""
return self.crosstrack_summing
@property
def spacecraft_direction(self):
"""
Returns the x axis of the first velocity vector relative to the
        spacecraft. This indicates whether the craft is moving forwards or backwards.
From LROC Frame Kernel: lro_frames_2014049_v01.tf
"+X axis is in the direction of the velocity vector half the year. The
other half of the year, the +X axis is opposite the velocity vector"
Hence we rotate the first velocity vector into the sensor reference
frame, but the X component of that vector is inverted compared to the
spacecraft so a +X indicates backwards and -X indicates forwards
The returned velocity is also slightly off from the spacecraft velocity
due to the sensor being attached to the craft with wax.
Returns
-------
direction : double
X value of the first velocity relative to the sensor
"""
frame_chain = self.frame_chain
lro_bus_id = spice.bods2c('LRO_SC_BUS')
time = self.ephemeris_start_time
state, _ = spice.spkezr(self.spacecraft_name, time, 'J2000', 'None', self.target_name)
position = state[:3]
velocity = state[3:]
rotation = frame_chain.compute_rotation(1, lro_bus_id)
rotated_velocity = spice.mxv(rotation._rots.as_dcm()[0], velocity)
return rotated_velocity[0]
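    # Interpretation sketch for the property above: a positive X component means the
    # sensor +X axis opposes the velocity vector (flying "backwards"), a negative one
    # means it is aligned with it ("forwards"); focal2pixel_lines flips sign accordingly.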
class LroLrocIsisLabelNaifSpiceDriver(LineScanner, NaifSpice, IsisLabel, Driver):
@property
def instrument_id(self):
"""
The short text name for the instrument
Returns an instrument id uniquely identifying the instrument. Used to acquire
        instrument codes from the SPICE bods2c routine.
Returns
-------
str
The short text name for the instrument
"""
id_lookup = {
"NACL": "LRO_LROCNACL",
"NACR": "LRO_LROCNACR"
}
return id_lookup[super().instrument_id]
@property
def sensor_model_version(self):
"""
Returns ISIS instrument sensor model version number
Returns
-------
: int
ISIS sensor model version
"""
return 2
@property
def usgscsm_distortion_model(self):
"""
The distortion model name with its coefficients
LRO LROC NAC does not use the default distortion model so we need to overwrite the
method packing the distortion model into the ISD.
Returns
-------
: dict
Returns a dict with the model name : dict of the coefficients
"""
return {"lrolrocnac":
{"coefficients": self.odtk}}
@property
def odtk(self):
"""
The coefficients for the distortion model
Returns
-------
: list
Radial distortion coefficients. There is only one coefficient for LROC NAC l/r
"""
return spice.gdpool('INS{}_OD_K'.format(self.ikid), 0, 1).tolist()
@property
def light_time_correction(self):
"""
        Returns the type of light time correction and aberration correction to
use in NAIF calls.
LROC is specifically set to not use light time correction because it is
so close to the surface of the moon that light time correction to the
center of the body is incorrect.
Returns
-------
: str
            The light time and aberration correction string for use in NAIF calls.
See https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/req/abcorr.html
for the different options available.
"""
return 'NONE'
@property
def detector_center_sample(self):
"""
The center of the CCD in detector pixels
ISIS uses 0.5 based CCD samples, so we need to convert to 0 based.
Returns
-------
float :
The center sample of the CCD
"""
return super().detector_center_sample - 0.5
@property
def ephemeris_start_time(self):
"""
The starting ephemeris time for LRO is computed by taking the
LRO:SPACECRAFT_CLOCK_PREROLL_COUNT, as defined in the label, and
adding offsets that were taken from an IAK.
Returns
-------
: double
Starting ephemeris time of the image
"""
start_time = spice.scs2e(self.spacecraft_id, self.label['IsisCube']['Instrument']['SpacecraftClockPrerollCount'])
return start_time + self.constant_time_offset + self.additional_preroll * self.exposure_duration
@property
def exposure_duration(self):
"""
Takes the exposure_duration defined in a parent class and adds
offsets taken from an IAK.
Returns
-------
: float
Returns the exposure duration in seconds.
"""
return super().exposure_duration * (1 + self.multiplicative_line_error) + self.additive_line_error
@property
def focal2pixel_lines(self):
"""
Expects ikid to be defined. This must be the integer Naif id code of
the instrument. For LROC NAC this is flipped depending on the spacecraft
direction.
Returns
-------
: list<double>
focal plane to detector lines
"""
focal2pixel_lines = np.array(list(spice.gdpool('INS{}_ITRANSL'.format(self.ikid), 0, 3))) / self.sampling_factor
if self.spacecraft_direction < 0:
return -focal2pixel_lines
else:
return focal2pixel_lines
@property
def multiplicative_line_error(self):
"""
Returns the multiplicative line error defined in an IAK.
Returns
-------
: float
Returns the multiplicative line error.
"""
return 0.0045
@property
def additive_line_error(self):
"""
Returns the additive line error defined in an IAK.
Returns
-------
: float
Returns the additive line error.
"""
return 0.0
@property
def constant_time_offset(self):
"""
Returns the constant time offset defined in an IAK.
Returns
-------
: float
Returns the constant time offset.
"""
return 0.0
@property
def additional_preroll(self):
"""
        Returns the additional preroll defined in an IAK.
Returns
-------
: float
Returns the additionl preroll.
"""
return 1024.0
@property
def sampling_factor(self):
"""
        Returns the summing factor from the ISIS label that is defined by the SpatialSumming keyword.
For example a return value of 2 indicates that 2 lines and 2 samples (4 pixels)
were summed and divided by 4 to produce the output pixel value.
Returns
-------
: int
Number of samples and lines combined from the original data to produce a single pixel in this image
"""
return self.label['IsisCube']['Instrument']['SpatialSumming']
@property
def spacecraft_direction(self):
"""
Returns the x axis of the first velocity vector relative to the
        spacecraft. This indicates whether the craft is moving forwards or backwards.
From LROC Frame Kernel: lro_frames_2014049_v01.tf
"+X axis is in the direction of the velocity vector half the year. The
other half of the year, the +X axis is opposite the velocity vector"
Hence we rotate the first velocity vector into the sensor reference
frame, but the X component of that vector is inverted compared to the
spacecraft so a +X indicates backwards and -X indicates forwards
The returned velocity is also slightly off from the spacecraft velocity
due to the sensor being attached to the craft with wax.
Returns
-------
direction : double
X value of the first velocity relative to the sensor
"""
frame_chain = self.frame_chain
lro_bus_id = spice.bods2c('LRO_SC_BUS')
time = self.ephemeris_start_time
state, _ = spice.spkezr(self.spacecraft_name, time, 'J2000', 'None', self.target_name)
position = state[:3]
velocity = state[3:]
rotation = frame_chain.compute_rotation(1, lro_bus_id)
rotated_velocity = spice.mxv(rotation._rots.as_dcm()[0], velocity)
return rotated_velocity[0]
class LroMiniRfIsisLabelNaifSpiceDriver(Radar, NaifSpice, IsisLabel, Driver):
@property
def instrument_id(self):
"""
The short text name for the instrument
Returns an instrument id uniquely identifying the instrument. Used to acquire
        instrument codes from the SPICE bods2c routine.
Returns
-------
str
The short text name for the instrument
"""
return super().instrument_id
@property
def wavelength(self):
"""
        Returns the wavelength in meters used for image acquisition.
Returns
-------
: double
Wavelength in meters used to create an image
"""
        # Get the float value of the frequency from the label
frequency = self.label['IsisCube']['Instrument']['Frequency'].value
wavelength = spice.clight() / frequency / 1000.0
return wavelength
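    # Example for the property above with a hypothetical label value of 2380:
    # 299792.458 / 2380 / 1000 is roughly 0.126 m (S-band), since spice.clight()
    # returns the speed of light in km/s.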
@property
def scaled_pixel_width(self):
"""
Returns the scaled pixel width
Returns
-------
: double
scaled pixel width
"""
        return self.label['IsisCube']['Instrument']['ScaledPixelHeight']
    # The default line_exposure_duration assumes that time is given in milliseconds and converts it;
    # in this case, the time is already given in seconds.
@property
def line_exposure_duration(self):
"""
Line exposure duration in seconds. The sum of the burst and the delay for the return.
Returns
-------
: double
            line exposure duration in seconds
"""
return self.label['IsisCube']['Instrument']['LineExposureDuration']
@property
def range_conversion_coefficients(self):
"""
Range conversion coefficients
Returns
-------
: List
range conversion coefficients
"""
range_coefficients_orig = self.label['IsisCube']['Instrument']['RangeCoefficientSet']
        # The first element of each list is time, which we handle separately in range_conversion_times
range_coefficients = [elt[1:] for elt in range_coefficients_orig]
return range_coefficients
@property
def range_conversion_times(self):
"""
Times, in et, associated with range conversion coefficients
Returns
-------
: List
times for range conversion coefficients
"""
range_coefficients_utc = self.label['IsisCube']['Instrument']['RangeCoefficientSet']
range_coefficients_et = [spice.str2et(elt[0]) for elt in range_coefficients_utc]
return range_coefficients_et
@property
def ephemeris_start_time(self):
"""
        Returns the start ephemeris time for the image.
Returns
-------
: float
start time
"""
return spice.str2et(str(self.utc_start_time))
@property
def ephemeris_stop_time(self):
"""
Returns the stop ephemeris times for the image.
Returns
-------
: float
stop time
"""
return spice.str2et(str(self.utc_stop_time))
@property
def look_direction(self):
"""
Direction of the look (left or right)
Returns
-------
: string
left or right
"""
        return self.label['IsisCube']['Instrument']['LookDirection'].lower()

# End of ale/drivers/lro_drivers.py
import spiceypy as spice
import ale
from ale.base.base import Driver
from ale.base.type_distortion import NoDistortion
from ale.base.data_naif import NaifSpice
from ale.base.label_isis import IsisLabel
from ale.base.type_sensor import LineScanner
import pvl
class OdyThemisIrIsisLabelNaifSpiceDriver(LineScanner, IsisLabel, NaifSpice, NoDistortion, Driver):
"""
Driver for Themis IR ISIS cube
"""
@property
def instrument_id(self):
inst_id = super().instrument_id
if inst_id not in ["THEMIS_IR"]:
raise Exception(f"{inst_id} is not a valid THEMIS IR instrument name. Expecting THEMIS_IR")
return inst_id
@property
def sensor_model_version(self):
return 1
@property
def spacecraft_name(self):
name = super().spacecraft_name.replace('_', ' ')
if name != "MARS ODYSSEY":
raise Exception("{name} for label is not a valid Mars Odyssey spacecraft name")
return name
@property
def ikid(self):
return self.label['IsisCube']['Kernels']['NaifFrameCode']
@property
def line_exposure_duration(self):
"""
        Returns the line exposure duration in seconds.
Taken from ISIS ThemisIr Camera Model
"""
return (33.2871/1000 * self.line_summing)
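    # Example for the property above: with line_summing = 2 the duration is
    # 2 * 33.2871 ms = 0.0665742 s.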
@property
def ephemeris_start_time(self):
og_start_time = super().ephemeris_start_time
offset = self.label["IsisCube"]["Instrument"]["SpacecraftClockOffset"]
if isinstance(offset, pvl._collections.Units):
units = offset.units
if "ms" in units.lower():
offset = offset.value * 0.001
else:
# if not milliseconds, the units are probably seconds
offset = offset.value
return og_start_time + offset
@property
def focal_length(self):
return 202.059
@property
def detector_center_line(self):
return 0
@property
def detector_center_sample(self):
return 0
@property
def sensor_name(self):
return self.label['IsisCube']['Instrument']['SpacecraftName']
class OdyThemisVisIsisLabelNaifSpiceDriver(LineScanner, IsisLabel, NaifSpice, NoDistortion, Driver):
""""
Driver for Themis VIS ISIS cube
"""
@property
def instrument_id(self):
inst_id = super().instrument_id
if inst_id not in ["THEMIS_VIS"]:
raise Exception(f"{inst_id} is not a valid THEMIS VIS instrument name. Expecting \"THEMIS_VIS\"")
return inst_id
@property
def sensor_model_version(self):
return 1
@property
def spacecraft_name(self):
name = super().spacecraft_name.replace('_', ' ')
if name != "MARS ODYSSEY":
raise Exception("{name} for label is not a valid Mars Odyssey spacecraft name")
return name
@property
def ikid(self):
return self.label['IsisCube']['Kernels']['NaifFrameCode']
@property
def ephemeris_start_time(self):
"""
The starting ephemeris time, in seconds
Formula derived from ISIS3's ThemisVis Camera model
Returns
-------
: double
Starting ephemeris time in seconds
"""
og_start_time = super().ephemeris_start_time
offset = self.label["IsisCube"]["Instrument"]["SpacecraftClockOffset"]
if isinstance(offset, pvl._collections.Units):
units = offset.units
if "ms" in units.lower():
offset = offset.value * 0.001
else:
# if not milliseconds, the units are probably seconds
offset = offset.value
return og_start_time + offset - (self.line_exposure_duration/2)
@property
def line_exposure_duration(self):
"""
The line exposure duration of the image, in seconds
Returns
-------
: float
Line exposure duration in seconds
"""
line_exposure_duration = self.label['IsisCube']['Instrument']['ExposureDuration']
if isinstance(line_exposure_duration, pvl._collections.Units):
units = line_exposure_duration.units
if "ms" in units.lower():
line_exposure_duration = line_exposure_duration.value * 0.001
else:
# if not milliseconds, the units are probably seconds
line_exposure_duration = line_exposure_duration.value
else:
# if no units are available, assume the exposure duration is given in milliseconds
line_exposure_duration = line_exposure_duration * 0.001
return line_exposure_duration
@property
def focal_length(self):
return 202.059
@property
def detector_center_line(self):
return 0
@property
def detector_center_sample(self):
return 0
@property
def sensor_name(self):
        return self.label['IsisCube']['Instrument']['SpacecraftName']

# End of ale/drivers/ody_drivers.py
from ale import util
from ale.base.data_naif import NaifSpice
from ale.base.label_isis import IsisLabel
from ale.base.type_sensor import Framer
from ale.base.type_distortion import NoDistortion
from ale.base.base import Driver
class JunoJunoCamIsisLabelNaifSpiceDriver(Framer, IsisLabel, NaifSpice, NoDistortion, Driver):
"""
Driver for reading Juno ISIS labels.
"""
@property
def instrument_id(self):
"""
Expects instrument_id to be defined in the IsisLabel mixin. This should be
a string of the form 'JNC'
Returns
-------
: str
instrument id
"""
look_up = {'JNC': 'JUNO_JUNOCAM'}
return look_up[super().instrument_id]
@property
def ephemeris_start_time(self):
"""
        Juno's camera is split into stacked frames where an image is made
        of sets of RGBM chunks. We need to account for these chunks since
        ISIS produces some number of cubes N where N = M*4.
Computation obtained from JunoCamera.cpp
Returns
-------
: double
Starting ephemeris time
"""
if not hasattr(self, '_ephemeris_start_time'):
initial_time = super().ephemeris_start_time
frame_number = self.label['IsisCube']['Instrument']['FrameNumber']
inter_frame_delay = self.label['IsisCube']['Instrument']['InterFrameDelay'].value
start_time_bias = self.naif_keywords[f'INS{self.ikid}_START_TIME_BIAS']
inter_frame_delay_bias = self.naif_keywords[f'INS{self.ikid}_INTERFRAME_DELTA']
self._ephemeris_start_time = initial_time + start_time_bias + (frame_number - 1) * (inter_frame_delay + inter_frame_delay_bias)
return self._ephemeris_start_time
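    # Worked example for the property above (hypothetical values): with
    # initial_time = 500000000.0 s, start_time_bias = 0.062 s, frame_number = 3,
    # inter_frame_delay = 0.378 s and inter_frame_delay_bias = 0.001 s, the start
    # time is 500000000.0 + 0.062 + 2 * (0.378 + 0.001) = 500000000.820 s.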
@property
def sensor_model_version(self):
"""
Returns
-------
: int
ISIS sensor model version
"""
return 1
@property
def naif_keywords(self):
"""
Updated set of naif keywords containing the NaifIkCode for the specific
Juno filter used when taking the image.
Returns
-------
: dict
Dictionary of keywords and values that ISIS creates and attaches to the label
"""
filter_code = self.label['IsisCube']['BandBin']['NaifIkCode']
        return {**super().naif_keywords, **util.query_kernel_pool(f"*{filter_code}*")}

# End of ale/drivers/juno_drivers.py
from glob import glob
import os
import struct
import pvl
import spiceypy as spice
import numpy as np
from ale.base import Driver
from ale.base.data_naif import NaifSpice
from ale.base.label_isis import IsisLabel
from ale.base.type_sensor import Framer
from ale.base.type_distortion import NoDistortion
class TGOCassisIsisLabelNaifSpiceDriver(Framer, IsisLabel, NaifSpice, NoDistortion, Driver):
"""
Driver for reading TGO Cassis ISIS3 Labels. These are Labels that have been ingested
into ISIS from PDS EDR images but have not been spiceinit'd yet.
"""
@property
def instrument_id(self):
"""
        Returns an instrument id for uniquely identifying the instrument. This is
        often also piped into SPICE kernels to acquire the IKID, so it should be
        the same ID that SPICE expects in bods2c calls.
        Expects instrument_id to be defined in the IsisLabel mixin. This should
        be a string of the form CaSSIS
Returns
-------
: str
instrument id
"""
id_lookup = {
'CaSSIS': 'TGO_CASSIS',
}
return id_lookup[super().instrument_id]
@property
def ephemeris_start_time(self):
"""
Returns the ephemeris_start_time of the image.
Expects spacecraft_clock_start_count to be defined. This should be a float
containing the start clock count of the spacecraft.
Expects spacecraft_id to be defined. This should be the integer Naif ID code
for the spacecraft.
Returns
-------
: float
ephemeris start time of the image.
"""
return spice.utc2et(str(self.label['IsisCube']['Instrument']['StartTime']))
@property
def sensor_frame_id(self):
return -143420
@property
def sensor_model_version(self):
"""
Returns
-------
: int
ISIS sensor model version
"""
return 1
@property
def sensor_name(self):
        return self.label['IsisCube']['Instrument']['SpacecraftName']

# End of ale/drivers/tgo_drivers.py
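# Minimal usage sketch for the drivers defined in this package (a sketch only: it
# assumes ale.loads is the package's top-level ISD-generation entry point, that the
# relevant NAIF kernels are installed and discoverable, and that
# "example_cassis_image.cub" is a hypothetical CaSSIS cube in the working directory).
if __name__ == "__main__":
    import ale
    isd_string = ale.loads("example_cassis_image.cub")
    print(isd_string)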
from glob import glob
import os
import struct
import pvl
import spiceypy as spice
import numpy as np
from ale.base import Driver
from ale.base.type_distortion import NoDistortion
from ale.base.data_naif import NaifSpice
from ale.base.label_isis import IsisLabel
from ale.base.type_sensor import Framer
class NewHorizonsLorriIsisLabelNaifSpiceDriver(Framer, IsisLabel, NaifSpice, NoDistortion, Driver):
"""
Driver for reading New Horizons LORRI ISIS3 Labels. These are Labels that have been
ingested into ISIS from PDS EDR images but have not been spiceinit'd yet.
"""
@property
def instrument_id(self):
"""
        Returns an instrument id for uniquely identifying the instrument. This is
        often also piped into SPICE kernels to acquire the IKID, so it should be
        the same ID that SPICE expects in bods2c calls.
Returns
-------
: str
instrument id
"""
id_lookup = {
"LORRI" : "NH_LORRI"
}
return id_lookup[super().instrument_id]
@property
def ikid(self):
"""
Overridden to grab the ikid from the Isis Cube since there is no way to
obtain this value with a spice bods2c call. Isis sets this value during
ingestion, based on the original fits file.
For LORRI, there are two options associated with different binning modes:
1x1 binning: -98301
4x4 binning: -98302
Returns
-------
: integer
Naif Integer ID code for the instrument
"""
return self.label['IsisCube']['Kernels']['NaifFrameCode']
@property
def detector_center_line(self):
return float(spice.gdpool('INS{}_BORESIGHT'.format(self.ikid), 0, 3)[0])
@property
def detector_center_sample(self):
return float(spice.gdpool('INS{}_BORESIGHT'.format(self.ikid), 0, 3)[1])
@property
def sensor_name(self):
        return self.label['IsisCube']['Instrument']['SpacecraftName']

# End of ale/drivers/nh_drivers.py
import os
from glob import glob
import numpy as np
import spiceypy as spice
from ale.base import Driver
from ale.base.data_naif import NaifSpice
from ale.base.label_pds3 import Pds3Label
from ale.base.type_sensor import LineScanner
class KaguyaTcPds3NaifSpiceDriver(LineScanner, Pds3Label, NaifSpice, Driver):
"""
Driver for a PDS3 Kaguya Terrain Camera (TC) images. Specifically level2b0 mono and stereo images.
NOTES
-----
    * Kaguya has adjusted values for some of its keys, usually suffixed with `CORRECTED_`.
      These corrected values should always be preferred over the original values.
    * The Kaguya TC doesn't use a generic distortion model; it uses one unique to the TC.
Therefore, methods normally in the Distortion classes are reimplemented here.
"""
@property
def utc_start_time(self):
"""
Returns corrected utc start time.
If no corrected form is found, defaults to the form specified in parent class.
Returns
-------
: str
Start time of the image in UTC YYYY-MM-DDThh:mm:ss[.fff]
"""
return self.label.get('CORRECTED_START_TIME', super().utc_start_time)
@property
def utc_stop_time(self):
"""
Returns corrected utc start time.
If no corrected form is found, defaults to the form specified in parent class.
Returns
-------
: str
Stop time of the image in UTC YYYY-MM-DDThh:mm:ss[.fff]
"""
return self.label.get('CORRECTED_STOP_TIME', super().utc_stop_time)
@property
def instrument_id(self):
"""
Id takes the form of LISM_<INSTRUMENT_ID>_<SD><COMPRESS><SWATH> where
INSTRUMENT_ID = TC1/TC2
SD = S/D short for single or double, which in turn means whether the
label belongs to a mono or stereo image.
COMPRESS = D/T short for DCT or through, we assume image has been
decompressed already
        SWATH = swath mode; different swath modes have different FOVs
Returns
-------
: str
instrument id
"""
instrument = super().instrument_id
swath = self.label.get("SWATH_MODE_ID")[0]
sd = self.label.get("PRODUCT_SET_ID").split("_")[1].upper()
id = "LISM_{}_{}T{}".format(instrument, sd, swath)
return id
@property
def sensor_frame_id(self):
"""
Returns the sensor frame id. Depends on the instrument that was used to
capture the image.
Returns
-------
: int
Sensor frame id
"""
return spice.namfrm("LISM_{}_HEAD".format(super().instrument_id))
@property
def instrument_host_name(self):
"""
Returns the name of the instrument host. Kaguya/SELENE labels do not have an
explicit instrument host name in the pvl, so we use the spacecraft name.
Returns
-------
: str
Spacecraft name as a proxy for instrument host name.
"""
return self.label.get("SPACECRAFT_NAME", None)
@property
def ikid(self):
"""
Returns ikid of LISM_TC1 or LISM_TC2, depending which camera was used
for capturing the image.
Some keys are stored in the IK kernel under a general ikid for TC1/TC2
        presumably because they are not affected by the additional parameters encoded in
the ikid returned by self.ikid. This method exists for those gdpool calls.
Expects instrument_id to be defined in the Pds3Label mixin. This should be
a string containing either TC1 or TC2
Returns
-------
: int
ikid of LISM_TC1 or LISM_TC2
"""
return spice.bods2c("LISM_{}".format(super().instrument_id))
@property
def spacecraft_name(self):
"""
Returns "MISSION_NAME" as a proxy for spacecraft_name.
No NAIF code exists for the spacecraft name 'SELENE-M.' The NAIF code
exists only for 'SELENE' or 'KAGUYA' -- 'SELENE' is captured as
'MISSION_NAME'
Returns
-------
: str
mission name
"""
return self.label.get('MISSION_NAME')
@property
def spacecraft_clock_stop_count(self):
"""
The original SC_CLOCK_STOP_COUNT key is often incorrect and cannot be trusted.
Therefore we get this information from CORRECTED_SC_CLOCK_STOP_COUNT
Returns
-------
: float
spacecraft clock stop count in seconds
"""
return self.label.get('CORRECTED_SC_CLOCK_STOP_COUNT').value
@property
def spacecraft_clock_start_count(self):
"""
The original SC_CLOCK_START_COUNT key is often incorrect and cannot be trusted.
Therefore we get this information from CORRECTED_SC_CLOCK_START_COUNT
Returns
-------
: float
spacecraft clock start count in seconds
"""
return self.label.get('CORRECTED_SC_CLOCK_START_COUNT').value
@property
def ephemeris_start_time(self):
"""
Returns the ephemeris start time of the image. Expects spacecraft_id to
be defined. This should be the integer naif ID code of the spacecraft.
Returns
-------
: float
ephemeris start time of the image
"""
return spice.sct2e(self.spacecraft_id, self.spacecraft_clock_start_count)
@property
def detector_center_line(self):
"""
Returns the center detector line of the detector. Expects tc_id to be
defined. This should be a string of the form LISM_TC1 or LISM_TC2.
We subtract 0.5 from the center line because as per the IK:
Center of the first pixel is defined as "1.0".
Returns
-------
: int
            The detector line of the principal point
"""
return spice.gdpool('INS{}_CENTER'.format(self.ikid), 0, 2)[1] - 0.5
@property
def detector_center_sample(self):
"""
Returns the center detector sample of the detector. Expects tc_id to be
defined. This should be a string of the form LISM_TC1 or LISM_TC2.
We subtract 0.5 from the center sample because as per the IK:
Center of the first pixel is defined as "1.0".
Returns
-------
: int
            The detector sample of the principal point
"""
return spice.gdpool('INS{}_CENTER'.format(self.ikid), 0, 2)[0] - 0.5
@property
def focal2pixel_samples(self):
"""
Calculated using 1/pixel pitch
Expects tc_id to be defined. This should be a string of the form
LISM_TC1 or LISM_TC2.
Returns
-------
: list
focal plane to detector samples
"""
pixel_size = spice.gdpool('INS{}_PIXEL_SIZE'.format(self.ikid), 0, 1)[0]
return [0, 0, -1/pixel_size]
@property
def focal2pixel_lines(self):
"""
Calculated using 1/pixel pitch
Expects tc_id to be defined. This should be a string of the form
LISM_TC1 or LISM_TC2.
Returns
-------
: list
focal plane to detector lines
"""
pixel_size = spice.gdpool('INS{}_PIXEL_SIZE'.format(self.ikid), 0, 1)[0]
if self.spacecraft_direction < 0:
return [0, -1/pixel_size, 0]
elif self.spacecraft_direction > 0:
return [0, 1/pixel_size, 0]
@property
def _odkx(self):
"""
Returns the x coefficients of the optical distortion model.
Expects tc_id to be defined. This should be a string of the form
LISM_TC1 or LISM_TC2.
Returns
-------
: list
Optical distortion x coefficients
"""
return spice.gdpool('INS{}_DISTORTION_COEF_X'.format(self.ikid),0, 4).tolist()
@property
def _odky(self):
"""
Returns the y coefficients of the optical distortion model.
Expects tc_id to be defined. This should be a string of the form
LISM_TC1 or LISM_TC2.
Returns
-------
: list
Optical distortion y coefficients
"""
return spice.gdpool('INS{}_DISTORTION_COEF_Y'.format(self.ikid), 0, 4).tolist()
@property
def boresight_x(self):
"""
Returns the x focal plane coordinate of the boresight.
Expects ikid to be defined. This should be the NAIF integer ID for the
sensor.
Returns
-------
: float
Boresight focal plane x coordinate
"""
return spice.gdpool('INS{}_BORESIGHT'.format(self.ikid), 0, 1)[0]
@property
def boresight_y(self):
"""
Returns the y focal plane coordinate of the boresight.
Expects ikid to be defined. This should be the NAIF integer ID for the
sensor.
Returns
-------
: float
            Boresight focal plane y coordinate
"""
return spice.gdpool('INS{}_BORESIGHT'.format(self.ikid), 1, 1)[0]
@property
def exposure_duration(self):
"""
Returns Line Exposure Duration
Kaguya TC has an unintuitive key for this called CORRECTED_SAMPLING_INTERVAL.
        The original LINE_EXPOSURE_DURATION PDS3 key is often incorrect and cannot
be trusted.
Returns
-------
: float
Line exposure duration
"""
# It's a list, but only sometimes.
# seems to depend on whether you are using the original zipped archives or
        # if it's downloaded from JAXA's image search:
# (https://darts.isas.jaxa.jp/planet/pdap/selene/product_search.html#)
try:
return self.label['CORRECTED_SAMPLING_INTERVAL'][0].value * 0.001 # Scale to seconds
except:
return self.label['CORRECTED_SAMPLING_INTERVAL'].value * 0.001 # Scale to seconds
@property
def focal_length(self):
"""
Returns camera focal length
Expects tc_id to be defined. This should be a string of the form
LISM_TC1 or LISM_TC2.
Returns
-------
: float
Camera focal length
"""
return float(spice.gdpool('INS{}_FOCAL_LENGTH'.format(self.ikid), 0, 1)[0])
@property
def usgscsm_distortion_model(self):
"""
Kaguya uses a unique radial distortion model so we need to overwrite the
method packing the distortion model into the ISD.
from the IK:
Line-of-sight vector of pixel no. n can be expressed as below.
Distortion coefficients information:
INS<INSTID>_DISTORTION_COEF_X = ( a0, a1, a2, a3)
INS<INSTID>_DISTORTION_COEF_Y = ( b0, b1, b2, b3),
Distance r from the center:
r = - (n - INS<INSTID>_CENTER) * INS<INSTID>_PIXEL_SIZE.
Line-of-sight vector v is calculated as
        v[X] = INS<INSTID>_BORESIGHT[X] + a0 + a1*r + a2*r^2 + a3*r^3 ,
        v[Y] = INS<INSTID>_BORESIGHT[Y] + r + b0 + b1*r + b2*r^2 + b3*r^3 ,
        v[Z] = INS<INSTID>_BORESIGHT[Z]
Expects odkx and odky to be defined. These should be a list of optical
distortion x and y coefficients respectively.
Returns
-------
: dict
radial distortion model
"""
return {
"kaguyalism": {
"x" : self._odkx,
"y" : self._odky,
"boresight_x" : self.boresight_x,
"boresight_y" : self.boresight_y
}
}
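    # Evaluation sketch for the model above (illustrative only, not used by the
    # driver; it assumes the Y direction uses the DISTORTION_COEF_Y coefficients as
    # in the IK formula quoted in the docstring): for a focal-plane distance r in mm
    # and coefficient lists x = self._odkx, y = self._odky, a consumer of the ISD
    # would compute
    #   v_x = boresight_x + x[0] + x[1]*r + x[2]*r**2 + x[3]*r**3
    #   v_y = boresight_y + r + y[0] + y[1]*r + y[2]*r**2 + y[3]*r**3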
@property
def detector_start_sample(self):
"""
Returns starting detector sample
        Starting sample varies with swath mode (either FULL, NOMINAL or HALF).
From Kaguya IK kernel:
+-----------------------------------------+--------------+----------------------+---------+
| Sensor | Start Pixel | End Pixel (+dummy) | NAIF ID |
+-----------------------------------------+--------------+----------------------+---------+
| LISM_TC1 | 1 | 4096 | -131351 |
+-----------------------------------------+--------------+----------------------+---------+
| LISM_TC2 | 1 | 4096 | -131371 |
+-----------------------------------------+--------------+----------------------+---------+
| LISM_TC1_WDF (Double DCT Full) | 1 | 4096 | -131352 |
+-----------------------------------------+--------------+----------------------+---------+
| LISM_TC1_WTF (Double Through Full) | 1 | 1600 | -131353 |
+-----------------------------------------+--------------+----------------------+---------+
| LISM_TC1_SDF (Single DCT Full) | 1 | 4096 | -131354 |
+-----------------------------------------+--------------+----------------------+---------+
| LISM_TC1_STF (Single Through Full) | 1 | 3208 | -131355 |
+-----------------------------------------+--------------+----------------------+---------+
| LISM_TC1_WDN (Double DCT Nominal) | 297 | 3796(+4) | -131356 |
+-----------------------------------------+--------------+----------------------+---------+
| LISM_TC1_WTN (Double Through Nominal) | 297 | 1896 | -131357 |
+-----------------------------------------+--------------+----------------------+---------+
| LISM_TC1_SDN (Single DCT Nominal) | 297 | 3796(+4) | -131358 |
+-----------------------------------------+--------------+----------------------+---------+
| LISM_TC1_STN (Single Through Nominal) | 297 | 3504 | -131359 |
+-----------------------------------------+--------------+----------------------+---------+
| LISM_TC1_WDH (Double DCT Half) | 1172 | 2921(+2) | -131360 |
+-----------------------------------------+--------------+----------------------+---------+
| LISM_TC1_WTH (Double Through Half) | 1172 | 2771 | -131361 |
+-----------------------------------------+--------------+----------------------+---------+
| LISM_TC1_SDH (Single DCT Half) | 1172 | 2921(+2) | -131362 |
+-----------------------------------------+--------------+----------------------+---------+
| LISM_TC1_STH (Single Through Half) | 1172 | 2923 | -131363 |
+-----------------------------------------+--------------+----------------------+---------+
| LISM_TC1_SSH (Single SP_support Half) | 1172 | 2921 | -131364 |
+-----------------------------------------+--------------+----------------------+---------+
| LISM_TC2_WDF (Double DCT Full) | 1 | 4096 | -131372 |
+-----------------------------------------+--------------+----------------------+---------+
| LISM_TC2_WTF (Double Through Full) | 1 | 1600 | -131373 |
+-----------------------------------------+--------------+----------------------+---------+
| LISM_TC2_SDF (Single DCT Full) | 1 | 4096 | -131374 |
+-----------------------------------------+--------------+----------------------+---------+
| LISM_TC2_STF (Single Through Full) | 1 | 3208 | -131375 |
+-----------------------------------------+--------------+----------------------+---------+
| LISM_TC2_WDN (Double DCT Nominal) | 297 | 3796(+4) | -131376 |
+-----------------------------------------+--------------+----------------------+---------+
| LISM_TC2_WTN (Double Through Nominal) | 297 | 1896 | -131377 |
+-----------------------------------------+--------------+----------------------+---------+
| LISM_TC2_SDN (Single DCT Nominal) | 297 | 3796(+4) | -131378 |
+-----------------------------------------+--------------+----------------------+---------+
| LISM_TC2_STN (Single Through Nominal) | 297 | 3504 | -131379 |
+-----------------------------------------+--------------+----------------------+---------+
| LISM_TC2_WDH (Double DCT Half) | 1172 | 2921(+2) | -131380 |
+-----------------------------------------+--------------+----------------------+---------+
| LISM_TC2_WTH (Double Through Half) | 1172 | 2771 | -131381 |
+-----------------------------------------+--------------+----------------------+---------+
| LISM_TC2_SDH (Single DCT Half) | 1172 | 2921(+2) | -131382 |
+-----------------------------------------+--------------+----------------------+---------+
| LISM_TC2_STH (Single Through Half) | 1172 | 2923 | -131383 |
+-----------------------------------------+--------------+----------------------+---------+
| LISM_TC2_SSH (Single SP_support Half) | 1172 | 2921 | -131384 |
+-----------------------------------------+--------------+----------------------+---------+
Returns
-------
        : float
Detector sample corresponding to the first image sample
"""
return self.label["FIRST_PIXEL_NUMBER"] - .5
@property
def detector_start_line(self):
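        """
        Returns the starting detector line.
        When the spacecraft is moving inverted (spacecraft_direction > 0), the start
        line is 1; otherwise the value from the parent class is used.
        Returns
        -------
        : int
          Detector line corresponding to the first image line
        """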
if self.spacecraft_direction < 0:
return super().detector_start_line
elif self.spacecraft_direction > 0:
return 1
@property
def spacecraft_direction(self):
"""
Gets the moving direction of the spacecraft from the label, where -1 is moving
as intended and 1 is moving inverted.
Returns
-------
: int
Moving direction of the spacecraft
"""
return int(self.label['SATELLITE_MOVING_DIRECTION'])
@property
def sensor_model_version(self):
"""
Returns
-------
: int
ISIS sensor model version
"""
return 1
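# Editorial sketch (not part of the original driver): how the LISM line-of-sight formula
# quoted in `usgscsm_distortion_model` above could be evaluated. All argument values are
# hypothetical placeholders; real coefficients, boresight, center, and pixel size come from
# the instrument kernel (spice.gdpool) as shown in the properties above.
def _example_lism_line_of_sight(n, center, pixel_size, boresight, coef_x, coef_y):
    """Evaluate v = boresight + sum(c_i * r**i) with r = -(n - center) * pixel_size."""
    r = -(n - center) * pixel_size
    v_x = boresight[0] + sum(a * r**i for i, a in enumerate(coef_x))
    v_y = boresight[1] + sum(b * r**i for i, b in enumerate(coef_y))
    v_z = boresight[2]
    return [v_x, v_y, v_z]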
class KaguyaMiPds3NaifSpiceDriver(LineScanner, Pds3Label, NaifSpice, Driver):
"""
    Driver for PDS3 Kaguya Multiband Imager (MI) images, specifically level 2B2 VIS and NIR images.
NOTES
-----
    * Kaguya has adjusted values for some of its keys, usually prefixed with `CORRECTED_`.
These corrected values should always be preferred over the original values.
"""
@property
def utc_start_time(self):
"""
Returns corrected utc start time.
If no corrected form is found, defaults to the form specified in parent class.
Returns
-------
: str
Start time of the image in UTC YYYY-MM-DDThh:mm:ss[.fff]
"""
return self.label.get('CORRECTED_START_TIME', super().utc_start_time)
@property
def utc_stop_time(self):
"""
        Returns corrected utc stop time.
If no corrected form is found, defaults to the form specified in parent class.
Returns
-------
: str
Stop time of the image in UTC YYYY-MM-DDThh:mm:ss[.fff]
"""
return self.label.get('CORRECTED_STOP_TIME', super().utc_stop_time)
@property
def base_band(self):
"""
Which band the bands are registered to.
"""
band_map = {
"MV1" : "MI-VIS1",
"MV2" : "MI-VIS2",
"MV3" : "MI-VIS3",
"MV4" : "MI-VIS4",
"MV5" : "MI-VIS5",
"MN1" : "MI-NIR1",
"MN2" : "MI-NIR2",
"MN3" : "MI-NIR3",
"MN4" : "MI-NIR4"
}
base_band = band_map[self.label.get("BASE_BAND")]
return base_band
@property
def instrument_id(self):
"""
Id takes the form of LISM_<BASE_BAND> where <BASE_BAND> is which band
the bands were registered to.
Returns
-------
: str
instrument id
"""
        return f"LISM_{self.base_band}"
@property
def sensor_frame_id(self):
"""
Returns the sensor frame id. Depends on the instrument that was used to
capture the image.
Returns
-------
: int
Sensor frame id
"""
spectra = self.base_band[3]
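        # (base_band is e.g. "MI-VIS1" or "MI-NIR3"; index 3 above picks out the spectra letter "V" or "N")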
return spice.namfrm(f"LISM_MI_{spectra}_HEAD")
@property
def spacecraft_name(self):
"""
Returns "MISSION_NAME" as a proxy for spacecraft_name.
No NAIF code exists for the spacecraft name 'SELENE-M.' The NAIF code
exists only for 'SELENE' or 'KAGUYA' -- 'SELENE' is captured as
'MISSION_NAME'
Returns
-------
: str
mission name
"""
return self.label.get('MISSION_NAME')
@property
def spacecraft_clock_stop_count(self):
"""
The original SC_CLOCK_STOP_COUNT key is often incorrect and cannot be trusted.
Therefore we get this information from CORRECTED_SC_CLOCK_STOP_COUNT
Returns
-------
: float
spacecraft clock stop count in seconds
"""
return self.label.get('CORRECTED_SC_CLOCK_STOP_COUNT').value
@property
def spacecraft_clock_start_count(self):
"""
The original SC_CLOCK_START_COUNT key is often incorrect and cannot be trusted.
Therefore we get this information from CORRECTED_SC_CLOCK_START_COUNT
Returns
-------
: float
spacecraft clock start count in seconds
"""
return self.label.get('CORRECTED_SC_CLOCK_START_COUNT').value
@property
def ephemeris_start_time(self):
"""
Returns the ephemeris start time of the image. Expects spacecraft_id to
be defined. This should be the integer naif ID code of the spacecraft.
Returns
-------
: float
ephemeris start time of the image
"""
return spice.sct2e(self.spacecraft_id, self.spacecraft_clock_start_count)
@property
def detector_center_line(self):
"""
Returns the center detector line of the detector. Expects ikid to be
defined. This should be the NAIF integer ID code for the sensor.
We subtract 0.5 from the center line because as per the IK:
Center of the first pixel is defined as "1.0".
Returns
-------
        : float
          The detector line of the principal point
"""
return spice.gdpool('INS{}_CENTER'.format(self.ikid), 0, 2)[1] - 0.5
@property
def detector_center_sample(self):
"""
Returns the center detector sample of the detector. Expects ikid to be
defined. This should be the NAIF integer ID code for the sensor.
We subtract 0.5 from the center sample because as per the IK:
Center of the first pixel is defined as "1.0".
Returns
-------
        : float
          The detector sample of the principal point
"""
return spice.gdpool('INS{}_CENTER'.format(self.ikid), 0, 2)[0] - 0.5
@property
def focal2pixel_samples(self):
"""
Calculated using 1/pixel pitch
Expects ikid to be defined. This should be the NAIF integer ID code
for the sensor.
Returns
-------
: list
focal plane to detector samples
"""
pixel_size = spice.gdpool('INS{}_PIXEL_SIZE'.format(self.ikid), 0, 1)[0]
return [0, 0, -1/pixel_size]
@property
def focal2pixel_lines(self):
"""
Calculated using 1/pixel pitch
Expects ikid to be defined. This should be the NAIF integer ID code
for the sensor.
Returns
-------
: list
focal plane to detector lines
"""
pixel_size = spice.gdpool('INS{}_PIXEL_SIZE'.format(self.ikid), 0, 1)[0]
return [0, 1/pixel_size, 0]
@property
def _odkx(self):
"""
Returns the x coefficients of the optical distortion model.
Expects ikid to be defined. This should be the NAIF integer ID code
for the sensor.
Returns
-------
: list
Optical distortion x coefficients
"""
return spice.gdpool('INS{}_DISTORTION_COEF_X'.format(self.ikid),0, 4).tolist()
@property
def _odky(self):
"""
Returns the y coefficients of the optical distortion model.
        Expects ikid to be defined. This should be the NAIF integer ID code
        for the sensor.
Returns
-------
: list
Optical distortion y coefficients
"""
return spice.gdpool('INS{}_DISTORTION_COEF_Y'.format(self.ikid), 0, 4).tolist()
@property
def boresight_x(self):
"""
Returns the x focal plane coordinate of the boresight.
Expects ikid to be defined. This should be the NAIF integer ID for the
sensor.
Returns
-------
: float
Boresight focal plane x coordinate
"""
return spice.gdpool('INS{}_BORESIGHT'.format(self.ikid), 0, 1)[0]
@property
def boresight_y(self):
"""
Returns the y focal plane coordinate of the boresight.
Expects ikid to be defined. This should be the NAIF integer ID for the
sensor.
Returns
-------
: float
          Boresight focal plane y coordinate
"""
return spice.gdpool('INS{}_BORESIGHT'.format(self.ikid), 1, 1)[0]
@property
def line_exposure_duration(self):
"""
Returns Line Exposure Duration
Kaguya has an unintuitive key for this called CORRECTED_SAMPLING_INTERVAL.
        The original LINE_EXPOSURE_DURATION PDS3 key is often incorrect and cannot
be trusted.
Returns
-------
: float
Line exposure duration
"""
# It's a list, but only sometimes.
# seems to depend on whether you are using the original zipped archives or
# if its downloaded from Jaxa's image search:
# (https://darts.isas.jaxa.jp/planet/pdap/selene/product_search.html#)
try:
return self.label['CORRECTED_SAMPLING_INTERVAL'][0].value * 0.001 # Scale to seconds
        except Exception:
return self.label['CORRECTED_SAMPLING_INTERVAL'].value * 0.001 # Scale to seconds
@property
def focal_length(self):
"""
Returns camera focal length
Expects ikid to be defined. This should be the NAIF ID for the base band.
Returns
-------
: float
Camera focal length
"""
return float(spice.gdpool('INS{}_FOCAL_LENGTH'.format(self.ikid), 0, 1)[0])
@property
def usgscsm_distortion_model(self):
"""
Kaguya uses a unique radial distortion model so we need to overwrite the
method packing the distortion model into the ISD.
from the IK:
Line-of-sight vector of pixel no. n can be expressed as below.
Distortion coefficients information:
INS<INSTID>_DISTORTION_COEF_X = ( a0, a1, a2, a3)
INS<INSTID>_DISTORTION_COEF_Y = ( b0, b1, b2, b3),
Distance r from the center:
r = - (n - INS<INSTID>_CENTER) * INS<INSTID>_PIXEL_SIZE.
Line-of-sight vector v is calculated as
        v[X] = INS<INSTID>_BORESIGHT[X] + a0 + a1*r + a2*r^2 + a3*r^3 ,
        v[Y] = INS<INSTID>_BORESIGHT[Y] + b0 + b1*r + b2*r^2 + b3*r^3 ,
        v[Z] = INS<INSTID>_BORESIGHT[Z]
Expects odkx and odky to be defined. These should be a list of optical
distortion x and y coefficients respectively.
Returns
-------
: dict
radial distortion model
"""
return {
"kaguyalism": {
"x" : self._odkx,
"y" : self._odky,
"boresight_x" : self.boresight_x,
"boresight_y" : self.boresight_y
}
}
@property
def sensor_model_version(self):
"""
Returns
-------
: int
ISIS sensor model version
"""
        return 1
# --- end of file: Ale-0.8.4/ale/drivers/selene_drivers.py ---
import spiceypy as spice
import ale
from ale.base.data_naif import NaifSpice
from ale.base.label_isis import IsisLabel
from ale.base.type_sensor import Framer
from ale.base.base import Driver
class VikingIsisLabelNaifSpiceDriver(Framer, IsisLabel, NaifSpice, Driver):
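    """
    Driver for reading Viking Orbiter ISIS labels, using NAIF SPICE data for
    ephemeris and instrument information.
    """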
@property
def spacecraft_name(self):
"""
Overridden to work with spice calls.
Returns
-------
: str
Name of the spacecraft.
"""
name_lookup = {
'VIKING_ORBITER_1': 'VIKING ORBITER 1',
'VIKING_ORBITER_2': 'VIKING ORBITER 2'
}
return name_lookup[super().spacecraft_name]
@property
def alt_ikid(self):
"""
Viking Orbiter 1 & 2 each have an alternate naif id code as defined in the
SCLK kernels.
Expects spacecraft name to be defined.
Returns -27999 for vo1 and -30999 for vo2
Returns
-------
: integer
Alternate Naif Integer ID code for the instrument
"""
alt_id_lookup = {
'VIKING ORBITER 1': -27999,
'VIKING ORBITER 2':-30999
}
return alt_id_lookup[self.spacecraft_name]
@property
def ikid(self):
"""
Overridden to grab the ikid from the Isis Cube since there is no way to
obtain this value with a spice bods2c call.
Returns
-------
: integer
Naif Integer ID code for the instrument
"""
return self.label['IsisCube']['Kernels']['NaifFrameCode']
@property
def ephemeris_start_time(self):
"""
Overridden to use the alternate instrument ID. Also computes an offset to match
what is being done in ISIS code.
Expects spacecraft_clock_start_count to be defined.
Returns
-------
: float
ephemeris start time of the image
"""
ephemeris_start_time = spice.scs2e(self.alt_ikid, str(self.spacecraft_clock_start_count))
if self.exposure_duration <= .420:
offset1 = 7.0 / 8.0 * 4.48
else:
offset1 = 3.0 / 8.0 * 4.48
offset2 = 1.0 / 64.0 * 4.48
        return ephemeris_start_time + offset1 + offset2
# --- end of file: Ale-0.8.4/ale/drivers/viking_drivers.py ---
import pvl
import zlib
import importlib
import inspect
import itertools
from itertools import chain
import os
from glob import glob
import json
import numpy as np
import datetime
import traceback
from collections import OrderedDict
from ale.formatters.usgscsm_formatter import to_usgscsm
from ale.formatters.isis_formatter import to_isis
from ale.formatters.formatter import to_isd
from ale.base.data_isis import IsisSpice
from abc import ABC
# Explicit list of disabled drivers
__disabled_drivers__ = ["ody_drivers",
"hayabusa2_drivers",
"juno_drivers",
"tgo_drivers"]
# dynamically load drivers
__all__ = [os.path.splitext(os.path.basename(d))[0] for d in glob(os.path.join(os.path.dirname(__file__), '*_drivers.py'))]
__all__ = [driver for driver in __all__ if driver not in __disabled_drivers__]
__driver_modules__ = [importlib.import_module('.'+m, package='ale.drivers') for m in __all__]
__formatters__ = {'usgscsm': to_usgscsm,
'isis': to_isis,
'ale' : to_isd}
def sort_drivers(drivers=[]):
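    """
    Sort drivers so that drivers using attached ephemeris data (IsisSpice mixin)
    are tried last, after drivers that use external NAIF kernels.
    """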
return list(sorted(drivers, key=lambda x:IsisSpice in x.__bases__, reverse=False))
class AleJsonEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, datetime.date):
return obj.isoformat()
return json.JSONEncoder.default(self, obj)
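# Editorial note: the encoder above lets numpy scalars and arrays pass through json.dumps,
# e.g. json.dumps({"v": np.float32(1.5)}, cls=AleJsonEncoder) -> '{"v": 1.5}'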
def load(label, props={}, formatter='ale', verbose=False):
"""
Attempt to load a given label from all possible drivers.
This function opens up the label file and attempts to produce an ISD in the
format specified using the supplied properties. Drivers are tried sequentially
until an ISD is successfully created. Drivers that use external ephemeris
    data are tested before drivers that use attached ephemeris data.
Parameters
----------
label : str
String path to the given label file
props : dict
A dictionary of optional keywords/parameters for use in driver
loading. Each driver specifies its own set of properties to use.
For example, Drivers that use the NaifSpice mix-in use the 'kernels'
property to specify an explicit set of kernels and load order.
formatter : {'ale', 'isis', 'usgscsm'}
Output format for the ISD. As of 0.8.0, it is recommended that
the `ale` formatter is used. The `isis` and `usgscsm` formatters
        are retained for backwards compatibility.
verbose : bool
If True, displays debug output specifying which drivers were
attempted and why they failed.
Returns
-------
dict
The ISD as a dictionary
"""
if isinstance(formatter, str):
formatter = __formatters__[formatter]
drivers = chain.from_iterable(inspect.getmembers(dmod, lambda x: inspect.isclass(x) and "_driver" in x.__module__) for dmod in __driver_modules__)
drivers = sort_drivers([d[1] for d in drivers])
for driver in drivers:
if verbose:
print(f'Trying {driver}')
try:
res = driver(label, props=props)
# get instrument_id to force early failure
res.instrument_id
with res as driver:
isd = formatter(driver)
if verbose:
print("Success with: ", driver)
print("ISD:\n", json.dumps(isd, indent=2, cls=AleJsonEncoder))
return isd
except Exception as e:
if verbose:
print(f'Failed: {e}\n')
traceback.print_exc()
raise Exception('No Such Driver for Label')
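# Editorial sketch (not part of the original module): a minimal, hypothetical call to the
# `load` function above. The label path and kernel list are placeholders; NaifSpice-based
# drivers expect a 'kernels' entry in props, as described in the docstring.
def _example_load_usage():
    example_label = "/path/to/image.lbl"             # hypothetical label file
    props = {"kernels": ["/path/to/metakernel.tm"]}  # hypothetical kernel list
    isd = load(example_label, props=props, formatter="ale", verbose=True)
    return json.dumps(isd, indent=2, cls=AleJsonEncoder)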
def loads(label, props={}, formatter='ale', verbose=False):
"""
Attempt to load a given label from all possible drivers.
This function is the same as load, except it returns a JSON formatted string.
Returns
-------
str
The ISD as a JSON formatted string
See Also
--------
load
"""
res = load(label, props, formatter, verbose=verbose)
    return json.dumps(res, cls=AleJsonEncoder)
# --- end of file: Ale-0.8.4/ale/drivers/__init__.py ---
from glob import glob
import os
import pvl
import spiceypy as spice
import numpy as np
from ale.base import Driver
from ale.base.data_naif import NaifSpice
from ale.base.label_pds3 import Pds3Label
from ale.base.label_isis import IsisLabel
from ale.base.type_sensor import Framer
from ale.base.data_isis import IsisSpice
ID_LOOKUP = {
'MDIS-WAC': 'MSGR_MDIS_WAC',
'MDIS-NAC':'MSGR_MDIS_NAC',
}
class MessengerMdisIsisLabelIsisSpiceDriver(Framer, IsisLabel, IsisSpice, Driver):
@property
def spacecraft_name(self):
"""
Spacecraft name used in various SPICE calls to acquire
        ephemeris data. Messenger MDIS PDS3 image labels do not have a SPACECRAFT_NAME keyword,
so we override it here to find INSTRUMENT_HOST_NAME in the label.
Returns
-------
: str
Spacecraft name
"""
return self.instrument_host_name
@property
def fikid(self):
"""
Naif ID code used in calculating focal length
Expects filter_number to be defined. This should be an integer containing
the filter number from the pds3 label.
        Expects ikid to be defined. This should be the integer Naif ID code for
the instrument.
Returns
-------
: int
Naif ID code used in calculating focal length
"""
if isinstance(self, Framer):
fn = super().filter_number
if fn == 'N/A':
fn = 0
else:
fn = 0
return self.ikid - int(fn)
@property
def instrument_id(self):
"""
        Returns an instrument id for uniquely identifying the instrument. It is often
        also piped into SPICE kernels to acquire IKIDs, so it should be the same ID
        that SPICE expects in bods2c calls.
        Expects instrument_id to be defined in the IsisLabel mixin. This should
be a string of the form MDIS-WAC or MDIS-NAC.
Returns
-------
: str
instrument id
"""
return ID_LOOKUP[super().instrument_id]
class MessengerMdisPds3NaifSpiceDriver(Framer, Pds3Label, NaifSpice, Driver):
"""
    Driver for reading MDIS PDS3 labels. Requires a Spice mixin to acquire additional
ephemeris and instrument data located exclusively in spice kernels.
"""
@property
def spacecraft_name(self):
"""
Spacecraft name used in various SPICE calls to acquire
        ephemeris data. Messenger MDIS PDS3 image labels do not have a SPACECRAFT_NAME keyword,
so we override it here to find INSTRUMENT_HOST_NAME in the label.
Returns
-------
: str
Spacecraft name
"""
return self.instrument_host_name
@property
def fikid(self):
"""
Naif ID code used in calculating focal length
Expects filter_number to be defined. This should be an integer containing
the filter number from the pds3 label.
        Expects ikid to be defined. This should be the integer Naif ID code for
the instrument.
Returns
-------
: int
Naif ID code used in calculating focal length
"""
if isinstance(self, Framer):
fn = super().filter_number
if fn == 'N/A':
fn = 0
else:
fn = 0
return self.ikid - int(fn)
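        # Illustrative example (hypothetical values): with ikid -236800 and WAC filter
        # number 7, this would give fikid = -236800 - 7 = -236807.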
@property
def instrument_id(self):
"""
        Returns an instrument id for uniquely identifying the instrument. It is often
        also piped into SPICE kernels to acquire IKIDs, so it should be the same ID
        that SPICE expects in bods2c calls.
Expects instrument_id to be defined in the Pds3Label mixin. This should
be a string of the form MDIS-WAC or MDIS-NAC.
Returns
-------
: str
instrument id
"""
return ID_LOOKUP[super().instrument_id]
@property
def sampling_factor(self):
"""
Returns the summing factor from the PDS3 label. For example a return value of 2
indicates that 2 lines and 2 samples (4 pixels) were summed and divided by 4
to produce the output pixel value.
NOTE: This is overwritten for the messenger driver as the value is stored in "MESS:PIXELBIN"
Returns
-------
: int
Number of samples and lines combined from the original data to produce a single pixel in this image
"""
pixel_bin = self.label['MESS:PIXELBIN']
if pixel_bin == 0:
pixel_bin = 1
return pixel_bin * 2
@property
def focal_length(self):
"""
Computes Focal Length from Kernels
        MDIS has a temperature-dependent focal length, and the coefficients need to
        be acquired from IK SPICE kernels (the coefficients describe focal length as a
        function of temperature). Focal plane temperatures are acquired from the PDS3 label.
Returns
-------
: double
focal length in meters
"""
coeffs = spice.gdpool('INS{}_FL_TEMP_COEFFS'.format(self.fikid), 0, 6)
# reverse coeffs, MDIS coeffs are listed a_0, a_1, a_2 ... a_n where
# numpy wants them a_n, a_n-1, a_n-2 ... a_0
f_t = np.poly1d(coeffs[::-1])
# eval at the focal_plane_temperature
return f_t(self.label['FOCAL_PLANE_TEMPERATURE'].value)
@property
def detector_center_sample(self):
"""
Returns center detector sample acquired from Spice Kernels.
        Expects ikid to be defined. This should be the integer Naif ID code for
the instrument.
NOTE: This value is defined in an ISIS iak as 512.5, but we subtract 0.5 from the
ISIS center sample because ISIS detector coordinates are 0.5 based.
Returns
-------
: float
center detector sample
"""
return 512
@property
def detector_center_line(self):
"""
Returns center detector line acquired from Spice Kernels.
        Expects ikid to be defined. This should be the integer Naif ID code for
the instrument.
NOTE: This value is defined in an ISIS iak as 512.5, but we subtract 0.5 from the
        ISIS center line because ISIS detector coordinates are 0.5 based.
Returns
-------
: float
center detector line
"""
return 512
@property
def sensor_model_version(self):
"""
Returns
-------
: int
ISIS sensor model version
"""
return 2
@property
def usgscsm_distortion_model(self):
"""
Returns a dictionary containing the distortion model.
        Expects odtx and odty to be defined. These should be the optical distortion
x and y coefficients respectively.
Returns
-------
: dict
radial distortion model
"""
return {
"transverse": {
"x" : self.odtx,
"y" : self.odty
}
}
@property
def pixel_size(self):
"""
        Overridden because the MESSENGER IK uses PIXEL_PITCH and the units
are already millimeters
Returns
-------
        : float
          pixel size
"""
return spice.gdpool('INS{}_PIXEL_PITCH'.format(self.ikid), 0, 1)
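# Editorial sketch (not part of the original driver): how the temperature-dependent focal
# length in `focal_length` above is evaluated. Coefficient and temperature values below are
# made-up placeholders; real values come from INS<fikid>_FL_TEMP_COEFFS and the image label.
def _example_mdis_focal_length(coeffs, focal_plane_temperature):
    """Evaluate f(T) = a_0 + a_1*T + ... given coefficients listed low-order first."""
    return np.poly1d(coeffs[::-1])(focal_plane_temperature)
# e.g. _example_mdis_focal_length([549.2, 0.01, 0, 0, 0, 0], 25.0) == 549.2 + 0.01*25 = 549.45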
class MessengerMdisIsisLabelNaifSpiceDriver(IsisLabel, NaifSpice, Framer, Driver):
"""
Driver for reading MDIS ISIS3 Labels. These are Labels that have been ingested
into ISIS from PDS EDR images. Any SPCIE data attached by the spiceinit application
will be ignored.
"""
@property
def platform_name(self):
"""
Returns the name of the platform containing the sensor. This is usually
the spacecraft name.
Messenger MDIS ISIS labels use upper camel case so this converts it to
all upper case.
Returns
-------
: str
Spacecraft name
"""
return super().platform_name.upper()
@property
def instrument_id(self):
"""
        Returns an instrument id for uniquely identifying the instrument. It is often
        also piped into SPICE kernels to acquire IKIDs, so it should be the same ID
        that SPICE expects in bods2c calls.
        Expects instrument_id to be defined in the IsisLabel mixin. This should
be a string of the form MDIS-WAC or MDIS-NAC.
Returns
-------
: str
instrument id
"""
return ID_LOOKUP[super().instrument_id]
@property
def ephemeris_start_time(self):
"""
Returns the ephemeris_start_time of the image.
Expects spacecraft_clock_start_count to be defined. This should be a float
containing the start clock count of the spacecraft.
Expects spacecraft_id to be defined. This should be the integer Naif ID code
for the spacecraft.
Returns
-------
: float
ephemeris start time of the image.
"""
        if not hasattr(self, '_ephemeris_start_time'):
            sclock = self.spacecraft_clock_start_count
            self._ephemeris_start_time = spice.scs2e(self.spacecraft_id, sclock)
        return self._ephemeris_start_time
@property
def usgscsm_distortion_model(self):
"""
Returns a dictionary containing the distortion model.
        Expects odtx and odty to be defined. These should be the optical distortion
x and y coefficients respectively.
Returns
-------
: dict
radial distortion model
"""
return {
"transverse": {
"x" : self.odtx,
"y" : self.odty
}
}
@property
def fikid(self):
"""
Naif ID code used in calculating focal length
Expects filter_number to be defined. This should be an integer containing
the filter number from the pds3 label.
        Expects ikid to be defined. This should be the integer Naif ID code for
the instrument.
Returns
-------
: int
Naif ID code used in calculating focal length
"""
        if self.instrument_id == 'MSGR_MDIS_WAC':
fn = self.label['IsisCube']['BandBin']['Number']
if fn == 'N/A':
fn = 0
return self.ikid - int(fn)
return self.ikid
@property
def focal_length(self):
"""
Computes Focal Length from Kernels
        MDIS has a temperature-dependent focal length, and the coefficients need to
        be acquired from IK SPICE kernels (the coefficients describe focal length as a
        function of temperature). Focal plane temperatures are acquired from the ISIS label.
Returns
-------
: double
focal length in meters
"""
coeffs = spice.gdpool('INS{}_FL_TEMP_COEFFS'.format(self.fikid), 0, 6)
# reverse coeffs, MDIS coeffs are listed a_0, a_1, a_2 ... a_n where
# numpy wants them a_n, a_n-1, a_n-2 ... a_0
f_t = np.poly1d(coeffs[::-1])
# eval at the focal_plane_temperature
return f_t(self.label['IsisCube']['Instrument']['FocalPlaneTemperature'].value)
@property
def detector_center_sample(self):
"""
Returns center detector sample acquired from Spice Kernels
        Expects ikid to be defined. This should be the integer Naif ID code for
the instrument.
We subtract 0.5 from the ISIS center sample because ISIS detector
coordinates are 0.5 based.
Returns
-------
: float
detector center sample
"""
return float(spice.gdpool('INS{}_CCD_CENTER'.format(self.ikid), 0, 3)[0]) - 0.5
@property
def detector_center_line(self):
"""
Returns center detector line acquired from Spice Kernels
        Expects ikid to be defined. This should be the integer Naif ID code for
the instrument.
We subtract 0.5 from the ISIS center line because ISIS detector
coordinates are 0.5 based.
Returns
-------
: float
detector center line
"""
return float(spice.gdpool('INS{}_CCD_CENTER'.format(self.ikid), 0, 3)[1]) - 0.5
@property
def sensor_model_version(self):
"""
Returns
-------
: int
ISIS sensor model version
"""
return 2
@property
def pixel_size(self):
"""
        Overridden because the MESSENGER IK uses PIXEL_PITCH and the units
are already millimeters
Returns
-------
        : float
          pixel size
"""
return spice.gdpool('INS{}_PIXEL_PITCH'.format(self.ikid), 0, 1)
@property
def sampling_factor(self):
"""
Returns the summing factor from the PDS3 label. For example a return value of 2
indicates that 2 lines and 2 samples (4 pixels) were summed and divided by 4
to produce the output pixel value.
        NOTE: This is overwritten for the messenger driver as the value is stored in the PixelBinningMode keyword
Returns
-------
: int
Number of samples and lines combined from the original data to produce a single pixel in this image
"""
pixel_bin = self.label['IsisCube']['Instrument']['PixelBinningMode']
if pixel_bin == 0:
pixel_bin = 1
        return pixel_bin * 2
# --- end of file: Ale-0.8.4/ale/drivers/mess_drivers.py ---
AlekSIS (School Information System) — App كتاب السجل (class register and school records)
========================================================================================
AlekSIS
-------
This is an application for use with the `AlekSIS®`_ platform.
Features
--------
This AlekSIS app currently provides the following features for managing digital class registers:
* For users:
* Manage and assign group roles
* Manage custom excuse types
* Manage extra marks (e. g. forgotten homework)
* Manage group notes for every lesson
* Manage lesson documentations for every lesson
* Manage personal notes for every lesson
* Show all owned groups of the current person
* Show all students of the current person
* Show filterable (week) overview for lesson documentations and personal/group notes
* Manage absence of persons
* Show overview of all students with statistics
Licence
-------
::
Copyright © 2019, 2021 Dominik George <[email protected]>
Copyright © 2019, 2020 Tom Teichler <[email protected]>
Copyright © 2019 mirabilos <[email protected]>
Copyright © 2020, 2021, 2022 Jonathan Weth <[email protected]>
Copyright © 2020, 2021 Julian Leucker <[email protected]>
Copyright © 2020, 2022 Hangzhi Yu <[email protected]>
Copyright © 2021 Lloyd Meins <[email protected]>
Licenced under the EUPL, version 1.2 or later, by Teckids e.V. (Bonn, Germany).
Please see the LICENCE.rst file accompanying this distribution for the
full licence text or on the `European Union Public Licence`_ website
https://joinup.ec.europa.eu/collection/eupl/guidelines-users-and-developers
(including all other official language versions).
Trademark
---------
AlekSIS® is a registered trademark of the AlekSIS open source project, represented
by Teckids e.V. Please refer to the `trademark policy`_ for hints on using the trademark
AlekSIS®.
.. _AlekSIS®: https://edugit.org/AlekSIS/Official/AlekSIS
.. _European Union Public Licence: https://eupl.eu/
.. _trademark policy: https://aleksis.org/pages/about
.. end of file: aleksis_app_alsijil-3.0.1/README.rst
======================================
EUROPEAN UNION PUBLIC LICENCE v. 1.2
======================================
--------------------------------------
EUPL © the European Union 2007, 2016
--------------------------------------
This European Union Public Licence (the ‘EUPL’) applies to the Work
(as defined below) which is provided under the terms of this Licence.
Any use of the Work, other than as authorised under this Licence is
prohibited (to the extent such use is covered by a right of the
copyright holder of the Work).
The Work is provided under the terms of this Licence when the Licensor
(as defined below) has placed the following notice immediately following
the copyright notice for the Work:
Licensed under the EUPL
or has expressed by any other means his willingness to license under
the EUPL.
1. Definitions
==============
In this Licence, the following terms have the following meaning:
* ‘The Licence’: this Licence.
* ‘The Original Work’: the work or software distributed or communicated
by the Licensor under this Licence, available as Source Code and also
as Executable Code as the case may be.
* ‘Derivative Works’: the works or software that could be created by the
Licensee, based upon the Original Work or modifications thereof. This
Licence does not define the extent of modification or dependence on
the Original Work required in order to classify a work as a Derivative
Work; this extent is determined by copyright law applicable in the
country mentioned in Article 15.
* ‘The Work’: the Original Work or its Derivative Works.
* ‘The Source Code’: the human-readable form of the Work which is the
most convenient for people to study and modify.
* ‘The Executable Code’: any code which has generally been compiled and
which is meant to be interpreted by a computer as a program.
* ‘The Licensor’: the natural or legal person that distributes or
communicates the Work under the Licence.
* ‘Contributor(s)’: any natural or legal person who modifies the Work
under the Licence, or otherwise contributes to the creation of a
Derivative Work.
* ‘The Licensee’ or ‘You’: any natural or legal person who makes any
usage of the Work under the terms of the Licence.
* ‘Distribution’ or ‘Communication’: any act of selling, giving,
lending, renting, distributing, communicating, transmitting, or
otherwise making available, online or offline, copies of the Work or
providing access to its essential functionalities at the disposal of
any other natural or legal person.
2. Scope of the rights granted by the Licence
=============================================
The Licensor hereby grants You a worldwide, royalty-free, non-exclusive,
sublicensable licence to do the following, for the duration of copyright
vested in the Original Work:
* use the Work in any circumstance and for all usage,
* reproduce the Work,
* modify the Work, and make Derivative Works based upon the Work,
* communicate to the public, including the right to make available or
display the Work or copies thereof to the public and perform publicly,
as the case may be, the Work,
* distribute the Work or copies thereof,
* lend and rent the Work or copies thereof,
* sublicense rights in the Work or copies thereof.
Those rights can be exercised on any media, supports and formats,
whether now known or later invented, as far as the applicable law
permits so.
In the countries where moral rights apply, the Licensor waives his right
to exercise his moral right to the extent allowed by law in order to
make effective the licence of the economic rights here above listed.
The Licensor grants to the Licensee royalty-free, non-exclusive usage
rights to any patents held by the Licensor, to the extent necessary to
make use of the rights granted on the Work under this Licence.
3. Communication of the Source Code
===================================
The Licensor may provide the Work either in its Source Code form, or as
Executable Code. If the Work is provided as Executable Code, the
Licensor provides in addition a machine-readable copy of the Source Code
of the Work along with each copy of the Work that the Licensor
distributes or indicates, in a notice following the copyright notice
attached to the Work, a repository where the Source Code is easily and
freely accessible for as long as the Licensor continues to distribute or
communicate the Work.
4. Limitations on copyright
===========================
Nothing in this Licence is intended to deprive the Licensee of the
benefits from any exception or limitation to the exclusive rights of the
rights owners in the Work, of the exhaustion of those rights or of other
applicable limitations thereto.
5. Obligations of the Licensee
==============================
The grant of the rights mentioned above is subject to some restrictions
and obligations imposed on the Licensee. Those obligations are the
following:
*Attribution right*: The Licensee shall keep intact all copyright,
patent or trademarks notices and all notices that refer to the Licence
and to the disclaimer of warranties. The Licensee must include a copy
of such notices and a copy of the Licence with every copy of the Work
he/she distributes or communicates. The Licensee must cause any
Derivative Work to carry prominent notices stating that the Work has
been modified and the date of modification.
*Copyleft clause*: If the Licensee distributes or communicates copies
of the Original Works or Derivative Works, this Distribution or
Communication will be done under the terms of this Licence or of a
later version of this Licence unless the Original Work is expressly
distributed only under this version of the Licence — for example by
communicating ‘EUPL v. 1.2 only’. The Licensee (becoming Licensor)
cannot offer or impose any additional terms or conditions on the Work
or Derivative Work that alter or restrict the terms of the Licence.
*Compatibility clause*: If the Licensee Distributes or Communicates
Derivative Works or copies thereof based upon both the Work and another
work licensed under a Compatible Licence, this Distribution or
Communication can be done under the terms of this Compatible Licence.
For the sake of this clause, ‘Compatible Licence’ refers to the licences
listed in the appendix attached to this Licence. Should the Licensee’s
obligations under the Compatible Licence conflict with his/her
obligations under this Licence, the obligations of the Compatible
Licence shall prevail.
*Provision of Source Code*: When distributing or communicating copies
of the Work, the Licensee will provide a machine-readable copy of the
Source Code or indicate a repository where this Source will be easily
and freely available for as long as the Licensee continues to distribute
or communicate the Work.
*Legal protection*: This Licence does not grant
permission to use the trade names, trademarks, service marks, or names
of the Licensor, except as required for reasonable and customary use
in describing the origin of the Work and reproducing the content of
the copyright notice.
6. Chain of Authorship
======================
The original Licensor warrants that the copyright in the Original Work
granted hereunder is owned by him/her or licensed to him/her and that
he/she has the power and authority to grant the Licence.
Each Contributor warrants that the copyright in the modifications he/she
brings to the Work are owned by him/her or licensed to him/her and that
he/she has the power and authority to grant the Licence.
Each time You accept the Licence, the original Licensor and subsequent
Contributors grant You a licence to their contributions to the Work,
under the terms of this Licence.
7. Disclaimer of Warranty
=========================
The Work is a work in progress, which is continuously improved by
numerous Contributors. It is not a finished work and may therefore
contain defects or ‘bugs’ inherent to this type of development. For
the above reason, the Work is provided under the Licence on an ‘as is’
basis and without warranties of any kind concerning the Work, including
without limitation merchantability, fitness for a particular purpose,
absence of defects or errors, accuracy, non-infringement of intellectual
property rights other than copyright as stated in Article 6 of this
Licence.
This disclaimer of warranty is an essential part of the Licence and a
condition for the grant of any rights to the Work.
8. Disclaimer of Liability
==========================
Except in the cases of wilful misconduct or damages directly caused to
natural persons, the Licensor will in no event be liable for any direct
or indirect, material or moral, damages of any kind, arising out of the
Licence or of the use of the Work, including without limitation, damages
for loss of goodwill, work stoppage, computer failure or malfunction,
loss of data or any commercial damage, even if the Licensor has been
advised of the possibility of such damage. However, the Licensor will be
liable under statutory product liability laws as far such laws apply to
the Work.
9. Additional agreements
========================
While distributing the Work, You may choose to conclude an additional
agreement, defining obligations or services consistent with this
Licence. However, if accepting obligations, You may act only on your own
behalf and on your sole responsibility, not on behalf of the original
Licensor or any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability incurred
by, or claims asserted against such Contributor by the fact You have
accepted any warranty or additional liability.
10. Acceptance of the Licence
=============================
The provisions of this Licence can be accepted by clicking on an icon
‘I agree’ placed under the bottom of a window displaying the text of
this Licence or by affirming consent in any other similar way, in
accordance with the rules of applicable law. Clicking on that icon
indicates your clear and irrevocable acceptance of this Licence and
all of its terms and conditions.
Similarly, you irrevocably accept this Licence and all of its terms
and conditions by exercising any rights granted to You by Article 2
of this Licence, such as the use of the Work, the creation by You of
a Derivative Work or the Distribution or Communication by You of the
Work or copies thereof.
11. Information to the public
=============================
In case of any Distribution or Communication of the Work by means of
electronic communication by You (for example, by offering to download
the Work from a remote location) the distribution channel or media (for
example, a website) must at least provide to the public the information
requested by the applicable law regarding the Licensor, the Licence and
the way it may be accessible, concluded, stored and reproduced by the
Licensee.
12. Termination of the Licence
==============================
The Licence and the rights granted hereunder will terminate
automatically upon any breach by the Licensee of the terms of the
Licence.
Such a termination will not terminate the licences of any person who
has received the Work from the Licensee under the Licence, provided
such persons remain in full compliance with the Licence.
13. Miscellaneous
=================
Without prejudice of Article 9 above, the Licence represents the
complete agreement between the Parties as to the Work.
If any provision of the Licence is invalid or unenforceable under
applicable law, this will not affect the validity or enforceability of
the Licence as a whole. Such provision will be construed or reformed so
as necessary to make it valid and enforceable.
The European Commission may publish other linguistic versions or new
versions of this Licence or updated versions of the Appendix, so far
this is required and reasonable, without reducing the scope of the
rights granted by the Licence.
New versions of the Licence will be published with a unique
version number.
All linguistic versions of this Licence, approved by the European
Commission, have identical value. Parties can take advantage of the
linguistic version of their choice.
14. Jurisdiction
================
Without prejudice to specific agreement between parties,
* any litigation resulting from the interpretation of this License,
arising between the European Union institutions, bodies, offices or
agencies, as a Licensor, and any Licensee, will be subject to the
jurisdiction of the Court of Justice of the European Union, as laid
down in article 272 of the Treaty on the Functioning of the European
Union,
* any litigation arising between other parties and resulting from the
interpretation of this License, will be subject to the exclusive
jurisdiction of the competent court where the Licensor resides or
conducts its primary business.
15. Applicable Law
==================
Without prejudice to specific agreement between parties,
* this Licence shall be governed by the law of the European Union Member
State where the Licensor has his seat, resides or has his registered
office,
* this licence shall be governed by Belgian law if the Licensor has no
seat, residence or registered office inside a European Union Member
State.
Appendix
========
‘Compatible Licences’ according to Article 5 EUPL are:
* GNU General Public License (GPL) v. 2, v. 3
* GNU Affero General Public License (AGPL) v. 3
* Open Software License (OSL) v. 2.1, v. 3.0
* Eclipse Public License (EPL) v. 1.0
* CeCILL v. 2.0, v. 2.1
* Mozilla Public Licence (MPL) v. 2
* GNU Lesser General Public Licence (LGPL) v. 2.1, v. 3
* Creative Commons Attribution-ShareAlike v. 3.0 Unported
(CC BY-SA 3.0) for works other than software
* European Union Public Licence (EUPL) v. 1.1, v. 1.2
* Québec Free and Open-Source Licence — Reciprocity (LiLiQ-R)
or Strong Reciprocity (LiLiQ-R+)
The European Commission may update this Appendix to later versions of
the above licences without producing a new version of the EUPL, as long
as they provide the rights granted in Article 2 of this Licence and
protect the covered Source Code from exclusive appropriation.
All other changes or additions to this Appendix require the production
of a new EUPL version.
.. end of file: aleksis_app_alsijil-3.0.1/LICENCE.rst